diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..c74d779 --- /dev/null +++ b/.env.example @@ -0,0 +1,87 @@ +# ==================================== +# 🌌 GLOBAL LYRA CONFIG +# ==================================== +LOCAL_TZ_LABEL=America/New_York +DEFAULT_SESSION_ID=default + + +# ==================================== +# πŸ€– LLM BACKEND OPTIONS +# ==================================== +# Services choose which backend to use from these options +# Primary: vLLM on MI50 GPU +LLM_PRIMARY_PROVIDER=vllm +LLM_PRIMARY_URL=http://10.0.0.43:8000 +LLM_PRIMARY_MODEL=/model + +# Secondary: Ollama on 3090 GPU +LLM_SECONDARY_PROVIDER=ollama +LLM_SECONDARY_URL=http://10.0.0.3:11434 +LLM_SECONDARY_MODEL=qwen2.5:7b-instruct-q4_K_M + +# Cloud: OpenAI +LLM_CLOUD_PROVIDER=openai_chat +LLM_CLOUD_URL=https://api.openai.com/v1 +LLM_CLOUD_MODEL=gpt-4o-mini +OPENAI_API_KEY=sk-proj-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + +# Local Fallback: llama.cpp or LM Studio +LLM_FALLBACK_PROVIDER=openai_completions +LLM_FALLBACK_URL=http://10.0.0.41:11435 +LLM_FALLBACK_MODEL=llama-3.2-8b-instruct + +# Global LLM controls +LLM_TEMPERATURE=0.7 + + +# ==================================== +# πŸ—„οΈ DATABASE CONFIGURATION +# ==================================== +# Postgres (pgvector for NeoMem) +POSTGRES_USER=neomem +POSTGRES_PASSWORD=change_me_in_production +POSTGRES_DB=neomem +POSTGRES_HOST=neomem-postgres +POSTGRES_PORT=5432 + +# Neo4j Graph Database +NEO4J_URI=bolt://neomem-neo4j:7687 +NEO4J_USERNAME=neo4j +NEO4J_PASSWORD=change_me_in_production +NEO4J_AUTH=neo4j/change_me_in_production + + +# ==================================== +# 🧠 MEMORY SERVICES (NEOMEM) +# ==================================== +NEOMEM_API=http://neomem-api:7077 +NEOMEM_API_KEY=generate_secure_random_token_here +NEOMEM_HISTORY_DB=postgresql://neomem:change_me_in_production@neomem-postgres:5432/neomem + +# Embeddings configuration (used by NeoMem) +EMBEDDER_PROVIDER=openai +EMBEDDER_MODEL=text-embedding-3-small + + +# ==================================== +# πŸ”Œ INTERNAL SERVICE URLS +# ==================================== +# Using container names for Docker network communication +INTAKE_API_URL=http://intake:7080 +CORTEX_API=http://cortex:7081 +CORTEX_URL=http://cortex:7081/reflect +CORTEX_URL_INGEST=http://cortex:7081/ingest +RAG_API_URL=http://rag:7090 +RELAY_URL=http://relay:7078 + +# Persona service (optional) +PERSONA_URL=http://persona-sidecar:7080/current + + +# ==================================== +# πŸ”§ FEATURE FLAGS +# ==================================== +CORTEX_ENABLED=true +MEMORY_ENABLED=true +PERSONA_ENABLED=false +DEBUG_PROMPT=true diff --git a/.env.logging.example b/.env.logging.example new file mode 100644 index 0000000..b232a62 --- /dev/null +++ b/.env.logging.example @@ -0,0 +1,132 @@ +# ============================================================================ +# CORTEX LOGGING CONFIGURATION +# ============================================================================ +# This file contains all logging-related environment variables for the +# Cortex reasoning pipeline. Copy this to your .env file and adjust as needed. +# +# Log Detail Levels: +# minimal - Only errors and critical events +# summary - Stage completion + errors (DEFAULT - RECOMMENDED FOR PRODUCTION) +# detailed - Include raw LLM outputs, RAG results, timing breakdowns +# verbose - Everything including intermediate states, full JSON dumps +# +# Quick Start: +# - For debugging weak links: LOG_DETAIL_LEVEL=detailed +# - For finding performance bottlenecks: LOG_DETAIL_LEVEL=detailed + VERBOSE_DEBUG=true +# - For production: LOG_DETAIL_LEVEL=summary +# - For silent mode: LOG_DETAIL_LEVEL=minimal +# ============================================================================ + +# ----------------------------- +# Primary Logging Level +# ----------------------------- +# Controls overall verbosity across all components +LOG_DETAIL_LEVEL=detailed + +# Legacy verbose debug flag (kept for compatibility) +# When true, enables maximum logging including raw data dumps +VERBOSE_DEBUG=false + +# ----------------------------- +# LLM Logging +# ----------------------------- +# Enable raw LLM response logging (only works with detailed/verbose levels) +# Shows full JSON responses from each LLM backend call +# Set to "true" to see exact LLM outputs for debugging weak links +LOG_RAW_LLM_RESPONSES=true + +# ----------------------------- +# Context Logging +# ----------------------------- +# Show full raw intake data (L1-L30 summaries) in logs +# WARNING: Very verbose, use only for deep debugging +LOG_RAW_CONTEXT_DATA=false + +# ----------------------------- +# Loop Detection & Protection +# ----------------------------- +# Enable duplicate message detection to prevent processing loops +ENABLE_DUPLICATE_DETECTION=true + +# Maximum number of messages to keep in session history (prevents unbounded growth) +# Older messages are trimmed automatically +MAX_MESSAGE_HISTORY=100 + +# Session TTL in hours - sessions inactive longer than this are auto-expired +SESSION_TTL_HOURS=24 + +# ----------------------------- +# NeoMem / RAG Logging +# ----------------------------- +# Relevance score threshold for NeoMem results +RELEVANCE_THRESHOLD=0.4 + +# Enable NeoMem long-term memory retrieval +NEOMEM_ENABLED=false + +# ----------------------------- +# Autonomous Features +# ----------------------------- +# Enable autonomous tool invocation (RAG, WEB, WEATHER, CODEBRAIN) +ENABLE_AUTONOMOUS_TOOLS=true + +# Confidence threshold for autonomous tool invocation (0.0 - 1.0) +AUTONOMOUS_TOOL_CONFIDENCE_THRESHOLD=0.6 + +# Enable proactive monitoring and suggestions +ENABLE_PROACTIVE_MONITORING=true + +# Minimum priority for proactive suggestions to be included (0.0 - 1.0) +PROACTIVE_SUGGESTION_MIN_PRIORITY=0.6 + +# ============================================================================ +# EXAMPLE LOGGING OUTPUT AT DIFFERENT LEVELS +# ============================================================================ +# +# LOG_DETAIL_LEVEL=summary (RECOMMENDED): +# ──────────────────────────────────────────────────────────────────────────── +# βœ… [LLM] PRIMARY | 14:23:45.123 | Reply: Based on your question about... +# πŸ“Š Context | Session: abc123 | Messages: 42 | Last: 5.2min | RAG: 3 results +# 🧠 Monologue | question | Tone: curious +# ✨ PIPELINE COMPLETE | Session: abc123 | Total: 1250ms +# πŸ“€ Output: 342 characters +# ──────────────────────────────────────────────────────────────────────────── +# +# LOG_DETAIL_LEVEL=detailed (FOR DEBUGGING): +# ──────────────────────────────────────────────────────────────────────────── +# πŸš€ PIPELINE START | Session: abc123 | 14:23:45.123 +# πŸ“ User: What is the meaning of life? +# ──────────────────────────────────────────────────────────────────────────── +# 🧠 LLM CALL | Backend: PRIMARY | 14:23:45.234 +# ──────────────────────────────────────────────────────────────────────────── +# πŸ“ Prompt: You are Lyra, a thoughtful AI assistant... +# πŸ’¬ Reply: Based on philosophical perspectives, the meaning... +# ╭─ RAW RESPONSE ──────────────────────────────────────────────────────────── +# β”‚ { +# β”‚ "choices": [ +# β”‚ { +# β”‚ "message": { +# β”‚ "content": "Based on philosophical perspectives..." +# β”‚ } +# β”‚ } +# β”‚ ] +# β”‚ } +# ╰─────────────────────────────────────────────────────────────────────────── +# +# ✨ PIPELINE COMPLETE | Session: abc123 | Total: 1250ms +# ⏱️ Stage Timings: +# context : 150ms ( 12.0%) +# identity : 10ms ( 0.8%) +# monologue : 200ms ( 16.0%) +# reasoning : 450ms ( 36.0%) +# refinement : 300ms ( 24.0%) +# persona : 140ms ( 11.2%) +# ──────────────────────────────────────────────────────────────────────────── +# +# LOG_DETAIL_LEVEL=verbose (MAXIMUM DEBUG): +# Same as detailed but includes: +# - Full 50+ line raw JSON dumps +# - Complete intake data structures +# - All intermediate processing states +# - Detailed traceback on errors +# ============================================================================ diff --git a/.gitignore b/.gitignore index b07b74f..f55ed37 100644 --- a/.gitignore +++ b/.gitignore @@ -4,9 +4,24 @@ __pycache__/ *.pyc *.log - -# Env files (store secrets locally) +/.vscode/ +.vscode/ +# ============================= +# πŸ” Environment files (NEVER commit secrets!) +# ============================= +# Ignore all .env files .env +.env.local +.env.*.local +**/.env +**/.env.local + +# BUT track .env.example templates (safe to commit) +!.env.example +!**/.env.example + +# Ignore backup directory +.env-backups/ # ============================= # 🐳 Docker volumes (HUGE) @@ -40,3 +55,29 @@ models/ # ============================= node_modules/ core/relay/node_modules/ + +# ============================= +# πŸ’¬ Runtime data & sessions +# ============================= +# Session files (contain user conversation data) +core/relay/sessions/ +**/sessions/ +*.jsonl + +# Log directories +logs/ +**/logs/ +*-logs/ +intake-logs/ + +# Database files (generated at runtime) +*.db +*.sqlite +*.sqlite3 +neomem_history/ +**/neomem_history/ + +# Temporary and cache files +.cache/ +*.tmp +*.temp diff --git a/CHANGELOG.md b/CHANGELOG.md index ce887d0..23bb294 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,643 +1,1521 @@ -# Project Lyra β€” Modular Changelog -All notable changes to Project Lyra are organized by component. -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/) -and adheres to [Semantic Versioning](https://semver.org/). -# Last Updated: 11-13-25 +# Project Lyra Changelog + +All notable changes to Project Lyra. +Format based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/) and [Semantic Versioning](https://semver.org/). + --- -## 🧠 Lyra-Core ############################################################################## +## [Unreleased] -## [Lyra_RAG v0.1.0] 2025-11-07 -### Added -- Initial standalone RAG module for Project Lyra. -- Persistent ChromaDB vector store (`./chromadb`). -- Importer `rag_chat_import.py` with: - - Recursive folder scanning and category tagging. - - Smart chunking (~5 k chars). - - SHA-1 deduplication and chat-ID metadata. - - Timestamp fields (`file_modified`, `imported_at`). - - Background-safe operation (`nohup`/`tmux`). -- 68 Lyra-category chats imported: - - **6 556 new chunks added** - - **1 493 duplicates skipped** - - **7 997 total vectors** now stored. +--- +##[0.9.1] - 2025-12-29 +Fixed: + - chat auto scrolling now works. + - Session names don't change to auto gen UID anymore. -### API -- `/rag/search` FastAPI endpoint implemented (port 7090). -- Supports natural-language queries and returns top related excerpts. -- Added answer synthesis step using `gpt-4o-mini`. -### Verified -- Successful recall of Lyra-Core development history (v0.3.0 snapshot). -- Correct metadata and category tagging for all new imports. +## [0.9.0] - 2025-12-29 -### Next Planned -- Optional `where` filter parameter for category/date queries. -- Graceful β€œno results” handler for empty retrievals. -- `rag_docs_import.py` for PDFs and other document types. +### Added - Trilium Notes Integration -## [Lyra Core v0.3.2 + Web Ui v0.2.0] - 2025-10-28 +**Trilium ETAPI Knowledge Base Integration** +- **Trilium Tool Executor** [cortex/autonomy/tools/executors/trilium.py](cortex/autonomy/tools/executors/trilium.py) + - `search_notes(query, limit)` - Search through Trilium notes via ETAPI + - `create_note(title, content, parent_note_id)` - Create new notes in Trilium knowledge base + - Full ETAPI authentication and error handling + - Automatic parentNoteId defaulting to "root" for root-level notes + - Connection error handling with user-friendly messages +- **Tool Registry Integration** [cortex/autonomy/tools/registry.py](cortex/autonomy/tools/registry.py) + - Added `ENABLE_TRILIUM` feature flag + - Tool definitions with schema validation + - Provider-agnostic tool calling support +- **Setup Documentation** [TRILIUM_SETUP.md](TRILIUM_SETUP.md) + - Step-by-step ETAPI token generation guide + - Environment configuration instructions + - Troubleshooting section for common issues + - Security best practices for token management +- **API Reference Documentation** [docs/TRILIUM_API.md](docs/TRILIUM_API.md) + - Complete ETAPI endpoint reference + - Authentication and request/response examples + - Search syntax and advanced query patterns -### Added -- ** New UI ** - - Cleaned up UI look and feel. - -- ** Added "sessions" ** - - Now sessions persist over time. - - Ability to create new sessions or load sessions from a previous instance. - - When changing the session, it updates what the prompt is sending relay (doesn't prompt with messages from other sessions). - - Relay is correctly wired in. +**Environment Configuration** +- **New Environment Variables** [.env](.env) + - `ENABLE_TRILIUM=true` - Enable/disable Trilium integration + - `TRILIUM_URL=http://10.0.0.2:4292` - Trilium instance URL + - `TRILIUM_ETAPI_TOKEN` - ETAPI authentication token -## [Lyra-Core 0.3.1] - 2025-10-09 +**Capabilities Unlocked** +- Personal knowledge base search during conversations +- Automatic note creation from conversation insights +- Cross-reference information between chat and notes +- Context-aware responses using stored knowledge +- Future: Find duplicates, suggest organization, summarize notes -### Added -- **NVGRAM Integration (Full Pipeline Reconnected)** - - Replaced legacy Mem0 service with NVGRAM microservice (`nvgram-api` @ port 7077). - - Updated `server.js` in Relay to route all memory ops via `${NVGRAM_API}/memories` and `/search`. - - Added `.env` variable: - ``` - NVGRAM_API=http://nvgram-api:7077 - ``` - - Verified end-to-end Lyra conversation persistence: - - `relay β†’ nvgram-api β†’ postgres/neo4j β†’ relay β†’ ollama β†’ ui` - - βœ… Memories stored, retrieved, and re-injected successfully. +### Changed - Spelling Corrections -### Changed -- Renamed `MEM0_URL` β†’ `NVGRAM_API` across all relay environment configs. -- Updated Docker Compose service dependency order: - - `relay` now depends on `nvgram-api` healthcheck. - - Removed `mem0` references and volumes. -- Minor cleanup to Persona fetch block (null-checks and safer default persona string). +**Module Naming** +- Renamed `trillium.py` to `trilium.py` (corrected spelling) +- Updated all imports and references across codebase +- Fixed environment variable names (TRILLIUM β†’ TRILIUM) +- Updated documentation to use correct "Trilium" spelling + +--- + +## [0.8.0] - 2025-12-26 + +### Added - Tool Calling & "Show Your Work" Transparency Feature + +**Tool Calling System (Standard Mode)** +- **Function Calling Infrastructure** [cortex/autonomy/tools/](cortex/autonomy/tools/) + - Implemented agentic tool calling for Standard Mode with autonomous multi-step execution + - Tool registry system with JSON schema definitions + - Adapter pattern for provider-agnostic tool calling (OpenAI, Ollama, llama.cpp) + - Maximum 5 iterations per request to prevent runaway loops +- **Available Tools** + - `execute_code` - Sandboxed Python/JavaScript/Bash execution via Docker + - `web_search` - Tavily API integration for real-time web queries + - `trilium_search` - Internal Trilium knowledge base queries +- **Provider Adapters** [cortex/autonomy/tools/adapters/](cortex/autonomy/tools/adapters/) + - `OpenAIAdapter` - Native function calling support + - `OllamaAdapter` - XML-based tool calling for local models + - `LlamaCppAdapter` - XML-based tool calling for llama.cpp backend + - Automatic tool call parsing and result formatting +- **Code Execution Sandbox** [cortex/autonomy/tools/code_executor.py](cortex/autonomy/tools/code_executor.py) + - Docker-based isolated execution environment + - Support for Python, JavaScript (Node.js), and Bash + - 30-second timeout with automatic cleanup + - Returns stdout, stderr, exit code, and execution time + - Prevents filesystem access outside sandbox + +**"Show Your Work" - Real-Time Thinking Stream** +- **Server-Sent Events (SSE) Streaming** [cortex/router.py:478-527](cortex/router.py#L478-L527) + - New `/stream/thinking/{session_id}` endpoint for real-time event streaming + - Broadcasts internal thinking process during tool calling operations + - 30-second keepalive with automatic reconnection support + - Events: `connected`, `thinking`, `tool_call`, `tool_result`, `done`, `error` +- **Stream Manager** [cortex/autonomy/tools/stream_events.py](cortex/autonomy/tools/stream_events.py) + - Pub/sub system for managing SSE subscriptions per session + - Multiple clients can connect to same session stream + - Automatic cleanup of dead queues and closed connections + - Zero overhead when no subscribers active +- **FunctionCaller Integration** [cortex/autonomy/tools/function_caller.py](cortex/autonomy/tools/function_caller.py) + - Enhanced with event emission at each step: + - "thinking" events before each LLM call + - "tool_call" events when invoking tools + - "tool_result" events after tool execution + - "done" event with final answer + - "error" events on failures + - Session-aware streaming (only emits when subscribers exist) + - Provider-agnostic implementation works with all backends +- **Thinking Stream UI** [core/ui/thinking-stream.html](core/ui/thinking-stream.html) + - Dedicated popup window for real-time thinking visualization + - Color-coded events: green (thinking), orange (tool calls), blue (results), purple (done), red (errors) + - Auto-scrolling event feed with animations + - Connection status indicator with green/red dot + - Clear events button and session info display + - Mobile-friendly responsive design +- **UI Integration** [core/ui/index.html](core/ui/index.html) + - "🧠 Show Work" button in session selector + - Opens thinking stream in popup window + - Session ID passed via URL parameter for stream association + - Purple/violet button styling to match cyberpunk theme + +**Tool Calling Configuration** +- **Environment Variables** [.env](.env) + - `STANDARD_MODE_ENABLE_TOOLS=true` - Enable/disable tool calling + - `TAVILY_API_KEY` - API key for web search tool + - `TRILLIUM_API_URL` - URL for Trillium knowledge base +- **Standard Mode Tools Toggle** [cortex/router.py:389-470](cortex/router.py#L389-L470) + - `/simple` endpoint checks `STANDARD_MODE_ENABLE_TOOLS` environment variable + - Falls back to non-tool mode if disabled + - Logs tool usage statistics (iterations, tools used) + +### Changed - CORS & Architecture + +**CORS Support for SSE** +- **Added CORS Middleware** [cortex/main.py](cortex/main.py) + - FastAPI CORSMiddleware with wildcard origins for development + - Allows cross-origin SSE connections from nginx UI (port 8081) to cortex (port 7081) + - Credentials support enabled for authenticated requests + - All methods and headers permitted + +**Tool Calling Pipeline** +- **Standard Mode Enhancement** [cortex/router.py:389-470](cortex/router.py#L389-L470) + - `/simple` endpoint now supports optional tool calling + - Multi-iteration agentic loop with LLM + tool execution + - Tool results injected back into conversation for next iteration + - Graceful degradation to non-tool mode if tools disabled + +**JSON Response Formatting** +- **SSE Event Structure** [cortex/router.py:497-499](cortex/router.py#L497-L499) + - Fixed initial "connected" event to use proper JSON serialization + - Changed from f-string with nested quotes to `json.dumps()` + - Ensures valid JSON for all event types + +### Fixed - Critical JavaScript & SSE Issues + +**JavaScript Variable Scoping Bug** +- **Root cause**: `eventSource` variable used before declaration in [thinking-stream.html:218](core/ui/thinking-stream.html#L218) +- **Symptom**: `Uncaught ReferenceError: can't access lexical declaration 'eventSource' before initialization` +- **Solution**: Moved variable declarations before `connectStream()` call +- **Impact**: Thinking stream page now loads without errors and establishes SSE connection + +**SSE Connection Not Establishing** +- **Root cause**: CORS blocked cross-origin SSE requests from nginx (8081) to cortex (7081) +- **Symptom**: Browser silently blocked EventSource connection, no errors in console +- **Solution**: Added CORSMiddleware to cortex FastAPI app +- **Impact**: SSE streams now connect successfully across ports + +**Invalid JSON in SSE Events** +- **Root cause**: Initial "connected" event used f-string with nested quotes: `f"data: {{'type': 'connected', 'session_id': '{session_id}'}}\n\n"` +- **Symptom**: Browser couldn't parse malformed JSON, connection appeared stuck on "Connecting..." +- **Solution**: Used `json.dumps()` for proper JSON serialization +- **Impact**: Connected event now parsed correctly, status updates to green dot + +### Technical Improvements + +**Agentic Architecture** +- Multi-iteration reasoning loop with tool execution +- Provider-agnostic tool calling via adapter pattern +- Automatic tool result injection into conversation context +- Iteration limits to prevent infinite loops +- Comprehensive logging at each step + +**Event Streaming Performance** +- Zero overhead when no subscribers (check before emit) +- Efficient pub/sub with asyncio queues +- Automatic cleanup of disconnected clients +- 30-second keepalive prevents timeout issues +- Session-isolated streams prevent cross-talk + +**Code Quality** +- Clean separation: tool execution, adapters, streaming, UI +- Comprehensive error handling with fallbacks +- Detailed logging for debugging tool calls +- Type hints and docstrings throughout +- Modular design for easy extension + +**Security** +- Sandboxed code execution prevents filesystem access +- Timeout limits prevent resource exhaustion +- Docker isolation for untrusted code +- No code execution without explicit user request + +### Architecture - Tool Calling Flow + +**Standard Mode with Tools:** +``` +User (UI) β†’ Relay β†’ Cortex /simple + ↓ + Check STANDARD_MODE_ENABLE_TOOLS + ↓ + LLM generates tool call β†’ FunctionCaller + ↓ + Execute tool (Docker sandbox / API call) + ↓ + Inject result β†’ LLM (next iteration) + ↓ + Repeat until done or max iterations + ↓ + Return final answer β†’ UI +``` + +**Thinking Stream Flow:** +``` +Browser β†’ nginx:8081 β†’ thinking-stream.html + ↓ +EventSource connects to cortex:7081/stream/thinking/{session_id} + ↓ +ToolStreamManager.subscribe(session_id) β†’ asyncio.Queue + ↓ +User sends message β†’ /simple endpoint + ↓ +FunctionCaller emits events: + - emit("thinking") β†’ Queue β†’ SSE β†’ Browser + - emit("tool_call") β†’ Queue β†’ SSE β†’ Browser + - emit("tool_result") β†’ Queue β†’ SSE β†’ Browser + - emit("done") β†’ Queue β†’ SSE β†’ Browser + ↓ +Browser displays color-coded events in real-time +``` + +### Documentation + +- **Added** [THINKING_STREAM.md](THINKING_STREAM.md) - Complete guide to "Show Your Work" feature + - Usage examples with curl + - Event type reference + - Architecture diagrams + - Demo page instructions +- **Added** [UI_THINKING_STREAM.md](UI_THINKING_STREAM.md) - UI integration documentation + - Button placement and styling + - Popup window behavior + - Session association logic + +### Known Limitations + +**Tool Calling:** +- Limited to 5 iterations per request (prevents runaway loops) +- Python sandbox has no filesystem persistence (temporary only) +- Web search requires Tavily API key (not free tier unlimited) +- Trillium search requires separate knowledge base setup + +**Thinking Stream:** +- CORS wildcard (`*`) is development-only (should restrict in production) +- Stream ends after "done" event (must reconnect for new request) +- No historical replay (only shows real-time events) +- Single session per stream window + +### Migration Notes + +**For Users Upgrading:** +1. New environment variable: `STANDARD_MODE_ENABLE_TOOLS=true` (default: enabled) +2. Thinking stream accessible via "🧠 Show Work" button in UI +3. Tool calling works automatically in Standard Mode when enabled +4. No changes required to existing Standard Mode usage + +**For Developers:** +1. Cortex now includes CORS middleware for SSE +2. New `/stream/thinking/{session_id}` endpoint available +3. FunctionCaller requires `session_id` parameter for streaming +4. Tool adapters can be extended by adding to `AVAILABLE_TOOLS` registry + +--- + +## [0.7.0] - 2025-12-21 + +### Added - Standard Mode & UI Enhancements + +**Standard Mode Implementation** +- Added "Standard Mode" chat option that bypasses complex cortex reasoning pipeline + - Provides simple chatbot functionality for coding and practical tasks + - Maintains full conversation context across messages + - Backend-agnostic - works with SECONDARY (Ollama), OPENAI, or custom backends + - Created `/simple` endpoint in Cortex router [cortex/router.py:389](cortex/router.py#L389) +- Mode selector in UI with toggle between Standard and Cortex modes + - Standard Mode: Direct LLM chat with context retention + - Cortex Mode: Full 7-stage reasoning pipeline (unchanged) + +**Backend Selection System** +- UI settings modal with LLM backend selection for Standard Mode + - Radio button selector: SECONDARY (Ollama/Qwen), OPENAI (GPT-4o-mini), or custom + - Backend preference persisted in localStorage + - Custom backend text input for advanced users +- Backend parameter routing through entire stack: + - UI sends `backend` parameter in request body + - Relay forwards backend selection to Cortex + - Cortex `/simple` endpoint respects user's backend choice +- Environment-based fallback: Uses `STANDARD_MODE_LLM` if no backend specified + +**Session Management Overhaul** +- Complete rewrite of session system to use server-side persistence + - File-based storage in `core/relay/sessions/` directory + - Session files: `{sessionId}.json` for history, `{sessionId}.meta.json` for metadata + - Server is source of truth - sessions sync across browsers and reboots +- Session metadata system for friendly names + - Sessions display custom names instead of random IDs + - Rename functionality in session dropdown + - Last modified timestamps and message counts +- Full CRUD API for sessions in Relay: + - `GET /sessions` - List all sessions with metadata + - `GET /sessions/:id` - Retrieve session history + - `POST /sessions/:id` - Save session history + - `PATCH /sessions/:id/metadata` - Update session name/metadata + - `DELETE /sessions/:id` - Delete session and metadata +- Session management UI in settings modal: + - List of all sessions with message counts and timestamps + - Delete button for each session with confirmation + - Automatic session cleanup when deleting current session + +**UI Improvements** +- Settings modal with hamburger menu (βš™ Settings button) + - Backend selection section for Standard Mode + - Session management section with delete functionality + - Clean modal overlay with cyberpunk theme + - ESC key and click-outside to close +- Light/Dark mode toggle with dark mode as default + - Theme preference persisted in localStorage + - CSS variables for seamless theme switching + - Toggle button shows current mode (πŸŒ™ Dark Mode / β˜€οΈ Light Mode) +- Removed redundant model selector dropdown from header +- Fixed modal positioning and z-index layering + - Modal moved outside #chat container for proper rendering + - Fixed z-index: overlay (999), modal content (1001) + - Centered modal with proper backdrop blur + +**Context Retention for Standard Mode** +- Integration with Intake module for conversation history + - Added `get_recent_messages()` function in intake.py + - Standard Mode retrieves last 20 messages from session buffer + - Full context sent to LLM on each request +- Message array format support in LLM router: + - Updated Ollama provider to accept `messages` parameter + - Updated OpenAI provider to accept `messages` parameter + - Automatic conversion from messages to prompt string for non-chat APIs + +### Changed - Architecture & Routing + +**Relay Server Updates** [core/relay/server.js](core/relay/server.js) +- ES module migration for session persistence: + - Imported `fs/promises`, `path`, `fileURLToPath` for file operations + - Created `SESSIONS_DIR` constant for session storage location +- Mode-based routing in both `/chat` and `/v1/chat/completions` endpoints: + - Extracts `mode` parameter from request body (default: "cortex") + - Routes to `CORTEX_SIMPLE` for Standard Mode, `CORTEX_REASON` for Cortex Mode + - Backend parameter only used in Standard Mode +- Session persistence functions: + - `ensureSessionsDir()` - Creates sessions directory if needed + - `loadSession(sessionId)` - Reads session history from file + - `saveSession(sessionId, history, metadata)` - Writes session to file + - `loadSessionMetadata(sessionId)` - Reads session metadata + - `saveSessionMetadata(sessionId, metadata)` - Updates session metadata + - `listSessions()` - Returns all sessions with metadata, sorted by last modified + - `deleteSession(sessionId)` - Removes session and metadata files + +**Cortex Router Updates** [cortex/router.py](cortex/router.py) +- Added `backend` field to `ReasonRequest` Pydantic model (optional) +- Created `/simple` endpoint for Standard Mode: + - Bypasses reflection, reasoning, refinement stages + - Direct LLM call with conversation context + - Uses backend from request or falls back to `STANDARD_MODE_LLM` env variable + - Returns simple response structure without reasoning artifacts +- Backend selection logic in `/simple`: + - Normalizes backend names to uppercase + - Maps UI backend names to system backend names + - Validates backend availability before calling + +**Intake Integration** [cortex/intake/intake.py](cortex/intake/intake.py) +- Added `get_recent_messages(session_id, limit)` function: + - Retrieves last N messages from session buffer + - Returns empty list if session doesn't exist + - Used by `/simple` endpoint for context retrieval + +**LLM Router Enhancements** [cortex/llm/llm_router.py](cortex/llm/llm_router.py) +- Added `messages` parameter support across all providers +- Automatic message-to-prompt conversion for legacy APIs +- Chat completion format for Ollama and OpenAI providers +- Stop sequences for MI50/DeepSeek R1 to prevent runaway generation: + - `"User:"`, `"\nUser:"`, `"Assistant:"`, `"\n\n\n"` + +**Environment Configuration** [.env](.env) +- Added `STANDARD_MODE_LLM=SECONDARY` for default Standard Mode backend +- Added `CORTEX_SIMPLE_URL=http://cortex:7081/simple` for routing + +**UI Architecture** [core/ui/index.html](core/ui/index.html) +- Server-based session loading system: + - `loadSessionsFromServer()` - Fetches sessions from Relay API + - `renderSessions()` - Populates session dropdown from server data + - Session state synchronized with server on every change +- Backend selection persistence: + - Loads saved backend from localStorage on page load + - Includes backend parameter in request body when in Standard Mode + - Settings modal pre-selects current backend choice +- Dark mode by default: + - Checks localStorage for theme preference + - Sets dark theme if no preference found + - Toggle button updates localStorage and applies theme + +**CSS Styling** [core/ui/style.css](core/ui/style.css) +- Light mode CSS variables: + - `--bg-dark: #f5f5f5` (light background) + - `--text-main: #1a1a1a` (dark text) + - `--text-fade: #666` (dimmed text) +- Dark mode CSS variables (default): + - `--bg-dark: #0a0a0a` (dark background) + - `--text-main: #e6e6e6` (light text) + - `--text-fade: #999` (dimmed text) +- Modal positioning fixes: + - `position: fixed` with `top: 50%`, `left: 50%`, `transform: translate(-50%, -50%)` + - Z-index layering: overlay (999), content (1001) + - Backdrop blur effect on modal overlay +- Session list styling: + - Session item cards with hover effects + - Delete button with red hover state + - Message count and timestamp display + +### Fixed - Critical Issues + +**DeepSeek R1 Runaway Generation** +- Root cause: R1 reasoning model generates thinking process and hallucinates conversations +- Solution: + - Changed `STANDARD_MODE_LLM` to SECONDARY (Ollama/Qwen) instead of PRIMARY (MI50/R1) + - Added stop sequences to MI50 provider to prevent continuation + - Documented R1 limitations for Standard Mode usage + +**Context Not Maintained in Standard Mode** +- Root cause: `/simple` endpoint didn't retrieve conversation history from Intake +- Solution: + - Created `get_recent_messages()` function in intake.py + - Standard Mode now pulls last 20 messages from session buffer + - Full context sent to LLM with each request +- User feedback: "it's saying it hasn't received any other messages from me, so it looks like the standard mode llm isn't getting the full chat" + +**OpenAI Backend 400 Errors** +- Root cause: OpenAI provider only accepted prompt strings, not messages arrays +- Solution: Updated OpenAI provider to support messages parameter like Ollama +- Now handles chat completion format correctly + +**Modal Formatting Issues** +- Root cause: Settings modal inside #chat container with overflow constraints +- Symptoms: Modal appearing at bottom, jumbled layout, couldn't close +- Solution: + - Moved modal outside #chat container to be direct child of body + - Changed positioning from absolute to fixed + - Added proper z-index layering (overlay: 999, content: 1001) + - Removed old model selector from header +- User feedback: "the formating for the settings is all off. Its at the bottom and all jumbling together, i cant get it to go away" + +**Session Persistence Broken** +- Root cause: Sessions stored only in localStorage, not synced with server +- Symptoms: Sessions didn't persist across browsers or reboots, couldn't load messages +- Solution: Complete rewrite of session system + - Implemented server-side file persistence in Relay + - Created CRUD API endpoints for session management + - Updated UI to load sessions from server instead of localStorage + - Added metadata system for session names + - Sessions now survive container restarts and sync across browsers +- User feedback: "sessions seem to exist locally only, i cant get them to actually load any messages and there is now way to delete them. If i open the ui in a different browser those arent there." + +### Technical Improvements + +**Backward Compatibility** +- All changes include defaults to maintain existing behavior +- Cortex Mode completely unchanged - still uses full 7-stage pipeline +- Standard Mode is opt-in via UI mode selector +- If no backend specified, falls back to `STANDARD_MODE_LLM` env variable +- Existing requests without mode parameter default to "cortex" + +**Code Quality** +- Consistent async/await patterns throughout stack +- Proper error handling with fallbacks +- Clean separation between Standard and Cortex modes +- Session persistence abstracted into helper functions +- Modular UI code with clear event handlers + +**Performance** +- Standard Mode bypasses 6 of 7 reasoning stages for faster responses +- Session loading optimized with file-based caching +- Backend selection happens once per message, not per LLM call +- Minimal overhead for mode detection and routing + +### Architecture - Dual-Mode Chat System + +**Standard Mode Flow:** +``` +User (UI) β†’ Relay β†’ Cortex /simple β†’ Intake (get_recent_messages) +β†’ LLM (direct call with context) β†’ Relay β†’ UI +``` + +**Cortex Mode Flow (Unchanged):** +``` +User (UI) β†’ Relay β†’ Cortex /reason β†’ Reflection β†’ Reasoning +β†’ Refinement β†’ Persona β†’ Relay β†’ UI +``` + +**Session Persistence:** +``` +UI β†’ POST /sessions/:id β†’ Relay β†’ File system (sessions/*.json) +UI β†’ GET /sessions β†’ Relay β†’ List all sessions β†’ UI dropdown +``` + +### Known Limitations + +**Standard Mode:** +- No reflection, reasoning, or refinement stages +- No RAG integration (same as Cortex Mode - currently disabled) +- No NeoMem memory storage (same as Cortex Mode - currently disabled) +- DeepSeek R1 not recommended for Standard Mode (generates reasoning artifacts) + +**Session Management:** +- Sessions stored in container filesystem - need volume mount for true persistence +- No session import/export functionality yet +- No session search or filtering + +### Migration Notes + +**For Users Upgrading:** +1. Existing sessions in localStorage will not automatically migrate to server +2. Create new sessions after upgrade for server-side persistence +3. Theme preference (light/dark) will be preserved from localStorage +4. Backend preference will default to SECONDARY if not previously set + +**For Developers:** +1. Relay now requires `fs/promises` for session persistence +2. Cortex `/simple` endpoint expects `backend` parameter (optional) +3. UI sends `mode` and `backend` parameters in request body +4. Session files stored in `core/relay/sessions/` directory + +--- + +## [0.6.0] - 2025-12-18 + +### Added - Autonomy System (Phase 1 & 2) + +**Autonomy Phase 1** - Self-Awareness & Planning Foundation +- **Executive Planning Module** [cortex/autonomy/executive/planner.py](cortex/autonomy/executive/planner.py) + - Autonomous goal setting and task planning capabilities + - Multi-step reasoning for complex objectives + - Integration with self-state tracking +- **Self-State Management** [cortex/data/self_state.json](cortex/data/self_state.json) + - Persistent state tracking across sessions + - Memory of past actions and outcomes + - Self-awareness metadata storage +- **Self Analyzer** [cortex/autonomy/self/analyzer.py](cortex/autonomy/self/analyzer.py) + - Analyzes own performance and decision patterns + - Identifies areas for improvement + - Tracks cognitive patterns over time +- **Test Suite** [cortex/tests/test_autonomy_phase1.py](cortex/tests/test_autonomy_phase1.py) + - Unit tests for phase 1 autonomy features + +**Autonomy Phase 2** - Decision Making & Proactive Behavior +- **Autonomous Actions Module** [cortex/autonomy/actions/autonomous_actions.py](cortex/autonomy/actions/autonomous_actions.py) + - Self-initiated action execution + - Context-aware decision implementation + - Action logging and tracking +- **Pattern Learning System** [cortex/autonomy/learning/pattern_learner.py](cortex/autonomy/learning/pattern_learner.py) + - Learns from interaction patterns + - Identifies recurring user needs + - Adapts behavior based on learned patterns +- **Proactive Monitor** [cortex/autonomy/proactive/monitor.py](cortex/autonomy/proactive/monitor.py) + - Monitors system state for intervention opportunities + - Detects patterns requiring proactive response + - Background monitoring capabilities +- **Decision Engine** [cortex/autonomy/tools/decision_engine.py](cortex/autonomy/tools/decision_engine.py) + - Autonomous decision-making framework + - Weighs options and selects optimal actions + - Integrates with orchestrator for coordinated decisions +- **Orchestrator** [cortex/autonomy/tools/orchestrator.py](cortex/autonomy/tools/orchestrator.py) + - Coordinates multiple autonomy subsystems + - Manages tool selection and execution + - Handles NeoMem integration (with disable capability) +- **Test Suite** [cortex/tests/test_autonomy_phase2.py](cortex/tests/test_autonomy_phase2.py) + - Unit tests for phase 2 autonomy features + +**Autonomy Phase 2.5** - Pipeline Refinement +- Tightened integration between autonomy modules and reasoning pipeline +- Enhanced self-state persistence and tracking +- Improved orchestrator reliability +- NeoMem integration refinements in vector store handling [neomem/neomem/vector_stores/qdrant.py](neomem/neomem/vector_stores/qdrant.py) + +### Added - Documentation + +- **Complete AI Agent Breakdown** [docs/PROJECT_LYRA_COMPLETE_BREAKDOWN.md](docs/PROJECT_LYRA_COMPLETE_BREAKDOWN.md) + - Comprehensive system architecture documentation + - Detailed component descriptions + - Data flow diagrams + - Integration points and API specifications + +### Changed - Core Integration + +- **Router Updates** [cortex/router.py](cortex/router.py) + - Integrated autonomy subsystems into main routing logic + - Added endpoints for autonomous decision-making + - Enhanced state management across requests +- **Reasoning Pipeline** [cortex/reasoning/reasoning.py](cortex/reasoning/reasoning.py) + - Integrated autonomy-aware reasoning + - Self-state consideration in reasoning process +- **Persona Layer** [cortex/persona/speak.py](cortex/persona/speak.py) + - Autonomy-aware response generation + - Self-state reflection in personality expression +- **Context Handling** [cortex/context.py](cortex/context.py) + - NeoMem disable capability for flexible deployment + +### Changed - Development Environment + +- Updated [.gitignore](.gitignore) for better workspace management +- Cleaned up VSCode settings +- Removed [.vscode/settings.json](.vscode/settings.json) from repository + +### Technical Improvements + +- Modular autonomy architecture with clear separation of concerns +- Test-driven development for new autonomy features +- Enhanced state persistence across system restarts +- Flexible NeoMem integration with enable/disable controls + +### Architecture - Autonomy System Design + +The autonomy system operates in layers: +1. **Executive Layer** - High-level planning and goal setting +2. **Decision Layer** - Evaluates options and makes choices +3. **Action Layer** - Executes autonomous decisions +4. **Learning Layer** - Adapts behavior based on patterns +5. **Monitoring Layer** - Proactive awareness of system state + +All layers coordinate through the orchestrator and maintain state in `self_state.json`. + +--- + +## [0.5.2] - 2025-12-12 + +### Fixed - LLM Router & Async HTTP +- **Critical**: Replaced synchronous `requests` with async `httpx` in LLM router [cortex/llm/llm_router.py](cortex/llm/llm_router.py) + - Event loop blocking was causing timeouts and empty responses + - All three providers (MI50, Ollama, OpenAI) now use `await http_client.post()` + - Fixes "Expecting value: line 1 column 1 (char 0)" JSON parsing errors in intake +- **Critical**: Fixed missing `backend` parameter in intake summarization [cortex/intake/intake.py:285](cortex/intake/intake.py#L285) + - Was defaulting to PRIMARY (MI50) instead of respecting `INTAKE_LLM=SECONDARY` + - Now correctly uses configured backend (Ollama on 3090) +- **Relay**: Fixed session ID case mismatch [core/relay/server.js:87](core/relay/server.js#L87) + - UI sends `sessionId` (camelCase) but relay expected `session_id` (snake_case) + - Now accepts both variants: `req.body.session_id || req.body.sessionId` + - Custom session IDs now properly tracked instead of defaulting to "default" + +### Added - Error Handling & Diagnostics +- Added comprehensive error handling in LLM router for all providers + - HTTPError, JSONDecodeError, KeyError, and generic Exception handling + - Detailed error messages with exception type and description + - Provider-specific error logging (mi50, ollama, openai) +- Added debug logging in intake summarization + - Logs LLM response length and preview + - Validates non-empty responses before JSON parsing + - Helps diagnose empty or malformed responses + +### Added - Session Management +- Added session persistence endpoints in relay [core/relay/server.js:160-171](core/relay/server.js#L160-L171) + - `GET /sessions/:id` - Retrieve session history + - `POST /sessions/:id` - Save session history + - In-memory storage using Map (ephemeral, resets on container restart) + - Fixes UI "Failed to load session" errors + +### Changed - Provider Configuration +- Added `mi50` provider support for llama.cpp server [cortex/llm/llm_router.py:62-81](cortex/llm/llm_router.py#L62-L81) + - Uses `/completion` endpoint with `n_predict` parameter + - Extracts `content` field from response + - Configured for MI50 GPU with DeepSeek model +- Increased memory retrieval threshold from 0.78 to 0.90 [cortex/.env:20](cortex/.env#L20) + - Filters out low-relevance memories (only returns 90%+ similarity) + - Reduces noise in context retrieval + +### Technical Improvements +- Unified async HTTP handling across all LLM providers +- Better separation of concerns between provider implementations +- Improved error messages for debugging LLM API failures +- Consistent timeout handling (120 seconds for all providers) + +--- + +## [0.5.1] - 2025-12-11 + +### Fixed - Intake Integration +- **Critical**: Fixed `bg_summarize()` function not defined error + - Was only a `TYPE_CHECKING` stub, now implemented as logging stub + - Eliminated `NameError` preventing SESSIONS from persisting correctly + - Function now logs exchange additions and defers summarization to `/reason` endpoint +- **Critical**: Fixed `/ingest` endpoint unreachable code in [router.py:201-233](cortex/router.py#L201-L233) + - Removed early return that prevented `update_last_assistant_message()` from executing + - Removed duplicate `add_exchange_internal()` call + - Implemented lenient error handling (each operation wrapped in try/except) +- **Intake**: Added missing `__init__.py` to make intake a proper Python package [cortex/intake/__init__.py](cortex/intake/__init__.py) + - Prevents namespace package issues + - Enables proper module imports + - Exports `SESSIONS`, `add_exchange_internal`, `summarize_context` + +### Added - Diagnostics & Debugging +- Added diagnostic logging to verify SESSIONS singleton behavior + - Module initialization logs SESSIONS object ID [intake.py:14](cortex/intake/intake.py#L14) + - Each `add_exchange_internal()` call logs object ID and buffer state [intake.py:343-358](cortex/intake/intake.py#L343-L358) +- Added `/debug/sessions` HTTP endpoint [router.py:276-305](cortex/router.py#L276-L305) + - Inspect SESSIONS from within running Uvicorn worker + - Shows total sessions, session count, buffer sizes, recent exchanges + - Returns SESSIONS object ID for verification +- Added `/debug/summary` HTTP endpoint [router.py:238-271](cortex/router.py#L238-L271) + - Test `summarize_context()` for any session + - Returns L1/L5/L10/L20/L30 summaries + - Includes buffer size and exchange preview + +### Changed - Intake Architecture +- **Intake no longer standalone service** - runs inside Cortex container as pure Python module + - Imported as `from intake.intake import add_exchange_internal, SESSIONS` + - No HTTP calls between Cortex and Intake + - Eliminates network latency and dependency on Intake service being up +- **Deferred summarization**: `bg_summarize()` is now a no-op stub [intake.py:318-325](cortex/intake/intake.py#L318-L325) + - Actual summarization happens during `/reason` call via `summarize_context()` + - Simplifies async/sync complexity + - Prevents NameError when called from `add_exchange_internal()` +- **Lenient error handling**: `/ingest` endpoint always returns success [router.py:201-233](cortex/router.py#L201-L233) + - Each operation wrapped in try/except + - Logs errors but never fails to avoid breaking chat pipeline + - User requirement: never fail chat pipeline + +### Documentation +- Added single-worker constraint note in [cortex/Dockerfile:7-8](cortex/Dockerfile#L7-L8) + - Documents that SESSIONS requires single Uvicorn worker + - Notes that multi-worker scaling requires Redis or shared storage +- Updated plan documentation with root cause analysis + +--- + +## [0.5.0] - 2025-11-28 + +### Fixed - Critical API Wiring & Integration + +After the major architectural rewire (v0.4.x), this release fixes all critical endpoint mismatches and ensures end-to-end system connectivity. + +#### Cortex β†’ Intake Integration +- **Fixed** `IntakeClient` to use correct Intake v0.2 API endpoints + - Changed `GET /context/{session_id}` β†’ `GET /summaries?session_id={session_id}` + - Updated JSON response parsing to extract `summary_text` field + - Fixed environment variable name: `INTAKE_API` β†’ `INTAKE_API_URL` + - Corrected default port: `7083` β†’ `7080` + - Added deprecation warning to `summarize_turn()` method (endpoint removed in Intake v0.2) + +#### Relay β†’ UI Compatibility +- **Added** OpenAI-compatible endpoint `POST /v1/chat/completions` + - Accepts standard OpenAI format with `messages[]` array + - Returns OpenAI-compatible response structure with `choices[]` + - Extracts last message content from messages array + - Includes usage metadata (stub values for compatibility) +- **Refactored** Relay to use shared `handleChatRequest()` function + - Both `/chat` and `/v1/chat/completions` use same core logic + - Eliminates code duplication + - Consistent error handling across endpoints + +#### Relay β†’ Intake Connection +- **Fixed** Intake URL fallback in Relay server configuration + - Corrected port: `7082` β†’ `7080` + - Updated endpoint: `/summary` β†’ `/add_exchange` + - Now properly sends exchanges to Intake for summarization + +#### Code Quality & Python Package Structure +- **Added** missing `__init__.py` files to all Cortex subdirectories + - `cortex/llm/__init__.py` + - `cortex/reasoning/__init__.py` + - `cortex/persona/__init__.py` + - `cortex/ingest/__init__.py` + - `cortex/utils/__init__.py` + - Improves package imports and IDE support +- **Removed** unused import in `cortex/router.py`: `from unittest import result` +- **Deleted** empty file `cortex/llm/resolve_llm_url.py` (was 0 bytes, never implemented) + +### Verified Working + +Complete end-to-end message flow now operational: +``` +UI β†’ Relay (/v1/chat/completions) + ↓ +Relay β†’ Cortex (/reason) + ↓ +Cortex β†’ Intake (/summaries) [retrieves context] + ↓ +Cortex 4-stage pipeline: + 1. reflection.py β†’ meta-awareness notes + 2. reasoning.py β†’ draft answer + 3. refine.py β†’ polished answer + 4. persona/speak.py β†’ Lyra personality + ↓ +Cortex β†’ Relay (returns persona response) + ↓ +Relay β†’ Intake (/add_exchange) [async summary] + ↓ +Intake β†’ NeoMem (background memory storage) + ↓ +Relay β†’ UI (final response) +``` + +### Documentation +- **Added** comprehensive v0.5.0 changelog entry +- **Updated** README.md to reflect v0.5.0 architecture + - Documented new endpoints + - Updated data flow diagrams + - Clarified Intake v0.2 changes + - Corrected service descriptions + +### Issues Resolved +- ❌ Cortex could not retrieve context from Intake (wrong endpoint) +- ❌ UI could not send messages to Relay (endpoint mismatch) +- ❌ Relay could not send summaries to Intake (wrong port/endpoint) +- ❌ Python package imports were implicit (missing __init__.py) + +### Known Issues (Non-Critical) +- Session management endpoints not implemented in Relay (`GET/POST /sessions/:id`) +- RAG service currently disabled in docker-compose.yml +- Cortex `/ingest` endpoint is a stub returning `{"status": "ok"}` + +### Migration Notes +If upgrading from v0.4.x: +1. Pull latest changes from git +2. Verify environment variables in `.env` files: + - Check `INTAKE_API_URL=http://intake:7080` (not `INTAKE_API`) + - Verify all service URLs use correct ports +3. Restart Docker containers: `docker-compose down && docker-compose up -d` +4. Test with a simple message through the UI + +--- + +## [Infrastructure v1.0.0] - 2025-11-26 + +### Changed - Environment Variable Consolidation + +**Major reorganization to eliminate duplication and improve maintainability** + +- Consolidated 9 scattered `.env` files into single source of truth architecture +- Root `.env` now contains all shared infrastructure (LLM backends, databases, API keys, service URLs) +- Service-specific `.env` files minimized to only essential overrides: + - `cortex/.env`: Reduced from 42 to 22 lines (operational parameters only) + - `neomem/.env`: Reduced from 26 to 14 lines (LLM naming conventions only) + - `intake/.env`: Kept at 8 lines (already minimal) +- **Result**: ~24% reduction in total configuration lines (197 β†’ ~150) + +**Docker Compose Consolidation** +- All services now defined in single root `docker-compose.yml` +- Relay service updated with complete configuration (env_file, volumes) +- Removed redundant `core/docker-compose.yml` (marked as DEPRECATED) +- Standardized network communication to use Docker container names + +**Service URL Standardization** +- Internal services use container names: `http://neomem-api:7077`, `http://cortex:7081` +- External services use IP addresses: `http://10.0.0.43:8000` (vLLM), `http://10.0.0.3:11434` (Ollama) +- Removed IP/container name inconsistencies across files + +### Added - Security & Documentation + +**Security Templates** - Created `.env.example` files for all services +- Root `.env.example` with sanitized credentials +- Service-specific templates: `cortex/.env.example`, `neomem/.env.example`, `intake/.env.example`, `rag/.env.example` +- All `.env.example` files safe to commit to version control + +**Documentation** +- `ENVIRONMENT_VARIABLES.md`: Comprehensive reference for all environment variables + - Variable descriptions, defaults, and usage examples + - Multi-backend LLM strategy documentation + - Troubleshooting guide + - Security best practices +- `DEPRECATED_FILES.md`: Deletion guide for deprecated files with verification steps + +**Enhanced .gitignore** +- Ignores all `.env` files (including subdirectories) +- Tracks `.env.example` templates for documentation +- Ignores `.env-backups/` directory + +### Removed +- `core/.env` - Redundant with root `.env`, now deleted +- `core/docker-compose.yml` - Consolidated into main compose file (marked DEPRECATED) ### Fixed -- Relay startup no longer crashes when NVGRAM is unavailable β€” deferred connection handling. -- `/memories` POST failures no longer crash Relay; now logged gracefully as `relay error Error: memAdd failed: 500`. -- Improved injected prompt debugging (`DEBUG_PROMPT=true` now prints clean JSON). +- Eliminated duplicate `OPENAI_API_KEY` across 5+ files +- Eliminated duplicate LLM backend URLs across 4+ files +- Eliminated duplicate database credentials across 3+ files +- Resolved Cortex `environment:` section override in docker-compose (now uses env_file) -### Goals / Next Steps -- Add salience visualization (e.g., memory weights displayed in injected system message). -- Begin schema alignment with NVGRAM v0.1.2 for confidence scoring. -- Add relay auto-retry for transient 500 responses from NVGRAM. +### Architecture - Multi-Backend LLM Strategy + +Root `.env` provides all backend OPTIONS (PRIMARY, SECONDARY, CLOUD, FALLBACK), services choose which to USE: +- **Cortex** β†’ vLLM (PRIMARY) for autonomous reasoning +- **NeoMem** β†’ Ollama (SECONDARY) + OpenAI embeddings +- **Intake** β†’ vLLM (PRIMARY) for summarization +- **Relay** β†’ Fallback chain with user preference + +Preserves per-service flexibility while eliminating URL duplication. + +### Migration +- All original `.env` files backed up to `.env-backups/` with timestamp `20251126_025334` +- Rollback plan documented in `ENVIRONMENT_VARIABLES.md` +- Verification steps provided in `DEPRECATED_FILES.md` --- -## [Lyra-Core] v0.3.1 - 2025-09-27 -### Changed -- Removed salience filter logic; Cortex is now the default annotator. -- All user messages stored in Mem0; no discard tier applied. +## [0.4.x] - 2025-11-13 + +### Added - Multi-Stage Reasoning Pipeline + +**Cortex v0.5 - Complete architectural overhaul** + +- **New `reasoning.py` module** + - Async reasoning engine + - Accepts user prompt, identity, RAG block, and reflection notes + - Produces draft internal answers + - Uses primary backend (vLLM) + +- **New `reflection.py` module** + - Fully async meta-awareness layer + - Produces actionable JSON "internal notes" + - Enforces strict JSON schema and fallback parsing + - Forces cloud backend (`backend_override="cloud"`) + +- **Integrated `refine.py` into pipeline** + - New stage between reflection and persona + - Runs exclusively on primary vLLM backend (MI50) + - Produces final, internally consistent output for downstream persona layer + +- **Backend override system** + - Each LLM call can now select its own backend + - Enables multi-LLM cognition: Reflection β†’ cloud, Reasoning β†’ primary + +- **Identity loader** + - Added `identity.py` with `load_identity()` for consistent persona retrieval + +- **Ingest handler** + - Async stub created for future Intake β†’ NeoMem β†’ RAG pipeline + +**Cortex v0.4.1 - RAG Integration** + +- **RAG integration** + - Added `rag.py` with `query_rag()` and `format_rag_block()` + - Cortex now queries local RAG API (`http://10.0.0.41:7090/rag/search`) + - Synthesized answers and top excerpts injected into reasoning prompt + +### Changed - Unified LLM Architecture + +**Cortex v0.5** + +- **Unified LLM backend URL handling across Cortex** + - ENV variables must now contain FULL API endpoints + - Removed all internal path-appending (e.g. `.../v1/completions`) + - `llm_router.py` rewritten to use env-provided URLs as-is + - Ensures consistent behavior between draft, reflection, refine, and persona + +- **Rebuilt `main.py`** + - Removed old annotation/analysis logic + - New structure: load identity β†’ get RAG β†’ reflect β†’ reason β†’ return draft+notes + - Routes now clean and minimal (`/reason`, `/ingest`, `/health`) + - Async path throughout Cortex + +- **Refactored `llm_router.py`** + - Removed old fallback logic during overrides + - OpenAI requests now use `/v1/chat/completions` + - Added proper OpenAI Authorization headers + - Distinct payload format for vLLM vs OpenAI + - Unified, correct parsing across models + +- **Simplified Cortex architecture** + - Removed deprecated "context.py" and old reasoning code + - Relay completely decoupled from smart behavior + +- **Updated environment specification** + - `LLM_PRIMARY_URL` now set to `http://10.0.0.43:8000/v1/completions` + - `LLM_SECONDARY_URL` remains `http://10.0.0.3:11434/api/generate` (Ollama) + - `LLM_CLOUD_URL` set to `https://api.openai.com/v1/chat/completions` + +**Cortex v0.4.1** + +- **Revised `/reason` endpoint** + - Now builds unified context blocks: [Intake] β†’ recent summaries, [RAG] β†’ contextual knowledge, [User Message] β†’ current input + - Calls `call_llm()` for first pass, then `reflection_loop()` for meta-evaluation + - Returns `cortex_prompt`, `draft_output`, `final_output`, and normalized reflection + +- **Reflection Pipeline Stability** + - Cleaned parsing to normalize JSON vs. text reflections + - Added fallback handling for malformed or non-JSON outputs + - Log system improved to show raw JSON, extracted fields, and normalized summary + +- **Async Summarization (Intake v0.2.1)** + - Intake summaries now run in background threads to avoid blocking Cortex + - Summaries (L1–L∞) logged asynchronously with [BG] tags + +- **Environment & Networking Fixes** + - Verified `.env` variables propagate correctly inside Cortex container + - Confirmed Docker network connectivity between Cortex, Intake, NeoMem, and RAG + - Adjusted localhost calls to service-IP mapping + +- **Behavioral Updates** + - Cortex now performs conversation reflection (on user intent) and self-reflection (on its own answers) + - RAG context successfully grounds reasoning outputs + - Intake and NeoMem confirmed receiving summaries via `/add_exchange` + - Log clarity pass: all reflective and contextual blocks clearly labeled + +### Fixed + +**Cortex v0.5** + +- Resolved endpoint conflict where router expected base URLs and refine expected full URLs + - Fixed by standardizing full-URL behavior across entire system +- Reflection layer no longer fails silently (previously returned `[""]` due to MythoMax) +- Resolved 404/401 errors caused by incorrect OpenAI URL endpoints +- No more double-routing through vLLM during reflection +- Corrected async/sync mismatch in multiple locations +- Eliminated double-path bug (`/v1/completions/v1/completions`) caused by previous router logic + +### Removed + +**Cortex v0.5** + +- Legacy `annotate`, `reason_check` glue logic from old architecture +- Old backend probing junk code +- Stale imports and unused modules leftover from previous prototype + +### Verified + +**Cortex v0.5** + +- Cortex β†’ vLLM (MI50) β†’ refine β†’ final_output now functioning correctly +- Refine shows `used_primary_backend: true` and no fallback +- Manual curl test confirms endpoint accuracy + +### Known Issues + +**Cortex v0.5** + +- Refine sometimes prefixes output with `"Final Answer:"`; next version will sanitize this +- Hallucinations in draft_output persist due to weak grounding (fix in reasoning + RAG planned) + +**Cortex v0.4.1** + +- NeoMem tuning needed - improve retrieval latency and relevance +- Need dedicated `/reflections/recent` endpoint for Cortex +- Migrate to Cortex-first ingestion (Relay β†’ Cortex β†’ NeoMem) +- Add persistent reflection recall (use prior reflections as meta-context) +- Improve reflection JSON structure ("insight", "evaluation", "next_action" β†’ guaranteed fields) +- Tighten temperature and prompt control for factual consistency +- RAG optimization: add source ranking, filtering, multi-vector hybrid search +- Cache RAG responses per session to reduce duplicate calls + +### Notes + +**Cortex v0.5** + +This is the largest structural change to Cortex so far. It establishes: +- Multi-model cognition +- Clean layering +- Identity + reflection separation +- Correct async code +- Deterministic backend routing +- Predictable JSON reflection + +The system is now ready for: +- Refinement loops +- Persona-speaking layer +- Containerized RAG +- Long-term memory integration +- True emergent-behavior experiments + +--- + +## [0.3.x] - 2025-10-28 to 2025-09-26 ### Added -- Cortex annotations (`metadata.cortex`) now attached to memories. -- Debug logging improvements: + +**[Lyra Core v0.3.2 + Web UI v0.2.0] - 2025-10-28** + +- **New UI** + - Cleaned up UI look and feel + +- **Sessions** + - Sessions now persist over time + - Ability to create new sessions or load sessions from previous instance + - When changing session, updates what the prompt sends to relay (doesn't prompt with messages from other sessions) + - Relay correctly wired in + +**[Lyra-Core 0.3.1] - 2025-10-09** + +- **NVGRAM Integration (Full Pipeline Reconnected)** + - Replaced legacy Mem0 service with NVGRAM microservice (`nvgram-api` @ port 7077) + - Updated `server.js` in Relay to route all memory ops via `${NVGRAM_API}/memories` and `/search` + - Added `.env` variable: `NVGRAM_API=http://nvgram-api:7077` + - Verified end-to-end Lyra conversation persistence: `relay β†’ nvgram-api β†’ postgres/neo4j β†’ relay β†’ ollama β†’ ui` + - βœ… Memories stored, retrieved, and re-injected successfully + +**[Lyra-Core v0.3.0] - 2025-09-26** + +- **Salience filtering** in Relay + - `.env` configurable: `SALIENCE_ENABLED`, `SALIENCE_MODE`, `SALIENCE_MODEL`, `SALIENCE_API_URL` + - Supports `heuristic` and `llm` classification modes + - LLM-based salience filter integrated with Cortex VM running `llama-server` +- Logging improvements + - Added debug logs for salience mode, raw LLM output, and unexpected outputs + - Fail-closed behavior for unexpected LLM responses +- Successfully tested with **Phi-3.5-mini** and **Qwen2-0.5B-Instruct** as salience classifiers +- Verified end-to-end flow: Relay β†’ salience filter β†’ Mem0 add/search β†’ Persona injection β†’ LLM reply + +**[Cortex v0.3.0] - 2025-10-31** + +- **Cortex Service (FastAPI)** + - New standalone reasoning engine (`cortex/main.py`) with endpoints: + - `GET /health` – reports active backend + NeoMem status + - `POST /reason` – evaluates `{prompt, response}` pairs + - `POST /annotate` – experimental text analysis + - Background NeoMem health monitor (5-minute interval) + +- **Multi-Backend Reasoning Support** + - Environment-driven backend selection via `LLM_FORCE_BACKEND` + - Supports: Primary (vLLM MI50), Secondary (Ollama 3090), Cloud (OpenAI), Fallback (llama.cpp CPU) + - Per-backend model variables: `LLM_PRIMARY_MODEL`, `LLM_SECONDARY_MODEL`, `LLM_CLOUD_MODEL`, `LLM_FALLBACK_MODEL` + +- **Response Normalization Layer** + - Implemented `normalize_llm_response()` to merge streamed outputs and repair malformed JSON + - Handles Ollama's multi-line streaming and Mythomax's missing punctuation issues + - Prints concise debug previews of merged content + +- **Environment Simplification** + - Each service (`intake`, `cortex`, `neomem`) now maintains its own `.env` file + - Removed reliance on shared/global env file to prevent cross-contamination + - Verified Docker Compose networking across containers + +**[NeoMem 0.1.2] - 2025-10-27** (formerly NVGRAM) + +- **Renamed NVGRAM to NeoMem** + - All future updates under name NeoMem + - Features unchanged + +**[NVGRAM 0.1.1] - 2025-10-08** + +- **Async Memory Rewrite (Stability + Safety Patch)** + - Introduced `AsyncMemory` class with fully asynchronous vector and graph store writes + - Added input sanitation to prevent embedding errors (`'list' object has no attribute 'replace'`) + - Implemented `flatten_messages()` helper in API layer to clean malformed payloads + - Added structured request logging via `RequestLoggingMiddleware` (FastAPI middleware) + - Health endpoint (`/health`) returns structured JSON `{status, version, service}` + - Startup logs include sanitized embedder config with masked API keys + +**[NVGRAM 0.1.0] - 2025-10-07** + +- **Initial fork of Mem0 β†’ NVGRAM** + - Created fully independent local-first memory engine based on Mem0 OSS + - Renamed all internal modules, Docker services, environment variables from `mem0` β†’ `nvgram` + - New service name: `nvgram-api`, default port 7077 + - Maintains same API endpoints (`/memories`, `/search`) for drop-in compatibility + - Uses FastAPI, Postgres, and Neo4j as persistent backends + +**[Lyra-Mem0 0.3.2] - 2025-10-05** + +- **Ollama LLM reasoning** alongside OpenAI embeddings + - Introduced `LLM_PROVIDER=ollama`, `LLM_MODEL`, and `OLLAMA_HOST` in `.env.3090` + - Verified local 3090 setup using `qwen2.5:7b-instruct-q4_K_M` + - Split processing: Embeddings β†’ OpenAI `text-embedding-3-small`, LLM β†’ Local Ollama +- Added `.env.3090` template for self-hosted inference nodes +- Integrated runtime diagnostics and seeder progress tracking + - File-level + message-level progress bars + - Retry/back-off logic for timeouts (3 attempts) + - Event logging (`ADD / UPDATE / NONE`) for every memory record +- Expanded Docker health checks for Postgres, Qdrant, and Neo4j containers +- Added GPU-friendly long-run configuration for continuous seeding (validated on RTX 3090) + +**[Lyra-Mem0 0.3.1] - 2025-10-03** + +- HuggingFace TEI integration (local 3090 embedder) +- Dual-mode environment switch between OpenAI cloud and local +- CSV export of memories from Postgres (`payload->>'data'`) + +**[Lyra-Mem0 0.3.0]** + +- **Ollama embeddings** in Mem0 OSS container + - Configure `EMBEDDER_PROVIDER=ollama`, `EMBEDDER_MODEL`, `OLLAMA_HOST` via `.env` + - Mounted `main.py` override from host into container to load custom `DEFAULT_CONFIG` + - Installed `ollama` Python client into custom API container image +- `.env.3090` file for external embedding mode (3090 machine) +- Workflow for multiple embedding modes: LAN-based 3090/Ollama, Local-only CPU, OpenAI fallback + +**[Lyra-Mem0 v0.2.1]** + +- **Seeding pipeline** + - Built Python seeder script to bulk-insert raw Cloud Lyra exports into Mem0 + - Implemented incremental seeding option (skip existing memories, only add new ones) + - Verified insert process with Postgres-backed history DB + +**[Intake v0.1.0] - 2025-10-27** + +- Receives messages from relay and summarizes them in cascading format +- Continues to summarize smaller amounts of exchanges while generating large-scale conversational summaries (L20) +- Currently logs summaries to .log file in `/project-lyra/intake-logs/` + +**[Lyra-Cortex v0.2.0] - 2025-09-26** + +- Integrated **llama-server** on dedicated Cortex VM (Proxmox) +- Verified Phi-3.5-mini-instruct_Uncensored-Q4_K_M running with 8 vCPUs +- Benchmarked Phi-3.5-mini performance: ~18 tokens/sec CPU-only on Ryzen 7 7800X +- Salience classification functional but sometimes inconsistent +- Tested **Qwen2-0.5B-Instruct GGUF** as alternative salience classifier + - Much faster throughput (~350 tokens/sec prompt, ~100 tokens/sec eval) + - More responsive but over-classifies messages as "salient" +- Established `.env` integration for model ID (`SALIENCE_MODEL`), enabling hot-swap between models + +### Changed + +**[Lyra-Core 0.3.1] - 2025-10-09** + +- Renamed `MEM0_URL` β†’ `NVGRAM_API` across all relay environment configs +- Updated Docker Compose service dependency order + - `relay` now depends on `nvgram-api` healthcheck + - Removed `mem0` references and volumes +- Minor cleanup to Persona fetch block (null-checks and safer default persona string) + +**[Lyra-Core v0.3.1] - 2025-09-27** + +- Removed salience filter logic; Cortex is now default annotator +- All user messages stored in Mem0; no discard tier applied +- Cortex annotations (`metadata.cortex`) now attached to memories +- Debug logging improvements - Pretty-print Cortex annotations - Injected prompt preview - Memory search hit list with scores -- `.env` toggle (`CORTEX_ENABLED`) to bypass Cortex when needed. +- `.env` toggle (`CORTEX_ENABLED`) to bypass Cortex when needed + +**[Lyra-Core v0.3.0] - 2025-09-26** + +- Refactored `server.js` to gate `mem.add()` calls behind salience filter +- Updated `.env` to support `SALIENCE_MODEL` + +**[Cortex v0.3.0] - 2025-10-31** + +- Refactored `reason_check()` to dynamically switch between **prompt** and **chat** mode depending on backend +- Enhanced startup logs to announce active backend, model, URL, and mode +- Improved error handling with clearer "Reasoning error" messages + +**[NVGRAM 0.1.1] - 2025-10-08** + +- Replaced synchronous `Memory.add()` with async-safe version supporting concurrent vector + graph writes +- Normalized indentation and cleaned duplicate `main.py` references +- Removed redundant `FastAPI()` app reinitialization +- Updated internal logging to INFO-level timing format +- Deprecated `@app.on_event("startup")` β†’ will migrate to `lifespan` handler in v0.1.2 + +**[NVGRAM 0.1.0] - 2025-10-07** + +- Removed dependency on external `mem0ai` SDK β€” all logic now local +- Re-pinned requirements: fastapi==0.115.8, uvicorn==0.34.0, pydantic==2.10.4, python-dotenv==1.0.1, psycopg>=3.2.8, ollama +- Adjusted `docker-compose` and `.env` templates to use new NVGRAM naming + +**[Lyra-Mem0 0.3.2] - 2025-10-05** + +- Updated `main.py` configuration block to load `LLM_PROVIDER`, `LLM_MODEL`, `OLLAMA_BASE_URL` + - Fallback to OpenAI if Ollama unavailable +- Adjusted `docker-compose.yml` mount paths to correctly map `/app/main.py` +- Normalized `.env` loading so `mem0-api` and host environment share identical values +- Improved seeder logging and progress telemetry +- Added explicit `temperature` field to `DEFAULT_CONFIG['llm']['config']` + +**[Lyra-Mem0 0.3.0]** + +- `docker-compose.yml` updated to mount local `main.py` and `.env.3090` +- Built custom Dockerfile (`mem0-api-server:latest`) extending base image with `pip install ollama` +- Updated `requirements.txt` to include `ollama` package +- Adjusted Mem0 container config so `main.py` pulls environment variables with `dotenv` +- Tested new embeddings path with curl `/memories` API call + +**[Lyra-Mem0 v0.2.1]** + +- Updated `main.py` to load configuration from `.env` using `dotenv` and support multiple embedder backends +- Mounted host `main.py` into container so local edits persist across rebuilds +- Updated `docker-compose.yml` to mount `.env.3090` and support swap between profiles +- Built custom Dockerfile (`mem0-api-server:latest`) including `pip install ollama` +- Updated `requirements.txt` with `ollama` dependency +- Adjusted startup flow so container automatically connects to external Ollama host (LAN IP) +- Added logging to confirm model pulls and embedding requests ### Fixed -- Parsing failures from Markdown-wrapped Cortex JSON via fence cleaner. -- Relay no longer β€œhangs” on malformed Cortex outputs. ---- +**[Lyra-Core 0.3.1] - 2025-10-09** -### [Lyra-Core] v0.3.0 β€” 2025-09-26 -#### Added -- Implemented **salience filtering** in Relay: - - `.env` configurable: `SALIENCE_ENABLED`, `SALIENCE_MODE`, `SALIENCE_MODEL`, `SALIENCE_API_URL`. - - Supports `heuristic` and `llm` classification modes. - - LLM-based salience filter integrated with Cortex VM running `llama-server`. -- Logging improvements: - - Added debug logs for salience mode, raw LLM output, and unexpected outputs. - - Fail-closed behavior for unexpected LLM responses. -- Successfully tested with **Phi-3.5-mini** and **Qwen2-0.5B-Instruct** as salience classifiers. -- Verified end-to-end flow: Relay β†’ salience filter β†’ Mem0 add/search β†’ Persona injection β†’ LLM reply. +- Relay startup no longer crashes when NVGRAM is unavailable β€” deferred connection handling +- `/memories` POST failures no longer crash Relay; now logged gracefully as `relay error Error: memAdd failed: 500` +- Improved injected prompt debugging (`DEBUG_PROMPT=true` now prints clean JSON) -#### Changed -- Refactored `server.js` to gate `mem.add()` calls behind salience filter. -- Updated `.env` to support `SALIENCE_MODEL`. +**[Lyra-Core v0.3.1] - 2025-09-27** -#### Known Issues -- Small models (e.g. Qwen2-0.5B) tend to over-classify as "salient". -- Phi-3.5-mini sometimes returns truncated tokens ("sali", "fi"). -- CPU-only inference is functional but limited; larger models recommended once GPU is available. +- Parsing failures from Markdown-wrapped Cortex JSON via fence cleaner +- Relay no longer "hangs" on malformed Cortex outputs ---- +**[Cortex v0.3.0] - 2025-10-31** -### [Lyra-Core] v0.2.0 β€” 2025-09-24 -#### Added -- Migrated Relay to use `mem0ai` SDK instead of raw fetch calls. -- Implemented `sessionId` support (client-supplied, fallback to `default`). -- Added debug logs for memory add/search. -- Cleaned up Relay structure for clarity. +- Corrected broken vLLM endpoint routing (`/v1/completions`) +- Stabilized cross-container health reporting for NeoMem +- Resolved JSON parse failures caused by streaming chunk delimiters ---- +**[NVGRAM 0.1.1] - 2025-10-08** -### [Lyra-Core] v0.1.0 β€” 2025-09-23 -#### Added -- First working MVP of **Lyra Core Relay**. -- Relay service accepts `POST /v1/chat/completions` (OpenAI-compatible). -- Memory integration with Mem0: - - `POST /memories` on each user message. - - `POST /search` before LLM call. -- Persona Sidecar integration (`GET /current`). -- OpenAI GPT + Ollama (Mythomax) support in Relay. -- Simple browser-based chat UI (talks to Relay at `http://:7078`). -- `.env` standardization for Relay + Mem0 + Postgres + Neo4j. -- Working Neo4j + Postgres backing stores for Mem0. -- Initial MVP relay service with raw fetch calls to Mem0. -- Dockerized with basic healthcheck. +- Eliminated repeating 500 error from OpenAI embedder caused by non-string message content +- Masked API key leaks from boot logs +- Ensured Neo4j reconnects gracefully on first retry -#### Fixed -- Resolved crash loop in Neo4j by restricting env vars (`NEO4J_AUTH` only). -- Relay now correctly reads `MEM0_URL` and `MEM0_API_KEY` from `.env`. +**[Lyra-Mem0 0.3.2] - 2025-10-05** -#### Known Issues -- No feedback loop (thumbs up/down) yet. -- Forget/delete flow is manual (via memory IDs). -- Memory latency ~1–4s depending on embedding model. +- Resolved crash during startup: `TypeError: OpenAIConfig.__init__() got an unexpected keyword argument 'ollama_base_url'` +- Corrected mount type mismatch (file vs directory) causing `OCI runtime create failed` errors +- Prevented duplicate or partial postings when retry logic triggered multiple concurrent requests +- "Unknown event" warnings now safely ignored (no longer break seeding loop) +- Confirmed full dual-provider operation in logs (`api.openai.com` + `10.0.0.3:11434/api/chat`) ---- +**[Lyra-Mem0 0.3.1] - 2025-10-03** -## 🧩 lyra-neomem (used to be NVGRAM / Lyra-Mem0) ############################################################################## +- `.env` CRLF vs LF line ending issues +- Local seeding now possible via HuggingFace server -## [NeoMem 0.1.2] - 2025-10-27 -### Changed -- **Renamed NVGRAM to neomem** - - All future updates will be under the name NeoMem. - - Features have not changed. +**[Lyra-Mem0 0.3.0]** -## [NVGRAM 0.1.1] - 2025-10-08 -### Added -- **Async Memory Rewrite (Stability + Safety Patch)** - - Introduced `AsyncMemory` class with fully asynchronous vector and graph store writes. - - Added **input sanitation** to prevent embedding errors (`'list' object has no attribute 'replace'`). - - Implemented `flatten_messages()` helper in API layer to clean malformed payloads. - - Added structured request logging via `RequestLoggingMiddleware` (FastAPI middleware). - - Health endpoint (`/health`) now returns structured JSON `{status, version, service}`. - - Startup logs now include **sanitized embedder config** with API keys masked for safety: - ``` - >>> Embedder config (sanitized): {'provider': 'openai', 'config': {'model': 'text-embedding-3-small', 'api_key': '***'}} - βœ… Connected to Neo4j on attempt 1 - 🧠 NVGRAM v0.1.1 β€” Neural Vectorized Graph Recall and Memory initialized - ``` +- Resolved container boot failure caused by missing `ollama` dependency (`ModuleNotFoundError`) +- Fixed config overwrite issue where rebuilding container restored stock `main.py` +- Worked around Neo4j error (`vector.similarity.cosine(): mismatched vector dimensions`) by confirming OpenAI vs. Ollama embedding vector sizes -### Changed -- Replaced synchronous `Memory.add()` with async-safe version supporting concurrent vector + graph writes. -- Normalized indentation and cleaned duplicate `main.py` references under `/nvgram/` vs `/nvgram/server/`. -- Removed redundant `FastAPI()` app reinitialization. -- Updated internal logging to INFO-level timing format: - 2025-10-08 21:48:45 [INFO] POST /memories -> 200 (11189.1 ms) -- Deprecated `@app.on_event("startup")` (FastAPI deprecation warning) β†’ will migrate to `lifespan` handler in v0.1.2. +**[Lyra-Mem0 v0.2.1]** -### Fixed -- Eliminated repeating 500 error from OpenAI embedder caused by non-string message content. -- Masked API key leaks from boot logs. -- Ensured Neo4j reconnects gracefully on first retry. - -### Goals / Next Steps -- Integrate **salience scoring** and **embedding confidence weight** fields in Postgres schema. -- Begin testing with full Lyra Relay + Persona Sidecar pipeline for live session memory recall. -- Migrate from deprecated `on_event` β†’ `lifespan` pattern in 0.1.2. - ---- - -## [NVGRAM 0.1.0] - 2025-10-07 -### Added -- **Initial fork of Mem0 β†’ NVGRAM**: - - Created a fully independent local-first memory engine based on Mem0 OSS. - - Renamed all internal modules, Docker services, and environment variables from `mem0` β†’ `nvgram`. - - New service name: **`nvgram-api`**, default port **7077**. - - Maintains same API endpoints (`/memories`, `/search`) for drop-in compatibility with Lyra Core. - - Uses **FastAPI**, **Postgres**, and **Neo4j** as persistent backends. - - Verified clean startup: - ``` - βœ… Connected to Neo4j on attempt 1 - INFO: Uvicorn running on http://0.0.0.0:7077 - ``` - - `/docs` and `/openapi.json` confirmed reachable and functional. - -### Changed -- Removed dependency on the external `mem0ai` SDK β€” all logic now local. -- Re-pinned requirements: - - fastapi==0.115.8 - - uvicorn==0.34.0 - - pydantic==2.10.4 - - python-dotenv==1.0.1 - - psycopg>=3.2.8 - - ollama -- Adjusted `docker-compose` and `.env` templates to use new NVGRAM naming and image paths. - -### Goals / Next Steps -- Integrate NVGRAM as the new default backend in Lyra Relay. -- Deprecate remaining Mem0 references and archive old configs. -- Begin versioning as a standalone project (`nvgram-core`, `nvgram-api`, etc.). - ---- - -## [Lyra-Mem0 0.3.2] - 2025-10-05 -### Added -- Support for **Ollama LLM reasoning** alongside OpenAI embeddings: - - Introduced `LLM_PROVIDER=ollama`, `LLM_MODEL`, and `OLLAMA_HOST` in `.env.3090`. - - Verified local 3090 setup using `qwen2.5:7b-instruct-q4_K_M`. - - Split processing pipeline: - - Embeddings β†’ OpenAI `text-embedding-3-small` - - LLM β†’ Local Ollama (`http://10.0.0.3:11434/api/chat`). -- Added `.env.3090` template for self-hosted inference nodes. -- Integrated runtime diagnostics and seeder progress tracking: - - File-level + message-level progress bars. - - Retry/back-off logic for timeouts (3 attempts). - - Event logging (`ADD / UPDATE / NONE`) for every memory record. -- Expanded Docker health checks for Postgres, Qdrant, and Neo4j containers. -- Added GPU-friendly long-run configuration for continuous seeding (validated on RTX 3090). - -### Changed -- Updated `main.py` configuration block to load: - - `LLM_PROVIDER`, `LLM_MODEL`, and `OLLAMA_BASE_URL`. - - Fallback to OpenAI if Ollama unavailable. -- Adjusted `docker-compose.yml` mount paths to correctly map `/app/main.py`. -- Normalized `.env` loading so `mem0-api` and host environment share identical values. -- Improved seeder logging and progress telemetry for clearer diagnostics. -- Added explicit `temperature` field to `DEFAULT_CONFIG['llm']['config']` for tuning future local inference runs. - -### Fixed -- Resolved crash during startup: - `TypeError: OpenAIConfig.__init__() got an unexpected keyword argument 'ollama_base_url'`. -- Corrected mount type mismatch (file vs directory) causing `OCI runtime create failed` errors. -- Prevented duplicate or partial postings when retry logic triggered multiple concurrent requests. -- β€œUnknown event” warnings now safely ignored (no longer break seeding loop). -- Confirmed full dual-provider operation in logs (`api.openai.com` + `10.0.0.3:11434/api/chat`). - -### Observations -- Stable GPU utilization: ~8 GB VRAM @ 92 % load, β‰ˆ 67 Β°C under sustained seeding. -- Next revision will re-format seed JSON to preserve `role` context (user vs assistant). - ---- - -## [Lyra-Mem0 0.3.1] - 2025-10-03 -### Added -- HuggingFace TEI integration (local 3090 embedder). -- Dual-mode environment switch between OpenAI cloud and local. -- CSV export of memories from Postgres (`payload->>'data'`). - -### Fixed -- `.env` CRLF vs LF line ending issues. -- Local seeding now possible via huggingface server running - ---- - -## [Lyra-mem0 0.3.0] -### Added -- Support for **Ollama embeddings** in Mem0 OSS container: - - Added ability to configure `EMBEDDER_PROVIDER=ollama` and set `EMBEDDER_MODEL` + `OLLAMA_HOST` via `.env`. - - Mounted `main.py` override from host into container to load custom `DEFAULT_CONFIG`. - - Installed `ollama` Python client into custom API container image. -- `.env.3090` file created for external embedding mode (3090 machine): - - EMBEDDER_PROVIDER=ollama - - EMBEDDER_MODEL=mxbai-embed-large - - OLLAMA_HOST=http://10.0.0.3:11434 -- Workflow to support **multiple embedding modes**: - 1. Fast LAN-based 3090/Ollama embeddings - 2. Local-only CPU embeddings (Lyra Cortex VM) - 3. OpenAI fallback embeddings - -### Changed -- `docker-compose.yml` updated to mount local `main.py` and `.env.3090`. -- Built **custom Dockerfile** (`mem0-api-server:latest`) extending base image with `pip install ollama`. -- Updated `requirements.txt` to include `ollama` package. -- Adjusted Mem0 container config so `main.py` pulls environment variables with `dotenv` (`load_dotenv()`). -- Tested new embeddings path with curl `/memories` API call. - -### Fixed -- Resolved container boot failure caused by missing `ollama` dependency (`ModuleNotFoundError`). -- Fixed config overwrite issue where rebuilding container restored stock `main.py`. -- Worked around Neo4j error (`vector.similarity.cosine(): mismatched vector dimensions`) by confirming OpenAI vs. Ollama embedding vector sizes and planning to standardize at 1536-dim. - --- - -## [Lyra-mem0 v0.2.1] - -### Added -- **Seeding pipeline**: - - Built Python seeder script to bulk-insert raw Cloud Lyra exports into Mem0. - - Implemented incremental seeding option (skip existing memories, only add new ones). - - Verified insert process with Postgres-backed history DB and curl `/memories/search` sanity check. -- **Ollama embedding support** in Mem0 OSS container: - - Added configuration for `EMBEDDER_PROVIDER=ollama`, `EMBEDDER_MODEL`, and `OLLAMA_HOST` via `.env`. - - Created `.env.3090` profile for LAN-connected 3090 machine with Ollama. - - Set up three embedding modes: - 1. Fast LAN-based 3090/Ollama - 2. Local-only CPU model (Lyra Cortex VM) - 3. OpenAI fallback - -### Changed -- Updated `main.py` to load configuration from `.env` using `dotenv` and support multiple embedder backends. -- Mounted host `main.py` into container so local edits persist across rebuilds. -- Updated `docker-compose.yml` to mount `.env.3090` and support swap between profiles. -- Built **custom Dockerfile** (`mem0-api-server:latest`) including `pip install ollama`. -- Updated `requirements.txt` with `ollama` dependency. -- Adjusted startup flow so container automatically connects to external Ollama host (LAN IP). -- Added logging to confirm model pulls and embedding requests. - -### Fixed -- Seeder process originally failed on old memories β€” now skips duplicates and continues batch. -- Resolved container boot error (`ModuleNotFoundError: ollama`) by extending image. -- Fixed overwrite issue where stock `main.py` replaced custom config during rebuild. -- Worked around Neo4j `vector.similarity.cosine()` dimension mismatch by investigating OpenAI (1536-dim) vs Ollama (1024-dim) schemas. - -### Notes -- To fully unify embedding modes, a Hugging Face / local model with **1536-dim embeddings** will be needed (to match OpenAI’s schema and avoid Neo4j errors). -- Current Ollama model (`mxbai-embed-large`) works, but returns 1024-dim vectors. -- Seeder workflow validated but should be wrapped in a repeatable weekly run for full Cloudβ†’Local sync. - ---- - -## [Lyra-Mem0 v0.2.0] - 2025-09-30 -### Added -- Standalone **Lyra-Mem0** stack created at `~/lyra-mem0/` - - Includes **Postgres (pgvector)**, **Qdrant**, **Neo4j**, and **SQLite** for history tracking. - - Added working `docker-compose.mem0.yml` and custom `Dockerfile` for building the Mem0 API server. -- Verified REST API functionality: - - `POST /memories` works for adding memories. - - `POST /search` works for semantic search. -- Successful end-to-end test with persisted memory: - *"Likes coffee in the morning"* β†’ retrievable via search. βœ… - -### Changed -- Split architecture into **modular stacks**: - - `~/lyra-core` (Relay, Persona-Sidecar, etc.) - - `~/lyra-mem0` (Mem0 OSS memory stack) -- Removed old embedded mem0 containers from the Lyra-Core compose file. -- Added Lyra-Mem0 section in README.md. - -### Next Steps -- Wire **Relay β†’ Mem0 API** (integration not yet complete). -- Add integration tests to verify persistence and retrieval from within Lyra-Core. - ---- - -## 🧠 Lyra-Cortex ############################################################################## - -## [ Cortex - v0.5] -2025-11-13 - -### Added -- **New `reasoning.py` module** - - Async reasoning engine. - - Accepts user prompt, identity, RAG block, and reflection notes. - - Produces draft internal answers. - - Uses primary backend (vLLM). -- **New `reflection.py` module** - - Fully async. - - Produces actionable JSON β€œinternal notes.” - - Enforces strict JSON schema and fallback parsing. - - Forces cloud backend (`backend_override="cloud"`). -- Integrated `refine.py` into Cortex reasoning pipeline: - - New stage between reflection and persona. - - Runs exclusively on primary vLLM backend (MI50). - - Produces final, internally consistent output for downstream persona layer. -- **Backend override system** - - Each LLM call can now select its own backend. - - Enables multi-LLM cognition: Reflection β†’ cloud, Reasoning β†’ primary. - -- **identity loader** - - Added `identity.py` with `load_identity()` for consistent persona retrieval. - -- **ingest_handler** - - Async stub created for future Intake β†’ NeoMem β†’ RAG pipeline. - -### Changed -- Unified LLM backend URL handling across Cortex: - - ENV variables must now contain FULL API endpoints. - - Removed all internal path-appending (e.g. `.../v1/completions`). - - `llm_router.py` rewritten to use env-provided URLs as-is. - - Ensures consistent behavior between draft, reflection, refine, and persona. -- **Rebuilt `main.py`** - - Removed old annotation/analysis logic. - - New structure: load identity β†’ get RAG β†’ reflect β†’ reason β†’ return draft+notes. - - Routes now clean and minimal (`/reason`, `/ingest`, `/health`). - - Async path throughout Cortex. - -- **Refactored `llm_router.py`** - - Removed old fallback logic during overrides. - - OpenAI requests now use `/v1/chat/completions`. - - Added proper OpenAI Authorization headers. - - Distinct payload format for vLLM vs OpenAI. - - Unified, correct parsing across models. - -- **Simplified Cortex architecture** - - Removed deprecated β€œcontext.py” and old reasoning code. - - Relay completely decoupled from smart behavior. - -- Updated environment specification: - - `LLM_PRIMARY_URL` now set to `http://10.0.0.43:8000/v1/completions`. - - `LLM_SECONDARY_URL` remains `http://10.0.0.3:11434/api/generate` (Ollama). - - `LLM_CLOUD_URL` set to `https://api.openai.com/v1/chat/completions`. - -### Fixed -- Resolved endpoint conflict where: - - Router expected base URLs. - - Refine expected full URLs. - - Refine always fell back due to hitting incorrect endpoint. - - Fixed by standardizing full-URL behavior across entire system. -- Reflection layer no longer fails silently (previously returned `[""]` due to MythoMax). -- Resolved 404/401 errors caused by incorrect OpenAI URL endpoints. -- No more double-routing through vLLM during reflection. -- Corrected async/sync mismatch in multiple locations. -- Eliminated double-path bug (`/v1/completions/v1/completions`) caused by previous router logic. - -### Removed -- Legacy `annotate`, `reason_check` glue logic from old architecture. -- Old backend probing junk code. -- Stale imports and unused modules leftover from previous prototype. - -### Verified -- Cortex β†’ vLLM (MI50) β†’ refine β†’ final_output now functioning correctly. -- refine shows `used_primary_backend: true` and no fallback. -- Manual curl test confirms endpoint accuracy. +- Seeder process originally failed on old memories β€” now skips duplicates and continues batch +- Resolved container boot error (`ModuleNotFoundError: ollama`) by extending image +- Fixed overwrite issue where stock `main.py` replaced custom config during rebuild +- Worked around Neo4j `vector.similarity.cosine()` dimension mismatch ### Known Issues -- refine sometimes prefixes output with `"Final Answer:"`; next version will sanitize this. -- hallucinations in draft_output persist due to weak grounding (fix in reasoning + RAG planned). -### Pending / Known Issues -- **RAG service does not exist** β€” requires containerized FastAPI service. -- Reasoning layer lacks self-revision loop (deliberate thought cycle). -- No speak/persona generation layer yet (`speak.py` planned). -- Intake summaries not yet routing into RAG or reflection layer. -- No refinement engine between reasoning and speak. +**[Lyra-Core v0.3.0] - 2025-09-26** -### Notes -This is the largest structural change to Cortex so far. -It establishes: -- multi-model cognition -- clean layering -- identity + reflection separation -- correct async code -- deterministic backend routing -- predictable JSON reflection +- Small models (e.g. Qwen2-0.5B) tend to over-classify as "salient" +- Phi-3.5-mini sometimes returns truncated tokens ("sali", "fi") +- CPU-only inference is functional but limited; larger models recommended once GPU available -The system is now ready for: -- refinement loops -- persona-speaking layer -- containerized RAG -- long-term memory integration -- true emergent-behavior experiments +**[Lyra-Cortex v0.2.0] - 2025-09-26** +- Small models tend to drift or over-classify +- CPU-only 7B+ models expected to be slow; GPU passthrough recommended for larger models +- Need to set up `systemd` service for `llama-server` to auto-start on VM reboot +### Observations + +**[Lyra-Mem0 0.3.2] - 2025-10-05** + +- Stable GPU utilization: ~8 GB VRAM @ 92% load, β‰ˆ 67Β°C under sustained seeding +- Next revision will re-format seed JSON to preserve `role` context (user vs assistant) + +**[Lyra-Mem0 v0.2.1]** + +- To fully unify embedding modes, a Hugging Face / local model with **1536-dim embeddings** will be needed (to match OpenAI's schema) +- Current Ollama model (`mxbai-embed-large`) works, but returns 1024-dim vectors +- Seeder workflow validated but should be wrapped in repeatable weekly run for full Cloudβ†’Local sync + +### Next Steps + +**[Lyra-Core 0.3.1] - 2025-10-09** + +- Add salience visualization (e.g., memory weights displayed in injected system message) +- Begin schema alignment with NVGRAM v0.1.2 for confidence scoring +- Add relay auto-retry for transient 500 responses from NVGRAM + +**[NVGRAM 0.1.1] - 2025-10-08** + +- Integrate salience scoring and embedding confidence weight fields in Postgres schema +- Begin testing with full Lyra Relay + Persona Sidecar pipeline for live session memory recall +- Migrate from deprecated `on_event` β†’ `lifespan` pattern in 0.1.2 + +**[NVGRAM 0.1.0] - 2025-10-07** + +- Integrate NVGRAM as new default backend in Lyra Relay +- Deprecate remaining Mem0 references and archive old configs +- Begin versioning as standalone project (`nvgram-core`, `nvgram-api`, etc.) + +**[Intake v0.1.0] - 2025-10-27** + +- Feed intake into NeoMem +- Generate daily/hourly overall summary (IE: Today Brian and Lyra worked on x, y, and z) +- Generate session-aware summaries with own intake hopper + +--- + +## [0.2.x] - 2025-09-30 to 2025-09-24 -## [ Cortex - v0.4.1] - 2025-11-5 ### Added -- **RAG intergration** - - Added rag.py with query_rag() and format_rag_block(). - - Cortex now queries the local RAG API (http://10.0.0.41:7090/rag/search) for contextual augmentation. - - Synthesized answers and top excerpts are injected into the reasoning prompt. -### Changed ### -- **Revised /reason endpoint.** - - Now builds unified context blocks: - - [Intake] β†’ recent summaries - - [RAG] β†’ contextual knowledge - - [User Message] β†’ current input - - Calls call_llm() for the first pass, then reflection_loop() for meta-evaluation. - - Returns cortex_prompt, draft_output, final_output, and normalized reflection. -- **Reflection Pipeline Stability** - - Cleaned parsing to normalize JSON vs. text reflections. - - Added fallback handling for malformed or non-JSON outputs. - - Log system improved to show raw JSON, extracted fields, and normalized summary. -- **Async Summarization (Intake v0.2.1)** - - Intake summaries now run in background threads to avoid blocking Cortex. - - Summaries (L1–L∞) logged asynchronously with [BG] tags. -- **Environment & Networking Fixes** - - Verified .env variables propagate correctly inside the Cortex container. - - Confirmed Docker network connectivity between Cortex, Intake, NeoMem, and RAG (shared serversdown_lyra_net). - - Adjusted localhost calls to service-IP mapping (10.0.0.41 for Cortex host). - -- **Behavioral Updates** - - Cortex now performs conversation reflection (on user intent) and self-reflection (on its own answers). - - RAG context successfully grounds reasoning outputs. - - Intake and NeoMem confirmed receiving summaries via /add_exchange. - - Log clarity pass: all reflective and contextual blocks clearly labeled. -- **Known Gaps / Next Steps** - - NeoMem Tuning - - Improve retrieval latency and relevance. - - Implement a dedicated /reflections/recent endpoint for Cortex. - - Migrate to Cortex-first ingestion (Relay β†’ Cortex β†’ NeoMem). -- **Cortex Enhancements** - - Add persistent reflection recall (use prior reflections as meta-context). - - Improve reflection JSON structure ("insight", "evaluation", "next_action" β†’ guaranteed fields). - - Tighten temperature and prompt control for factual consistency. -- **RAG Optimization** - -Add source ranking, filtering, and multi-vector hybrid search. - -Cache RAG responses per session to reduce duplicate calls. -- **Documentation / Monitoring** - -Add health route for RAG and Intake summaries. - -Include internal latency metrics in /health endpoint. +**[Lyra-Mem0 v0.2.0] - 2025-09-30** -Consolidate logs into unified β€œLyra Cortex Console” for tracing all module calls. +- Standalone **Lyra-Mem0** stack created at `~/lyra-mem0/` + - Includes Postgres (pgvector), Qdrant, Neo4j, and SQLite for history tracking + - Added working `docker-compose.mem0.yml` and custom `Dockerfile` for building Mem0 API server +- Verified REST API functionality + - `POST /memories` works for adding memories + - `POST /search` works for semantic search +- Successful end-to-end test with persisted memory: *"Likes coffee in the morning"* β†’ retrievable via search βœ… -## [Cortex - v0.3.0] – 2025-10-31 -### Added -- **Cortex Service (FastAPI)** - - New standalone reasoning engine (`cortex/main.py`) with endpoints: - - `GET /health` – reports active backend + NeoMem status. - - `POST /reason` – evaluates `{prompt, response}` pairs. - - `POST /annotate` – experimental text analysis. - - Background NeoMem health monitor (5-minute interval). +**[Lyra-Core v0.2.0] - 2025-09-24** -- **Multi-Backend Reasoning Support** - - Added environment-driven backend selection via `LLM_FORCE_BACKEND`. - - Supports: - - **Primary** β†’ vLLM (MI50 node @ 10.0.0.43) - - **Secondary** β†’ Ollama (3090 node @ 10.0.0.3) - - **Cloud** β†’ OpenAI API - - **Fallback** β†’ llama.cpp (CPU) - - Introduced per-backend model variables: - `LLM_PRIMARY_MODEL`, `LLM_SECONDARY_MODEL`, `LLM_CLOUD_MODEL`, `LLM_FALLBACK_MODEL`. - -- **Response Normalization Layer** - - Implemented `normalize_llm_response()` to merge streamed outputs and repair malformed JSON. - - Handles Ollama’s multi-line streaming and Mythomax’s missing punctuation issues. - - Prints concise debug previews of merged content. - -- **Environment Simplification** - - Each service (`intake`, `cortex`, `neomem`) now maintains its own `.env` file. - - Removed reliance on shared/global env file to prevent cross-contamination. - - Verified Docker Compose networking across containers. +- Migrated Relay to use `mem0ai` SDK instead of raw fetch calls +- Implemented `sessionId` support (client-supplied, fallback to `default`) +- Added debug logs for memory add/search +- Cleaned up Relay structure for clarity ### Changed -- Refactored `reason_check()` to dynamically switch between **prompt** and **chat** mode depending on backend. -- Enhanced startup logs to announce active backend, model, URL, and mode. -- Improved error handling with clearer β€œReasoning error” messages. + +**[Lyra-Mem0 v0.2.0] - 2025-09-30** + +- Split architecture into modular stacks: + - `~/lyra-core` (Relay, Persona-Sidecar, etc.) + - `~/lyra-mem0` (Mem0 OSS memory stack) +- Removed old embedded mem0 containers from Lyra-Core compose file +- Added Lyra-Mem0 section in README.md + +### Next Steps + +**[Lyra-Mem0 v0.2.0] - 2025-09-30** + +- Wire **Relay β†’ Mem0 API** (integration not yet complete) +- Add integration tests to verify persistence and retrieval from within Lyra-Core + +--- + +## [0.1.x] - 2025-09-25 to 2025-09-23 + +### Added + +**[Lyra_RAG v0.1.0] - 2025-11-07** + +- Initial standalone RAG module for Project Lyra +- Persistent ChromaDB vector store (`./chromadb`) +- Importer `rag_chat_import.py` with: + - Recursive folder scanning and category tagging + - Smart chunking (~5k chars) + - SHA-1 deduplication and chat-ID metadata + - Timestamp fields (`file_modified`, `imported_at`) + - Background-safe operation (`nohup`/`tmux`) +- 68 Lyra-category chats imported: + - 6,556 new chunks added + - 1,493 duplicates skipped + - 7,997 total vectors stored + +**[Lyra_RAG v0.1.0 API] - 2025-11-07** + +- `/rag/search` FastAPI endpoint implemented (port 7090) +- Supports natural-language queries and returns top related excerpts +- Added answer synthesis step using `gpt-4o-mini` + +**[Lyra-Core v0.1.0] - 2025-09-23** + +- First working MVP of **Lyra Core Relay** +- Relay service accepts `POST /v1/chat/completions` (OpenAI-compatible) +- Memory integration with Mem0: + - `POST /memories` on each user message + - `POST /search` before LLM call +- Persona Sidecar integration (`GET /current`) +- OpenAI GPT + Ollama (Mythomax) support in Relay +- Simple browser-based chat UI (talks to Relay at `http://:7078`) +- `.env` standardization for Relay + Mem0 + Postgres + Neo4j +- Working Neo4j + Postgres backing stores for Mem0 +- Initial MVP relay service with raw fetch calls to Mem0 +- Dockerized with basic healthcheck + +**[Lyra-Cortex v0.1.0] - 2025-09-25** + +- First deployment as dedicated Proxmox VM (5 vCPU / 18 GB RAM / 100 GB SSD) +- Built **llama.cpp** with `llama-server` target via CMake +- Integrated **Phi-3.5 Mini Instruct (Uncensored, Q4_K_M GGUF)** model +- Verified API compatibility at `/v1/chat/completions` +- Local test successful via `curl` β†’ ~523 token response generated +- Performance benchmark: ~11.5 tokens/sec (CPU-only on Ryzen 7800X) +- Confirmed usable for salience scoring, summarization, and lightweight reasoning ### Fixed -- Corrected broken vLLM endpoint routing (`/v1/completions`). -- Stabilized cross-container health reporting for NeoMem. -- Resolved JSON parse failures caused by streaming chunk delimiters. + +**[Lyra-Core v0.1.0] - 2025-09-23** + +- Resolved crash loop in Neo4j by restricting env vars (`NEO4J_AUTH` only) +- Relay now correctly reads `MEM0_URL` and `MEM0_API_KEY` from `.env` + +### Verified + +**[Lyra_RAG v0.1.0] - 2025-11-07** + +- Successful recall of Lyra-Core development history (v0.3.0 snapshot) +- Correct metadata and category tagging for all new imports + +### Known Issues + +**[Lyra-Core v0.1.0] - 2025-09-23** + +- No feedback loop (thumbs up/down) yet +- Forget/delete flow is manual (via memory IDs) +- Memory latency ~1–4s depending on embedding model + +### Next Planned + +**[Lyra_RAG v0.1.0] - 2025-11-07** + +- Optional `where` filter parameter for category/date queries +- Graceful "no results" handler for empty retrievals +- `rag_docs_import.py` for PDFs and other document types --- - -## Next Planned – [v0.4.0] -### Planned Additions -- **Reflection Mode** - - Introduce `REASONING_MODE=factcheck|reflection`. - - Output schema: - ```json - { "insight": "...", "evaluation": "...", "next_action": "..." } - ``` - -- **Cortex-First Pipeline** - - UI β†’ Cortex β†’ [Reflection + Verifier + Memory] β†’ Speech LLM β†’ User. - - Allows Lyra to β€œthink before speaking.” - -- **Verifier Stub** - - New `/verify` endpoint for search-based factual grounding. - - Asynchronous external truth checking. - -- **Memory Integration** - - Feed reflective outputs into NeoMem. - - Enable β€œdream” cycles for autonomous self-review. - ---- - -**Status:** 🟒 Stable Core – Multi-backend reasoning operational. -**Next milestone:** *v0.4.0 β€” Reflection Mode + Thought Pipeline orchestration.* - ---- - -### [Intake] v0.1.0 - 2025-10-27 - - Recieves messages from relay and summarizes them in a cascading format. - - Continues to summarize smaller amounts of exhanges while also generating large scale conversational summaries. (L20) - - Currently logs summaries to .log file in /project-lyra/intake-logs/ - ** Next Steps ** - - Feed intake into neomem. - - Generate a daily/hourly/etc overall summary, (IE: Today Brian and Lyra worked on x, y, and z) - - Generate session aware summaries, with its own intake hopper. - - -### [Lyra-Cortex] v0.2.0 β€” 2025-09-26 -**Added -- Integrated **llama-server** on dedicated Cortex VM (Proxmox). -- Verified Phi-3.5-mini-instruct_Uncensored-Q4_K_M running with 8 vCPUs. -- Benchmarked Phi-3.5-mini performance: - - ~18 tokens/sec CPU-only on Ryzen 7 7800X. - - Salience classification functional but sometimes inconsistent ("sali", "fi", "jamming"). -- Tested **Qwen2-0.5B-Instruct GGUF** as alternative salience classifier: - - Much faster throughput (~350 tokens/sec prompt, ~100 tokens/sec eval). - - More responsive but over-classifies messages as β€œsalient.” -- Established `.env` integration for model ID (`SALIENCE_MODEL`), enabling hot-swap between models. - -** Known Issues -- Small models tend to drift or over-classify. -- CPU-only 7B+ models expected to be slow; GPU passthrough recommended for larger models. -- Need to set up a `systemd` service for `llama-server` to auto-start on VM reboot. - ---- - -### [Lyra-Cortex] v0.1.0 β€” 2025-09-25 -#### Added -- First deployment as dedicated Proxmox VM (5 vCPU / 18 GB RAM / 100 GB SSD). -- Built **llama.cpp** with `llama-server` target via CMake. -- Integrated **Phi-3.5 Mini Instruct (Uncensored, Q4_K_M GGUF)** model. -- Verified **API compatibility** at `/v1/chat/completions`. -- Local test successful via `curl` β†’ ~523 token response generated. -- Performance benchmark: ~11.5 tokens/sec (CPU-only on Ryzen 7800X). -- Confirmed usable for salience scoring, summarization, and lightweight reasoning. diff --git a/DEPRECATED_FILES.md b/DEPRECATED_FILES.md new file mode 100644 index 0000000..830c417 --- /dev/null +++ b/DEPRECATED_FILES.md @@ -0,0 +1,91 @@ +# Deprecated Files - Safe to Delete + +This file lists all deprecated files that can be safely deleted after verification. + +## Files Marked for Deletion + +### Docker Compose Files + +#### `/core/docker-compose.yml.DEPRECATED` +- **Status**: DEPRECATED +- **Reason**: All services consolidated into main `/docker-compose.yml` +- **Replaced by**: `/docker-compose.yml` (relay service now has complete config) +- **Safe to delete**: Yes, after verifying main docker-compose works + +### Environment Files + +All original `.env` files have been consolidated. Backups exist in `.env-backups/` directory. + +#### Previously Deleted (Already Done) +- βœ… `/core/.env` - Deleted (redundant with root .env) + +### Experimental/Orphaned Files + +#### `/core/env experiments/` (entire directory) +- **Status**: User will handle separately +- **Contains**: `.env`, `.env.local`, `.env.openai` +- **Action**: User to review and clean up + +## Verification Steps Before Deleting + +Before deleting the deprecated files, verify: + +1. **Test main docker-compose.yml works:** + ```bash + cd /home/serversdown/project-lyra + docker-compose down + docker-compose up -d + docker-compose ps # All services should be running + ``` + +2. **Verify relay service has correct config:** + ```bash + docker exec relay env | grep -E "LLM_|NEOMEM_|OPENAI" + docker exec relay ls -la /app/sessions # Sessions volume mounted + ``` + +3. **Test relay functionality:** + - Send a test message through relay + - Verify memory storage works + - Confirm LLM backend connections work + +## Deletion Commands + +After successful verification, run: + +```bash +cd /home/serversdown/project-lyra + +# Delete deprecated docker-compose file +rm core/docker-compose.yml.DEPRECATED + +# Optionally clean up backup directory after confirming everything works +# (Keep backups for at least a few days/weeks) +# rm -rf .env-backups/ +``` + +## Files to Keep + +These files should **NOT** be deleted: + +- βœ… `.env` (root) - Single source of truth +- βœ… `.env.example` (root) - Security template (commit to git) +- βœ… `cortex/.env` - Service-specific config +- βœ… `cortex/.env.example` - Security template (commit to git) +- βœ… `neomem/.env` - Service-specific config +- βœ… `neomem/.env.example` - Security template (commit to git) +- βœ… `intake/.env` - Service-specific config +- βœ… `intake/.env.example` - Security template (commit to git) +- βœ… `rag/.env.example` - Security template (commit to git) +- βœ… `docker-compose.yml` - Main orchestration file +- βœ… `ENVIRONMENT_VARIABLES.md` - Documentation +- βœ… `.gitignore` - Git configuration + +## Backup Information + +All original `.env` files backed up to: +- Location: `/home/serversdown/project-lyra/.env-backups/` +- Timestamp: `20251126_025334` +- Files: 6 original .env files + +Keep backups until you're confident the new setup is stable (recommended: 2-4 weeks). diff --git a/LOGGING_MIGRATION.md b/LOGGING_MIGRATION.md new file mode 100644 index 0000000..8ae5d56 --- /dev/null +++ b/LOGGING_MIGRATION.md @@ -0,0 +1,178 @@ +# Logging System Migration Complete + +## βœ… What Changed + +The old `VERBOSE_DEBUG` logging system has been completely replaced with the new structured `LOG_DETAIL_LEVEL` system. + +### Files Modified + +1. **[.env](.env)** - Removed `VERBOSE_DEBUG`, cleaned up duplicate `LOG_DETAIL_LEVEL` settings +2. **[cortex/.env](cortex/.env)** - Removed `VERBOSE_DEBUG` from cortex config +3. **[cortex/router.py](cortex/router.py)** - Replaced `VERBOSE_DEBUG` checks with `LOG_DETAIL_LEVEL` +4. **[cortex/context.py](cortex/context.py)** - Replaced `VERBOSE_DEBUG` with `LOG_DETAIL_LEVEL`, removed verbose file logging setup + +## 🎯 New Logging Configuration + +### Single Environment Variable + +Set `LOG_DETAIL_LEVEL` in your `.env` file: + +```bash +LOG_DETAIL_LEVEL=detailed +``` + +### Logging Levels + +| Level | Lines/Message | What You See | +|-------|---------------|--------------| +| **minimal** | 1-2 | Only errors and critical events | +| **summary** | 5-7 | Pipeline completion, errors, warnings (production mode) | +| **detailed** | 30-50 | LLM outputs, timing breakdowns, context (debugging mode) | +| **verbose** | 100+ | Everything including raw JSON dumps (deep debugging) | + +## πŸ“Š What You Get at Each Level + +### Summary Mode (Production) +``` +πŸ“Š Context | Session: abc123 | Messages: 42 | Last: 5.2min | RAG: 3 results +🧠 Monologue | question | Tone: curious + +==================================================================================================== +✨ PIPELINE COMPLETE | Session: abc123 | Total: 1250ms +==================================================================================================== +πŸ“€ Output: 342 characters +==================================================================================================== +``` + +### Detailed Mode (Debugging - RECOMMENDED) +``` +==================================================================================================== +πŸš€ PIPELINE START | Session: abc123 | 14:23:45.123 +==================================================================================================== +πŸ“ User: What is the meaning of life? +──────────────────────────────────────────────────────────────────────────────────────────────────── + +──────────────────────────────────────────────────────────────────────────────────────────────────── +🧠 LLM CALL | Backend: PRIMARY | 14:23:45.234 +──────────────────────────────────────────────────────────────────────────────────────────────────── +πŸ“ Prompt: You are Lyra, analyzing the user's question... +πŸ’¬ Reply: Based on the context provided, here's my analysis... +──────────────────────────────────────────────────────────────────────────────────────────────────── + +πŸ“Š Context | Session: abc123 | Messages: 42 | Last: 5.2min | RAG: 3 results +──────────────────────────────────────────────────────────────────────────────────────────────────── +[CONTEXT] Session abc123 | User: What is the meaning of life? +──────────────────────────────────────────────────────────────────────────────────────────────────── + Mode: default | Mood: neutral | Project: None + Tools: RAG, WEB, WEATHER, CODEBRAIN, POKERBRAIN + + ╭─ INTAKE SUMMARIES ──────────────────────────────────────────────── + β”‚ L1 : Last message discussed philosophy... + β”‚ L5 : Recent 5 messages covered existential topics... + β”‚ L10 : Past 10 messages showed curiosity pattern... + ╰─────────────────────────────────────────────────────────────────── + + ╭─ RAG RESULTS (3) ────────────────────────────────────────────── + β”‚ [1] 0.923 | Previous discussion about purpose... + β”‚ [2] 0.891 | Note about existential philosophy... + β”‚ [3] 0.867 | Memory of Viktor Frankl discussion... + ╰─────────────────────────────────────────────────────────────────── +──────────────────────────────────────────────────────────────────────────────────────────────────── + +🧠 Monologue | question | Tone: curious + +==================================================================================================== +✨ PIPELINE COMPLETE | Session: abc123 | Total: 1250ms +==================================================================================================== +⏱️ Stage Timings: + context : 150ms ( 12.0%) + identity : 10ms ( 0.8%) + monologue : 200ms ( 16.0%) + tools : 0ms ( 0.0%) + reflection : 50ms ( 4.0%) + reasoning : 450ms ( 36.0%) ← BOTTLENECK! + refinement : 300ms ( 24.0%) + persona : 140ms ( 11.2%) + learning : 50ms ( 4.0%) +πŸ“€ Output: 342 characters +==================================================================================================== +``` + +### Verbose Mode (Maximum Debug) +Same as detailed, plus: +- Full raw JSON responses from LLMs (50-line boxes) +- Complete intake data structures +- Stack traces on errors + +## πŸš€ How to Use + +### For Finding Weak Links (Your Use Case) +```bash +# In .env: +LOG_DETAIL_LEVEL=detailed + +# Restart services: +docker-compose restart cortex relay +``` + +You'll now see: +- βœ… Which LLM backend is used +- βœ… What prompts are sent to each LLM +- βœ… What each LLM responds with +- βœ… Timing breakdown showing which stage is slow +- βœ… Context being used (RAG, intake summaries) +- βœ… Clean, hierarchical structure + +### For Production +```bash +LOG_DETAIL_LEVEL=summary +``` + +### For Deep Debugging +```bash +LOG_DETAIL_LEVEL=verbose +``` + +## πŸ” Finding Performance Bottlenecks + +With `detailed` mode, look for: + +1. **Slow stages in timing breakdown:** + ``` + reasoning : 3450ms ( 76.0%) ← THIS IS YOUR BOTTLENECK! + ``` + +2. **Backend failures:** + ``` + ⚠️ [LLM] PRIMARY failed | 14:23:45.234 | Connection timeout + βœ… [LLM] SECONDARY | Reply: Based on... ← Fell back to secondary + ``` + +3. **Loop detection:** + ``` + ⚠️ DUPLICATE MESSAGE DETECTED | Session: abc123 + πŸ” LOOP DETECTED - Returning cached context + ``` + +## πŸ“ Removed Features + +The following old logging features have been removed: + +- ❌ `VERBOSE_DEBUG` environment variable (replaced with `LOG_DETAIL_LEVEL`) +- ❌ File logging to `/app/logs/cortex_verbose_debug.log` (use `docker logs` instead) +- ❌ Separate verbose handlers in Python logging +- ❌ Per-module verbose flags + +## ✨ New Features + +- βœ… Single unified logging configuration +- βœ… Hierarchical, scannable output +- βœ… Collapsible data sections (boxes) +- βœ… Stage timing always shown in detailed mode +- βœ… Performance profiling built-in +- βœ… Loop detection and warnings +- βœ… Clean error formatting + +--- + +**The logging is now clean, concise, and gives you exactly what you need to find weak links!** 🎯 diff --git a/LOGGING_QUICK_REF.md b/LOGGING_QUICK_REF.md new file mode 100644 index 0000000..a0fb88c --- /dev/null +++ b/LOGGING_QUICK_REF.md @@ -0,0 +1,176 @@ +# Cortex Logging Quick Reference + +## 🎯 TL;DR + +**Finding weak links in the LLM chain?** +```bash +export LOG_DETAIL_LEVEL=detailed +export VERBOSE_DEBUG=true +``` + +**Production use?** +```bash +export LOG_DETAIL_LEVEL=summary +``` + +--- + +## πŸ“Š Log Levels Comparison + +| Level | Output Lines/Message | Use Case | Raw LLM Output? | +|-------|---------------------|----------|-----------------| +| **minimal** | 1-2 | Silent production | ❌ No | +| **summary** | 5-7 | Production (DEFAULT) | ❌ No | +| **detailed** | 30-50 | Debugging, finding bottlenecks | βœ… Parsed only | +| **verbose** | 100+ | Deep debugging, seeing raw data | βœ… Full JSON | + +--- + +## πŸ” Common Debugging Tasks + +### See Raw LLM Outputs +```bash +export LOG_DETAIL_LEVEL=verbose +``` +Look for: +``` +╭─ RAW RESPONSE ──────────────────────────────────── +β”‚ { "choices": [ { "message": { "content": "..." } } ] } +╰─────────────────────────────────────────────────── +``` + +### Find Performance Bottlenecks +```bash +export LOG_DETAIL_LEVEL=detailed +``` +Look for: +``` +⏱️ Stage Timings: + reasoning : 3450ms ( 76.0%) ← SLOW! +``` + +### Check Which RAG Memories Are Used +```bash +export LOG_DETAIL_LEVEL=detailed +``` +Look for: +``` +╭─ RAG RESULTS (5) ────────────────────────────── +β”‚ [1] 0.923 | Memory content... +``` + +### Detect Loops +```bash +export ENABLE_DUPLICATE_DETECTION=true # (default) +``` +Look for: +``` +⚠️ DUPLICATE MESSAGE DETECTED +πŸ” LOOP DETECTED - Returning cached context +``` + +### See All Backend Failures +```bash +export LOG_DETAIL_LEVEL=summary # or higher +``` +Look for: +``` +⚠️ [LLM] PRIMARY failed | Connection timeout +⚠️ [LLM] SECONDARY failed | Model not found +βœ… [LLM] CLOUD | Reply: Based on... +``` + +--- + +## πŸ› οΈ Environment Variables Cheat Sheet + +```bash +# Verbosity Control +LOG_DETAIL_LEVEL=detailed # minimal | summary | detailed | verbose +VERBOSE_DEBUG=false # true = maximum verbosity (legacy) + +# Raw Data Visibility +LOG_RAW_CONTEXT_DATA=false # Show full intake L1-L30 dumps + +# Loop Protection +ENABLE_DUPLICATE_DETECTION=true # Detect duplicate messages +MAX_MESSAGE_HISTORY=100 # Trim history after N messages +SESSION_TTL_HOURS=24 # Expire sessions after N hours + +# Features +NEOMEM_ENABLED=false # Enable long-term memory +ENABLE_AUTONOMOUS_TOOLS=true # Enable tool invocation +ENABLE_PROACTIVE_MONITORING=true # Enable suggestions +``` + +--- + +## πŸ“‹ Sample Output + +### Summary Mode (Default - Production) +``` +βœ… [LLM] PRIMARY | 14:23:45.123 | Reply: Based on your question... +πŸ“Š Context | Session: abc123 | Messages: 42 | Last: 5.2min | RAG: 5 results +🧠 Monologue | question | Tone: curious +✨ PIPELINE COMPLETE | Session: abc123 | Total: 1250ms +πŸ“€ Output: 342 characters +``` + +### Detailed Mode (Debugging) +``` +════════════════════════════════════════════════════════════════════════════ +πŸš€ PIPELINE START | Session: abc123 | 14:23:45.123 +════════════════════════════════════════════════════════════════════════════ +πŸ“ User: What is the meaning of life? +──────────────────────────────────────────────────────────────────────────── + +──────────────────────────────────────────────────────────────────────────── +🧠 LLM CALL | Backend: PRIMARY | 14:23:45.234 +──────────────────────────────────────────────────────────────────────────── +πŸ“ Prompt: You are Lyra, a thoughtful AI assistant... +πŸ’¬ Reply: Based on philosophical perspectives... + +πŸ“Š Context | Session: abc123 | Messages: 42 | Last: 5.2min | RAG: 5 results + ╭─ RAG RESULTS (5) ────────────────────────────── + β”‚ [1] 0.923 | Previous philosophy discussion... + β”‚ [2] 0.891 | Existential note... + ╰──────────────────────────────────────────────── + +════════════════════════════════════════════════════════════════════════════ +✨ PIPELINE COMPLETE | Session: abc123 | Total: 1250ms +════════════════════════════════════════════════════════════════════════════ +⏱️ Stage Timings: + context : 150ms ( 12.0%) + reasoning : 450ms ( 36.0%) ← Largest component + persona : 140ms ( 11.2%) +πŸ“€ Output: 342 characters +════════════════════════════════════════════════════════════════════════════ +``` + +--- + +## ⚑ Quick Troubleshooting + +| Symptom | Check | Fix | +|---------|-------|-----| +| **Logs too verbose** | Current level | Set `LOG_DETAIL_LEVEL=summary` | +| **Can't see LLM outputs** | Current level | Set `LOG_DETAIL_LEVEL=detailed` or `verbose` | +| **Repeating operations** | Loop warnings | Check for `πŸ” LOOP DETECTED` messages | +| **Slow responses** | Stage timings | Look for stages >1000ms in detailed mode | +| **Missing RAG data** | NEOMEM_ENABLED | Set `NEOMEM_ENABLED=true` | +| **Out of memory** | Message history | Lower `MAX_MESSAGE_HISTORY` | + +--- + +## πŸ“ Key Files + +- **[.env.logging.example](.env.logging.example)** - Full configuration guide +- **[LOGGING_REFACTOR_SUMMARY.md](LOGGING_REFACTOR_SUMMARY.md)** - Detailed explanation +- **[cortex/utils/logging_utils.py](cortex/utils/logging_utils.py)** - Logging utilities +- **[cortex/context.py](cortex/context.py)** - Context + loop protection +- **[cortex/router.py](cortex/router.py)** - Pipeline stages +- **[core/relay/lib/llm.js](core/relay/lib/llm.js)** - LLM backend logging + +--- + +**Need more detail? See [LOGGING_REFACTOR_SUMMARY.md](LOGGING_REFACTOR_SUMMARY.md)** diff --git a/LOGGING_REFACTOR_SUMMARY.md b/LOGGING_REFACTOR_SUMMARY.md new file mode 100644 index 0000000..2b3c919 --- /dev/null +++ b/LOGGING_REFACTOR_SUMMARY.md @@ -0,0 +1,352 @@ +# Cortex Logging Refactor Summary + +## 🎯 Problem Statement + +The cortex chat loop had severe logging issues that made debugging impossible: + +1. **Massive verbosity**: 100+ log lines per chat message +2. **Raw LLM dumps**: Full JSON responses pretty-printed on every call (1000s of lines) +3. **Repeated data**: NeoMem results logged 71 times individually +4. **No structure**: Scattered emoji logs with no hierarchy +5. **Impossible to debug**: Couldn't tell if loops were happening or just verbose logging +6. **No loop protection**: Unbounded message history growth, no session cleanup, no duplicate detection + +## βœ… What Was Fixed + +### 1. **Structured Hierarchical Logging** + +**Before:** +``` +πŸ” RAW LLM RESPONSE: { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1234567890, + "model": "gpt-4", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Here is a very long response that goes on for hundreds of lines..." + } + } + ], + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } +} +🧠 Trying backend: PRIMARY (http://localhost:8000) +βœ… Success via PRIMARY +[STAGE 0] Collecting unified context... +[STAGE 0] Context collected - 5 RAG results +[COLLECT_CONTEXT] Intake data retrieved: +{ + "L1": [...], + "L5": [...], + "L10": {...}, + "L20": {...}, + "L30": {...} +} +[COLLECT_CONTEXT] NeoMem search returned 71 results + [1] Score: 0.923 - Memory content here... + [2] Score: 0.891 - More memory content... + [3] Score: 0.867 - Even more content... + ... (68 more lines) +``` + +**After (summary mode - DEFAULT):** +``` +βœ… [LLM] PRIMARY | 14:23:45.123 | Reply: Based on your question about... +πŸ“Š Context | Session: abc123 | Messages: 42 | Last: 5.2min | RAG: 5 results +🧠 Monologue | question | Tone: curious +✨ PIPELINE COMPLETE | Session: abc123 | Total: 1250ms +πŸ“€ Output: 342 characters +``` + +**After (detailed mode - for debugging):** +``` +════════════════════════════════════════════════════════════════════════════════════════════════════ +πŸš€ PIPELINE START | Session: abc123 | 14:23:45.123 +════════════════════════════════════════════════════════════════════════════════════════════════════ +πŸ“ User: What is the meaning of life? +──────────────────────────────────────────────────────────────────────────────────────────────────── + +──────────────────────────────────────────────────────────────────────────────────────────────────── +🧠 LLM CALL | Backend: PRIMARY | 14:23:45.234 +──────────────────────────────────────────────────────────────────────────────────────────────────── +πŸ“ Prompt: You are Lyra, a thoughtful AI assistant... +πŸ’¬ Reply: Based on philosophical perspectives, the meaning... + +πŸ“Š Context | Session: abc123 | Messages: 42 | Last: 5.2min | RAG: 5 results +──────────────────────────────────────────────────────────────────────────────────────────────────── +[CONTEXT] Session abc123 | User: What is the meaning of life? +──────────────────────────────────────────────────────────────────────────────────────────────────── + Mode: default | Mood: neutral | Project: None + Tools: RAG, WEB, WEATHER, CODEBRAIN, POKERBRAIN + + ╭─ INTAKE SUMMARIES ──────────────────────────────────────────────── + β”‚ L1 : Last message discussed philosophy... + β”‚ L5 : Recent 5 messages covered existential topics... + β”‚ L10 : Past 10 messages showed curiosity pattern... + β”‚ L20 : Session focused on deep questions... + β”‚ L30 : Long-term trend shows philosophical interest... + ╰─────────────────────────────────────────────────────────────────── + + ╭─ RAG RESULTS (5) ────────────────────────────────────────────── + β”‚ [1] 0.923 | Previous discussion about purpose and meaning... + β”‚ [2] 0.891 | Note about existential philosophy... + β”‚ [3] 0.867 | Memory of Viktor Frankl discussion... + β”‚ [4] 0.834 | Reference to stoic philosophy... + β”‚ [5] 0.801 | Buddhism and the middle path... + ╰─────────────────────────────────────────────────────────────────── +──────────────────────────────────────────────────────────────────────────────────────────────────── + +════════════════════════════════════════════════════════════════════════════════════════════════════ +✨ PIPELINE COMPLETE | Session: abc123 | Total: 1250ms +════════════════════════════════════════════════════════════════════════════════════════════════════ +⏱️ Stage Timings: + context : 150ms ( 12.0%) + identity : 10ms ( 0.8%) + monologue : 200ms ( 16.0%) + tools : 0ms ( 0.0%) + reflection : 50ms ( 4.0%) + reasoning : 450ms ( 36.0%) + refinement : 300ms ( 24.0%) + persona : 140ms ( 11.2%) +πŸ“€ Output: 342 characters +════════════════════════════════════════════════════════════════════════════════════════════════════ +``` + +### 2. **Configurable Verbosity Levels** + +Set via `LOG_DETAIL_LEVEL` environment variable: + +- **`minimal`**: Only errors and critical events +- **`summary`**: Stage completion + errors (DEFAULT - recommended for production) +- **`detailed`**: Include raw LLM outputs, RAG results, timing breakdowns (for debugging) +- **`verbose`**: Everything including full JSON dumps (for deep debugging) + +### 3. **Raw LLM Output Visibility** βœ… + +**You can now see raw LLM outputs clearly!** + +In `detailed` or `verbose` mode, LLM calls show: +- Backend used +- Prompt preview +- Parsed reply +- **Raw JSON response in collapsible format** (verbose only) + +``` +╭─ RAW RESPONSE ──────────────────────────────────────────────────────────────────────────── +β”‚ { +β”‚ "id": "chatcmpl-123", +β”‚ "object": "chat.completion", +β”‚ "model": "gpt-4", +β”‚ "choices": [ +β”‚ { +β”‚ "message": { +β”‚ "content": "Full response here..." +β”‚ } +β”‚ } +β”‚ ] +β”‚ } +╰─────────────────────────────────────────────────────────────────────────────────────────── +``` + +### 4. **Loop Detection & Protection** βœ… + +**New safety features:** + +- **Duplicate message detection**: Prevents processing the same message twice +- **Message history trimming**: Auto-trims to last 100 messages (configurable via `MAX_MESSAGE_HISTORY`) +- **Session TTL**: Auto-expires inactive sessions after 24 hours (configurable via `SESSION_TTL_HOURS`) +- **Hash-based detection**: Uses MD5 hash to detect exact duplicate messages + +**Example warning when loop detected:** +``` +⚠️ DUPLICATE MESSAGE DETECTED | Session: abc123 | Message: What is the meaning of life? +πŸ” LOOP DETECTED - Returning cached context to prevent processing duplicate +``` + +### 5. **Performance Timing** βœ… + +In `detailed` mode, see exactly where time is spent: + +``` +⏱️ Stage Timings: + context : 150ms ( 12.0%) ← Context collection + identity : 10ms ( 0.8%) ← Identity loading + monologue : 200ms ( 16.0%) ← Inner monologue + tools : 0ms ( 0.0%) ← Autonomous tools + reflection : 50ms ( 4.0%) ← Reflection notes + reasoning : 450ms ( 36.0%) ← Main reasoning (BOTTLENECK) + refinement : 300ms ( 24.0%) ← Answer refinement + persona : 140ms ( 11.2%) ← Persona layer +``` + +**This helps you identify weak links in the chain!** + +## πŸ“ Files Modified + +### Core Changes + +1. **[llm.js](core/relay/lib/llm.js)** + - Removed massive JSON dump on line 53 + - Added structured logging with 4 verbosity levels + - Shows raw responses only in verbose mode (collapsible format) + - Tracks failed backends and shows summary on total failure + +2. **[context.py](cortex/context.py)** + - Condensed 71-line NeoMem loop to 5-line summary + - Removed repeated intake data dumps + - Added structured hierarchical logging with boxes + - Added duplicate message detection + - Added message history trimming + - Added session TTL and cleanup + +3. **[router.py](cortex/router.py)** + - Replaced 15+ stage logs with unified pipeline summary + - Added stage timing collection + - Shows performance breakdown in detailed mode + - Clean start/end markers with total duration + +### New Files + +4. **[utils/logging_utils.py](cortex/utils/logging_utils.py)** (NEW) + - Reusable structured logging utilities + - `PipelineLogger` class for hierarchical logging + - Collapsible data sections + - Stage timing tracking + - Future-ready for expansion + +5. **[.env.logging.example](.env.logging.example)** (NEW) + - Complete logging configuration guide + - Shows example output at each verbosity level + - Documents all environment variables + - Production-ready defaults + +6. **[LOGGING_REFACTOR_SUMMARY.md](LOGGING_REFACTOR_SUMMARY.md)** (THIS FILE) + +## πŸš€ How to Use + +### For Finding Weak Links (Your Use Case) + +```bash +# Set in your .env or export: +export LOG_DETAIL_LEVEL=detailed +export VERBOSE_DEBUG=false # or true for even more detail + +# Now run your chat - you'll see: +# 1. Which LLM backend is used +# 2. Raw LLM outputs (in verbose mode) +# 3. Exact timing per stage +# 4. Which stage is taking longest +``` + +### For Production + +```bash +export LOG_DETAIL_LEVEL=summary + +# Minimal, clean logs: +# βœ… [LLM] PRIMARY | 14:23:45.123 | Reply: Based on your question... +# ✨ PIPELINE COMPLETE | Session: abc123 | Total: 1250ms +``` + +### For Deep Debugging + +```bash +export LOG_DETAIL_LEVEL=verbose +export LOG_RAW_CONTEXT_DATA=true + +# Shows EVERYTHING including full JSON dumps +``` + +## πŸ” Finding Weak Links - Quick Guide + +**Problem: "Which LLM stage is failing or producing bad output?"** + +1. Set `LOG_DETAIL_LEVEL=detailed` +2. Run a test conversation +3. Look for timing anomalies: + ``` + reasoning : 3450ms ( 76.0%) ← BOTTLENECK! + ``` +4. Look for errors: + ``` + ⚠️ Reflection failed: Connection timeout + ``` +5. Check raw LLM outputs (set `VERBOSE_DEBUG=true`): + ``` + ╭─ RAW RESPONSE ──────────────────────────────────── + β”‚ { + β”‚ "choices": [ + β”‚ { "message": { "content": "..." } } + β”‚ ] + β”‚ } + ╰─────────────────────────────────────────────────── + ``` + +**Problem: "Is the loop repeating operations?"** + +1. Enable duplicate detection (on by default) +2. Look for loop warnings: + ``` + ⚠️ DUPLICATE MESSAGE DETECTED | Session: abc123 + πŸ” LOOP DETECTED - Returning cached context + ``` +3. Check stage timings - repeated stages will show up as duplicates + +**Problem: "Which RAG memories are being used?"** + +1. Set `LOG_DETAIL_LEVEL=detailed` +2. Look for RAG results box: + ``` + ╭─ RAG RESULTS (5) ────────────────────────────── + β”‚ [1] 0.923 | Previous discussion about X... + β”‚ [2] 0.891 | Note about Y... + ╰──────────────────────────────────────────────── + ``` + +## πŸ“Š Environment Variables Reference + +| Variable | Default | Description | +|----------|---------|-------------| +| `LOG_DETAIL_LEVEL` | `summary` | Verbosity: minimal/summary/detailed/verbose | +| `VERBOSE_DEBUG` | `false` | Legacy flag for maximum verbosity | +| `LOG_RAW_CONTEXT_DATA` | `false` | Show full intake data dumps | +| `ENABLE_DUPLICATE_DETECTION` | `true` | Detect and prevent duplicate messages | +| `MAX_MESSAGE_HISTORY` | `100` | Max messages to keep per session | +| `SESSION_TTL_HOURS` | `24` | Auto-expire sessions after N hours | + +## πŸŽ‰ Results + +**Before:** 1000+ lines of logs per chat message, unreadable, couldn't identify issues + +**After (summary mode):** 5 lines of structured logs, clear and actionable + +**After (detailed mode):** ~50 lines with full visibility into each stage, timing, and raw outputs + +**Loop protection:** Automatic detection and prevention of duplicate processing + +**You can now:** +βœ… See raw LLM outputs clearly (in detailed/verbose mode) +βœ… Identify performance bottlenecks (stage timings) +βœ… Detect loops and duplicates (automatic) +βœ… Find failing stages (error markers) +βœ… Scan logs quickly (hierarchical structure) +βœ… Debug production issues (adjustable verbosity) + +## πŸ”§ Next Steps (Optional Improvements) + +1. **Structured JSON logging**: Output as JSON for log aggregation tools +2. **Log rotation**: Implement file rotation for verbose logs +3. **Metrics export**: Export stage timings to Prometheus/Grafana +4. **Error categorization**: Tag errors by type (network, timeout, parsing, etc.) +5. **Performance alerts**: Auto-alert when stages exceed thresholds + +--- + +**Happy debugging! You can now see what's actually happening in the cortex loop.** 🎯 diff --git a/README.md b/README.md index f8a1eed..37dcab5 100644 --- a/README.md +++ b/README.md @@ -1,265 +1,902 @@ -##### Project Lyra - README v0.3.0 - needs fixing ##### +# Project Lyra - README v0.9.1 -Lyra is a modular persistent AI companion system. -It provides memory-backed chat using **NeoMem** + **Relay** + **Persona Sidecar**, -with optional subconscious annotation powered by **Cortex VM** running local LLMs. +Lyra is a modular persistent AI companion system with advanced reasoning capabilities and autonomous decision-making. +It provides memory-backed chat using **Relay** + **Cortex** with integrated **Autonomy System**, +featuring a multi-stage reasoning pipeline powered by HTTP-based LLM backends. + +**NEW in v0.9.0:** Trilium Notes integration - Search and create notes from conversations + +**Current Version:** v0.9.1 (2025-12-29) + +> **Note:** As of v0.6.0, NeoMem is **disabled by default** while we work out integration hiccups in the pipeline. The autonomy system is being refined independently before full memory integration. + +## Mission Statement + +The point of Project Lyra is to give an AI chatbot more abilities than a typical chatbot. Typical chatbots are essentially amnesic and forget evertything about your project. Lyra helps keep projects organized and remembers everything you have done. Think of her abilities as a notepad/schedule/database/co-creator/collaborator all with its own executive function. Say something in passing, Lyra remembers it then reminds you of it later. -## Mission Statement ## - The point of project lyra is to give an AI chatbot more abilities than a typical chatbot. typical chat bots are essentially amnesic and forget everything about your project. Lyra helps keep projects organized and remembers everything you have done. Think of her abilities as a notepad/schedule/data base/ co-creator/collaborattor all with its own executive function. Say something in passing, Lyra remembers it then reminds you of it later. - ---- - -## Structure ## - Project Lyra exists as a series of docker containers that run independentally of each other but are all networked together. Think of it as how the brain has regions, Lyra has modules: - ## A. VM 100 - lyra-core: - 1. ** Core v0.3.1 - Docker Stack - - Relay - (docker container) - The main harness that connects the modules together and accepts input from the user. - - UI - (HTML) - This is how the user communicates with lyra. ATM its a typical instant message interface, but plans are to make it much more than that. - - Persona - (docker container) - This is the personality of lyra, set how you want her to behave. Give specific instructions for output. Basically prompt injection. - - All of this is built and controlled by a single .env and docker-compose.lyra.yml. - 2. **NeoMem v0.1.0 - (docker stack) - - NeoMem is Lyra's main long term memory data base. It is a fork of mem0 oss. Uses vector databases and graph. - - NeoMem launches with a single separate docker-compose.neomem.yml. - - ## B. VM 101 - lyra - cortex - 3. ** Cortex - VM containing docker stack - - This is the working reasoning layer of Lyra. - - Built to be flexible in deployment. Run it locally or remotely (via wan/lan) - - Intake v0.1.0 - (docker Container) gives conversations context and purpose - - Intake takes the last N exchanges and summarizes them into coherrent short term memories. - - Uses a cascading summarization setup that quantizes the exchanges. Summaries occur at L2, L5, L10, L15, L20 etc. - - Keeps the bot aware of what is going on with out having to send it the whole chat every time. - - Cortex - Docker container containing: - - Reasoning Layer - - TBD - - Reflect - (docker continer) - Not yet implemented, road map. - - Calls back to NeoMem after N exchanges and N summaries and edits memories created during the initial messaging step. This helps contain memories to coherrent thoughts, reduces the noise. - - Can be done actively and asynchronously, or on a time basis (think human sleep and dreams). - - This stage is not yet built, this is just an idea. - - ## C. Remote LLM APIs: - 3. **AI Backends - - Lyra doesnt run models her self, she calls up APIs. - - Endlessly customizable as long as it outputs to the same schema. - --- +## Architecture Overview -## πŸš€ Features ## +Project Lyra operates as a **single docker-compose deployment** with multiple Docker containers networked together in a microservices architecture. Like how the brain has regions, Lyra has modules: -# Lyra-Core VM (VM100) -- **Relay **: - - The main harness and orchestrator of Lyra. - - OpenAI-compatible endpoint: `POST /v1/chat/completions` - - Injects persona + relevant memories into every LLM call - - Routes all memory storage/retrieval through **NeoMem** - - Logs spans (`neomem.add`, `neomem.search`, `persona.fetch`, `llm.generate`) +### Core Services -- **NeoMem (Memory Engine)**: - - Forked from Mem0 OSS and fully independent. - - Drop-in compatible API (`/memories`, `/search`). - - Local-first: runs on FastAPI with Postgres + Neo4j. - - No external SDK dependencies. - - Default service: `neomem-api` (port 7077). - - Capable of adding new memories and updating previous memories. Compares existing embeddings and performs in place updates when a memory is judged to be a semantic match. +**1. Relay** (Node.js/Express) - Port 7078 +- Main orchestrator and message router +- Coordinates all module interactions +- OpenAI-compatible endpoint: `POST /v1/chat/completions` +- Internal endpoint: `POST /chat` +- Dual-mode routing: Standard Mode (simple chat) or Cortex Mode (full reasoning) +- Server-side session persistence with file-based storage +- Session management API: `GET/POST/PATCH/DELETE /sessions` +- Manages async calls to Cortex ingest +- *(NeoMem integration currently disabled in v0.6.0)* -- **UI**: - - Lightweight static HTML chat page. - - Connects to Relay at `http://:7078`. - - Nice cyberpunk theme! - - Saves and loads sessions, which then in turn send to relay. +**2. UI** (Static HTML) - Port 8081 (nginx) +- Browser-based chat interface with cyberpunk theme +- Mode selector (Standard/Cortex) in header +- Settings modal with backend selection and session management +- Light/Dark mode toggle (dark by default) +- **NEW in v0.8.0:** "🧠 Show Work" button for real-time thinking stream + - Opens popup window with live SSE connection + - Color-coded events: thinking, tool calls, results, completion + - Auto-scrolling with animations + - Session-aware (matches current chat session) +- Server-synced session management (persists across browsers and reboots) +- OpenAI-compatible message format + +**3. NeoMem** (Python/FastAPI) - Port 7077 - **DISABLED IN v0.6.0** +- Long-term memory database (fork of Mem0 OSS) +- Vector storage (PostgreSQL + pgvector) + Graph storage (Neo4j) +- RESTful API: `/memories`, `/search` +- Semantic memory updates and retrieval +- No external SDK dependencies - fully local +- **Status:** Currently disabled while pipeline integration is refined + +### Reasoning Layer + +**4. Cortex** (Python/FastAPI) - Port 7081 +- Primary reasoning engine with multi-stage pipeline and autonomy system +- **Includes embedded Intake module** (no separate service as of v0.5.1) +- **Integrated Autonomy System** (NEW in v0.6.0) - See Autonomy System section below +- **Tool Calling System** (NEW in v0.8.0) - Agentic execution for Standard Mode + - Sandboxed code execution (Python, JavaScript, Bash) + - Web search via Tavily API + - **Trilium knowledge base integration** (NEW in v0.9.0) + - Multi-iteration autonomous tool use (max 5 iterations) + - Real-time thinking stream via SSE +- **Dual Operating Modes:** + - **Standard Mode** (v0.7.0) - Simple chatbot with context retention + tool calling (v0.8.0) + - Bypasses reflection, reasoning, refinement stages + - Direct LLM call with conversation history + - User-selectable backend (SECONDARY, OPENAI, or custom) + - **NEW:** Autonomous tool calling for code execution, web search, knowledge queries + - **NEW:** "Show Your Work" real-time thinking stream + - Faster responses for coding and practical tasks + - **Cortex Mode** - Full 4-stage reasoning pipeline + 1. **Reflection** - Generates meta-awareness notes about conversation + 2. **Reasoning** - Creates initial draft answer using context + 3. **Refinement** - Polishes and improves the draft + 4. **Persona** - Applies Lyra's personality and speaking style +- Integrates with Intake for short-term context via internal Python imports +- Flexible LLM router supporting multiple backends via HTTP +- **Endpoints:** + - `POST /reason` - Main reasoning pipeline (Cortex Mode) + - `POST /simple` - Direct LLM chat with tool calling (Standard Mode) + - `GET /stream/thinking/{session_id}` - SSE stream for thinking events **NEW in v0.8.0** + - `POST /ingest` - Receives conversation exchanges from Relay + - `GET /health` - Service health check + - `GET /debug/sessions` - Inspect in-memory SESSIONS state + - `GET /debug/summary` - Test summarization for a session + +**5. Intake** (Python Module) - **Embedded in Cortex** +- **No longer a standalone service** - runs as Python module inside Cortex container +- Short-term memory management with session-based circular buffer +- In-memory SESSIONS dictionary: `session_id β†’ {buffer: deque(maxlen=200), created_at: timestamp}` +- Multi-level summarization (L1/L5/L10/L20/L30) produced by `summarize_context()` +- Deferred summarization - actual summary generation happens during `/reason` call +- Internal Python API: + - `add_exchange_internal(exchange)` - Direct function call from Cortex + - `summarize_context(session_id, exchanges)` - Async LLM-based summarization + - `SESSIONS` - Module-level global state (requires single Uvicorn worker) + +### LLM Backends (HTTP-based) + +**All LLM communication is done via HTTP APIs:** +- **PRIMARY**: llama.cpp server (`http://10.0.0.44:8080`) - AMD MI50 GPU backend +- **SECONDARY**: Ollama server (`http://10.0.0.3:11434`) - RTX 3090 backend + - Model: qwen2.5:7b-instruct-q4_K_M +- **CLOUD**: OpenAI API (`https://api.openai.com/v1`) - Cloud-based models + - Model: gpt-4o-mini +- **FALLBACK**: Local backup (`http://10.0.0.41:11435`) - Emergency fallback + - Model: llama-3.2-8b-instruct + +Each module can be configured to use a different backend via environment variables. + +### Autonomy System (NEW in v0.6.0) + +**Cortex Autonomy Subsystems** - Multi-layered autonomous decision-making and learning +- **Executive Layer** [cortex/autonomy/executive/](cortex/autonomy/executive/) + - High-level planning and goal setting + - Multi-step reasoning for complex objectives + - Strategic decision making +- **Decision Engine** [cortex/autonomy/tools/decision_engine.py](cortex/autonomy/tools/decision_engine.py) + - Autonomous decision-making framework + - Option evaluation and selection + - Coordinated decision orchestration +- **Autonomous Actions** [cortex/autonomy/actions/](cortex/autonomy/actions/) + - Self-initiated action execution + - Context-aware behavior implementation + - Action logging and tracking +- **Pattern Learning** [cortex/autonomy/learning/](cortex/autonomy/learning/) + - Learns from interaction patterns + - Identifies recurring user needs + - Adaptive behavior refinement +- **Proactive Monitoring** [cortex/autonomy/proactive/](cortex/autonomy/proactive/) + - System state monitoring + - Intervention opportunity detection + - Background awareness capabilities +- **Self-Analysis** [cortex/autonomy/self/](cortex/autonomy/self/) + - Performance tracking and analysis + - Cognitive pattern identification + - Self-state persistence in [cortex/data/self_state.json](cortex/data/self_state.json) +- **Orchestrator** [cortex/autonomy/tools/orchestrator.py](cortex/autonomy/tools/orchestrator.py) + - Coordinates all autonomy subsystems + - Manages tool selection and execution + - Handles external integrations (with enable/disable controls) + +**Autonomy Architecture:** +The autonomy system operates in coordinated layers, all maintaining state in `self_state.json`: +1. Executive Layer β†’ Planning and goals +2. Decision Layer β†’ Evaluation and choices +3. Action Layer β†’ Execution +4. Learning Layer β†’ Pattern adaptation +5. Monitoring Layer β†’ Proactive awareness + +--- + +## Data Flow Architecture (v0.7.0) + +### Standard Mode Flow (NEW in v0.7.0): + +``` +User (UI) β†’ POST /v1/chat/completions {mode: "standard", backend: "SECONDARY"} + ↓ +Relay (7078) + ↓ POST /simple +Cortex (7081) + ↓ (internal Python call) +Intake module β†’ get_recent_messages() (last 20 messages) + ↓ +Direct LLM call (user-selected backend: SECONDARY/OPENAI/custom) + ↓ +Returns simple response to Relay + ↓ +Relay β†’ POST /ingest (async) + ↓ +Cortex β†’ add_exchange_internal() β†’ SESSIONS buffer + ↓ +Relay β†’ POST /sessions/:id (save session to file) + ↓ +Relay β†’ UI (returns final response) + +Note: Bypasses reflection, reasoning, refinement, persona stages +``` + +### Cortex Mode Flow (Full Reasoning): + +``` +User (UI) β†’ POST /v1/chat/completions {mode: "cortex"} + ↓ +Relay (7078) + ↓ POST /reason +Cortex (7081) + ↓ (internal Python call) +Intake module β†’ summarize_context() + ↓ +Autonomy System β†’ Decision evaluation & pattern learning + ↓ +Cortex processes (4 stages): + 1. reflection.py β†’ meta-awareness notes (CLOUD backend) + 2. reasoning.py β†’ draft answer (PRIMARY backend, autonomy-aware) + 3. refine.py β†’ refined answer (PRIMARY backend) + 4. persona/speak.py β†’ Lyra personality (CLOUD backend, autonomy-aware) + ↓ +Returns persona answer to Relay + ↓ +Relay β†’ POST /ingest (async) + ↓ +Cortex β†’ add_exchange_internal() β†’ SESSIONS buffer + ↓ +Autonomy System β†’ Update self_state.json (pattern tracking) + ↓ +Relay β†’ POST /sessions/:id (save session to file) + ↓ +Relay β†’ UI (returns final response) + +Note: NeoMem integration disabled in v0.6.0 +``` + +### Session Persistence Flow (NEW in v0.7.0): + +``` +UI loads β†’ GET /sessions β†’ Relay β†’ List all sessions from files β†’ UI dropdown +User sends message β†’ POST /sessions/:id β†’ Relay β†’ Save to sessions/*.json +User renames session β†’ PATCH /sessions/:id/metadata β†’ Relay β†’ Update *.meta.json +User deletes session β†’ DELETE /sessions/:id β†’ Relay β†’ Remove session files + +Sessions stored in: core/relay/sessions/ +- {sessionId}.json (conversation history) +- {sessionId}.meta.json (name, timestamps, metadata) +``` + +### Cortex 4-Stage Reasoning Pipeline: + +1. **Reflection** (`reflection.py`) - Cloud LLM (OpenAI) + - Analyzes user intent and conversation context + - Generates meta-awareness notes + - "What is the user really asking?" + +2. **Reasoning** (`reasoning.py`) - Primary LLM (llama.cpp) + - Retrieves short-term context from Intake module + - Creates initial draft answer + - Integrates context, reflection notes, and user prompt + +3. **Refinement** (`refine.py`) - Primary LLM (llama.cpp) + - Polishes the draft answer + - Improves clarity and coherence + - Ensures factual consistency + +4. **Persona** (`speak.py`) - Cloud LLM (OpenAI) + - Applies Lyra's personality and speaking style + - Natural, conversational output + - Final answer returned to user + +--- + +## Features + +### Core Services + +**Relay**: +- Main orchestrator and message router +- OpenAI-compatible endpoint: `POST /v1/chat/completions` +- Internal endpoint: `POST /chat` +- Health check: `GET /_health` +- **NEW:** Dual-mode routing (Standard/Cortex) +- **NEW:** Server-side session persistence with CRUD API +- **NEW:** Session management endpoints: + - `GET /sessions` - List all sessions + - `GET /sessions/:id` - Retrieve session history + - `POST /sessions/:id` - Save session history + - `PATCH /sessions/:id/metadata` - Update session metadata + - `DELETE /sessions/:id` - Delete session +- Async non-blocking calls to Cortex +- Shared request handler for code reuse +- Comprehensive error handling + +**NeoMem (Memory Engine)**: +- Forked from Mem0 OSS - fully independent +- Drop-in compatible API (`/memories`, `/search`) +- Local-first: runs on FastAPI with Postgres + Neo4j +- No external SDK dependencies +- Semantic memory updates - compares embeddings and performs in-place updates +- Default service: `neomem-api` (port 7077) + +**UI**: +- Lightweight static HTML chat interface +- Cyberpunk theme with light/dark mode toggle +- **NEW:** Mode selector (Standard/Cortex) in header +- **NEW:** Settings modal (βš™ button) with: + - Backend selection for Standard Mode (SECONDARY/OPENAI/custom) + - Session management (view, delete sessions) + - Theme toggle (dark mode default) +- **NEW:** Server-synced session management + - Sessions persist across browsers and reboots + - Rename sessions with custom names + - Delete sessions with confirmation + - Automatic session save on every message +- OpenAI message format support + +### Reasoning Layer + +**Cortex** (v0.7.0): +- **NEW:** Dual operating modes: + - **Standard Mode** - Simple chat with context (`/simple` endpoint) + - User-selectable backend (SECONDARY, OPENAI, or custom) + - Full conversation history via Intake integration + - Bypasses reasoning pipeline for faster responses + - **Cortex Mode** - Full reasoning pipeline (`/reason` endpoint) + - Multi-stage processing: reflection β†’ reasoning β†’ refine β†’ persona + - Per-stage backend selection + - Autonomy system integration +- Flexible LLM backend routing via HTTP +- Async processing throughout +- Embedded Intake module for short-term context +- `/reason`, `/simple`, `/ingest`, `/health`, `/debug/sessions`, `/debug/summary` endpoints +- Lenient error handling - never fails the chat pipeline + +**Intake** (Embedded Module): +- **Architectural change**: Now runs as Python module inside Cortex container +- In-memory SESSIONS management (session_id β†’ buffer) +- Multi-level summarization: L1 (ultra-short), L5 (short), L10 (medium), L20 (detailed), L30 (full) +- Deferred summarization strategy - summaries generated during `/reason` call +- `bg_summarize()` is a logging stub - actual work deferred +- **Single-worker constraint**: SESSIONS requires single Uvicorn worker or Redis/shared storage + +**LLM Router**: +- Dynamic backend selection via HTTP +- Environment-driven configuration +- Support for llama.cpp, Ollama, OpenAI, custom endpoints +- Per-module backend preferences: + - `CORTEX_LLM=SECONDARY` (Ollama for reasoning) + - `INTAKE_LLM=PRIMARY` (llama.cpp for summarization) + - `SPEAK_LLM=OPENAI` (Cloud for persona) + - `NEOMEM_LLM=PRIMARY` (llama.cpp for memory operations) + +### Beta Lyrae (RAG Memory DB) - Currently Disabled -# Beta Lyrae (RAG Memory DB) - added 11-3-25 - **RAG Knowledge DB - Beta Lyrae (sheliak)** - - This module implements the **Retrieval-Augmented Generation (RAG)** layer for Project Lyra. + - This module implements the **Retrieval-Augmented Generation (RAG)** layer for Project Lyra. - It serves as the long-term searchable memory store that Cortex and Relay can query for relevant context before reasoning or response generation. - The system uses: - - **ChromaDB** for persistent vector storage - - **OpenAI Embeddings (`text-embedding-3-small`)** for semantic similarity - - **FastAPI** (port 7090) for the `/rag/search` REST endpoint - - Directory Layout - rag/ - β”œβ”€β”€ rag_chat_import.py # imports JSON chat logs - β”œβ”€β”€ rag_docs_import.py # (planned) PDF/EPUB/manual importer - β”œβ”€β”€ rag_build.py # legacy single-folder builder - β”œβ”€β”€ rag_query.py # command-line query helper - β”œβ”€β”€ rag_api.py # FastAPI service providing /rag/search - β”œβ”€β”€ chromadb/ # persistent vector store - β”œβ”€β”€ chatlogs/ # organized source data - β”‚ β”œβ”€β”€ poker/ - β”‚ β”œβ”€β”€ work/ - β”‚ β”œβ”€β”€ lyra/ - β”‚ β”œβ”€β”€ personal/ - β”‚ └── ... - └── import.log # progress log for batch runs - - **OpenAI chatlog importer. - - Takes JSON formatted chat logs and imports it to the RAG. - - **fetures include:** - - Recursive folder indexing with **category detection** from directory name - - Smart chunking for long messages (5 000 chars per slice) - - Automatic deduplication using SHA-1 hash of file + chunk - - Timestamps for both file modification and import time - - Full progress logging via tqdm - - Safe to run in background with nohup … & - - Metadata per chunk: - ```json - { - "chat_id": "", - "chunk_index": 0, - "source": "chatlogs/lyra/0002_cortex_LLMs_11-1-25.json", - "title": "cortex LLMs 11-1-25", - "role": "assistant", - "category": "lyra", - "type": "chat", - "file_modified": "2025-11-06T23:41:02", - "imported_at": "2025-11-07T03:55:00Z" - }``` + - **Status**: Disabled in docker-compose.yml (v0.5.1) -# Cortex VM (VM101, CT201) - - **CT201 main reasoning orchestrator.** - - This is the internal brain of Lyra. - - Running in a privellaged LXC. - - Currently a locally served LLM running on a Radeon Instinct HI50, using a customized version of vLLM that lets it use ROCm. - - Accessible via 10.0.0.43:8000/v1/completions. +The system uses: +- **ChromaDB** for persistent vector storage +- **OpenAI Embeddings (`text-embedding-3-small`)** for semantic similarity +- **FastAPI** (port 7090) for the `/rag/search` REST endpoint - - **Intake v0.1.1 ** - - Recieves messages from relay and summarizes them in a cascading format. - - Continues to summarize smaller amounts of exhanges while also generating large scale conversational summaries. (L20) - - Intake then sends to cortex for self reflection, neomem for memory consolidation. - - - **Reflect ** - -TBD +Directory Layout: +``` +rag/ +β”œβ”€β”€ rag_chat_import.py # imports JSON chat logs +β”œβ”€β”€ rag_docs_import.py # (planned) PDF/EPUB/manual importer +β”œβ”€β”€ rag_build.py # legacy single-folder builder +β”œβ”€β”€ rag_query.py # command-line query helper +β”œβ”€β”€ rag_api.py # FastAPI service providing /rag/search +β”œβ”€β”€ chromadb/ # persistent vector store +β”œβ”€β”€ chatlogs/ # organized source data +β”‚ β”œβ”€β”€ poker/ +β”‚ β”œβ”€β”€ work/ +β”‚ β”œβ”€β”€ lyra/ +β”‚ β”œβ”€β”€ personal/ +β”‚ └── ... +└── import.log # progress log for batch runs +``` -# Self hosted vLLM server # - - **CT201 main reasoning orchestrator.** - - This is the internal brain of Lyra. - - Running in a privellaged LXC. - - Currently a locally served LLM running on a Radeon Instinct HI50, using a customized version of vLLM that lets it use ROCm. - - Accessible via 10.0.0.43:8000/v1/completions. - - **Stack Flow** - - [Proxmox Host] - └── loads AMDGPU driver - └── boots CT201 (order=2) - - [CT201 GPU Container] - β”œβ”€β”€ lyra-start-vllm.sh β†’ starts vLLM ROCm model server - β”œβ”€β”€ lyra-vllm.service β†’ runs the above automatically - β”œβ”€β”€ lyra-core.service β†’ launches Cortex + Intake Docker stack - └── Docker Compose β†’ runs Cortex + Intake containers - - [Cortex Container] - β”œβ”€β”€ Listens on port 7081 - β”œβ”€β”€ Talks to NVGRAM (mem API) + Intake - └── Main relay between Lyra UI ↔ memory ↔ model - - [Intake Container] - β”œβ”€β”€ Listens on port 7080 - β”œβ”€β”€ Summarizes every few exchanges - β”œβ”€β”€ Writes summaries to /app/logs/summaries.log - └── Future: sends summaries β†’ Cortex for reflection - - -# Additional information available in the trilium docs. # ---- - -## πŸ“¦ Requirements - -- Docker + Docker Compose -- Postgres + Neo4j (for NeoMem) -- Access to an open AI or ollama style API. -- OpenAI API key (for Relay fallback LLMs) - -**Dependencies:** - - fastapi==0.115.8 - - uvicorn==0.34.0 - - pydantic==2.10.4 - - python-dotenv==1.0.1 - - psycopg>=3.2.8 - - ollama +**OpenAI chatlog importer features:** +- Recursive folder indexing with **category detection** from directory name +- Smart chunking for long messages (5,000 chars per slice) +- Automatic deduplication using SHA-1 hash of file + chunk +- Timestamps for both file modification and import time +- Full progress logging via tqdm +- Safe to run in background with `nohup … &` --- -πŸ”Œ Integration Notes +## Docker Deployment -Lyra-Core connects to neomem-api:8000 inside Docker or localhost:7077 locally. +All services run in a single docker-compose stack with the following containers: -API endpoints remain identical to Mem0 (/memories, /search). +**Active Services:** +- **relay** - Main orchestrator (port 7078) +- **cortex** - Reasoning engine with embedded Intake and Autonomy System (port 7081) -History and entity graphs managed internally via Postgres + Neo4j. +**Disabled Services (v0.6.0):** +- **neomem-postgres** - PostgreSQL with pgvector extension (port 5432) - *disabled while refining pipeline* +- **neomem-neo4j** - Neo4j graph database (ports 7474, 7687) - *disabled while refining pipeline* +- **neomem-api** - NeoMem memory service (port 7077) - *disabled while refining pipeline* +- **intake** - No longer needed (embedded in Cortex as of v0.5.1) +- **rag** - Beta Lyrae RAG service (port 7090) - currently disabled + +All containers communicate via the `lyra_net` Docker bridge network. + +## External LLM Services + +The following LLM backends are accessed via HTTP (not part of docker-compose): + +- **llama.cpp Server** (`http://10.0.0.44:8080`) + - AMD MI50 GPU-accelerated inference + - Primary backend for reasoning and refinement stages + - Model path: `/model` + +- **Ollama Server** (`http://10.0.0.3:11434`) + - RTX 3090 GPU-accelerated inference + - Secondary/configurable backend + - Model: qwen2.5:7b-instruct-q4_K_M + +- **OpenAI API** (`https://api.openai.com/v1`) + - Cloud-based inference + - Used for reflection and persona stages + - Model: gpt-4o-mini + +- **Fallback Server** (`http://10.0.0.41:11435`) + - Emergency backup endpoint + - Local llama-3.2-8b-instruct model --- -🧱 Architecture Snapshot +## Version History - User β†’ Relay β†’ Cortex - ↓ - [RAG Search] - ↓ - [Reflection Loop] - ↓ - Intake (async summaries) - ↓ - NeoMem (persistent memory) +### v0.9.0 (2025-12-29) - Current Release +**Major Feature: Trilium Notes Integration** +- βœ… Added Trilium ETAPI integration for knowledge base access +- βœ… `search_notes()` tool for searching personal notes during conversations +- βœ… `create_note()` tool for capturing insights and information +- βœ… ETAPI authentication with secure token management +- βœ… Complete setup documentation and API reference +- βœ… Environment configuration with feature flag (`ENABLE_TRILIUM`) +- βœ… Automatic parent note handling (defaults to "root") +- βœ… Connection error handling and user-friendly messages -**Cortex v0.4.1 introduces the first fully integrated reasoning loop.** -- Data Flow: - - User message enters Cortex via /reason. - - Cortex assembles context: - - Intake summaries (short-term memory) - - RAG contextual data (knowledge base) - - LLM generates initial draft (call_llm). - - Reflection loop critiques and refines the answer. - - Intake asynchronously summarizes and sends snapshots to NeoMem. +**Key Capabilities:** +- Search your Trilium notes during conversations for context +- Create new notes from conversation insights automatically +- Cross-reference information between chat and knowledge base +- Future: Find duplicates, suggest organization, summarize notes -RAG API Configuration: -Set RAG_API_URL in .env (default: http://localhost:7090). +**Documentation:** +- Added [TRILIUM_SETUP.md](TRILIUM_SETUP.md) - Complete setup guide +- Added [docs/TRILIUM_API.md](docs/TRILIUM_API.md) - Full API reference + +### v0.8.0 (2025-12-26) +**Major Feature: Agentic Tool Calling + "Show Your Work"** +- βœ… Added tool calling system for Standard Mode +- βœ… Real-time thinking stream visualization +- βœ… Sandboxed code execution (Python, JavaScript, Bash) +- βœ… Web search integration via Tavily API +- βœ… Server-Sent Events (SSE) for live tool execution updates + +### v0.7.0 (2025-12-21) +**Major Features: Standard Mode + Backend Selection + Session Persistence** +- βœ… Added Standard Mode for simple chatbot functionality +- βœ… UI mode selector (Standard/Cortex) in header +- βœ… Settings modal with backend selection for Standard Mode +- βœ… Server-side session persistence with file-based storage +- βœ… Session management UI (view, rename, delete sessions) +- βœ… Light/Dark mode toggle (dark by default) +- βœ… Context retention in Standard Mode via Intake integration +- βœ… Fixed modal positioning and z-index issues +- βœ… Cortex `/simple` endpoint for direct LLM calls +- βœ… Session CRUD API in Relay +- βœ… Full backward compatibility - Cortex Mode unchanged + +**Key Changes:** +- Standard Mode bypasses 6 of 7 reasoning stages for faster responses +- Sessions now sync across browsers and survive container restarts +- User can select SECONDARY (Ollama), OPENAI, or custom backend for Standard Mode +- Theme preference and backend selection persisted in localStorage +- Session files stored in `core/relay/sessions/` directory + +### v0.6.0 (2025-12-18) +**Major Feature: Autonomy System (Phase 1, 2, and 2.5)** +- βœ… Added autonomous decision-making framework +- βœ… Implemented executive planning and goal-setting layer +- βœ… Added pattern learning system for adaptive behavior +- βœ… Implemented proactive monitoring capabilities +- βœ… Created self-analysis and performance tracking system +- βœ… Integrated self-state persistence (`cortex/data/self_state.json`) +- βœ… Built decision engine with orchestrator coordination +- βœ… Added autonomous action execution framework +- βœ… Integrated autonomy into reasoning and persona layers +- βœ… Created comprehensive test suites for autonomy features +- βœ… Added complete system breakdown documentation + +**Architecture Changes:** +- Autonomy system integrated into Cortex reasoning pipeline +- Multi-layered autonomous decision-making architecture +- Self-state tracking across sessions +- NeoMem disabled by default while refining pipeline integration +- Enhanced orchestrator with flexible service controls + +**Documentation:** +- Added [PROJECT_LYRA_COMPLETE_BREAKDOWN.md](docs/PROJECT_LYRA_COMPLETE_BREAKDOWN.md) +- Updated changelog with comprehensive autonomy system details + +### v0.5.1 (2025-12-11) +**Critical Intake Integration Fixes:** +- βœ… Fixed `bg_summarize()` NameError preventing SESSIONS persistence +- βœ… Fixed `/ingest` endpoint unreachable code +- βœ… Added `cortex/intake/__init__.py` for proper package structure +- βœ… Added diagnostic logging to verify SESSIONS singleton behavior +- βœ… Added `/debug/sessions` and `/debug/summary` endpoints +- βœ… Documented single-worker constraint in Dockerfile +- βœ… Implemented lenient error handling (never fails chat pipeline) +- βœ… Intake now embedded in Cortex - no longer standalone service + +**Architecture Changes:** +- Intake module runs inside Cortex container as pure Python import +- No HTTP calls between Cortex and Intake (internal function calls) +- SESSIONS persist correctly in Uvicorn worker +- Deferred summarization strategy (summaries generated during `/reason`) + +### v0.5.0 (2025-11-28) +- βœ… Fixed all critical API wiring issues +- βœ… Added OpenAI-compatible endpoint to Relay (`/v1/chat/completions`) +- βœ… Fixed Cortex β†’ Intake integration +- βœ… Added missing Python package `__init__.py` files +- βœ… End-to-end message flow verified and working + +### Infrastructure v1.0.0 (2025-11-26) +- Consolidated 9 scattered `.env` files into single source of truth +- Multi-backend LLM strategy implemented +- Docker Compose consolidation +- Created `.env.example` security templates + +### v0.4.x (Major Rewire) +- Cortex multi-stage reasoning pipeline +- LLM router with multi-backend support +- Major architectural restructuring + +### v0.3.x +- Beta Lyrae RAG system +- NeoMem integration +- Basic Cortex reasoning loop --- -## Setup and Operation ## +## Known Issues (v0.7.0) -## Beta Lyrae - RAG memory system ## -**Requirements** - -Env= python 3.10+ - -Dependences: pip install chromadb openai tqdm python-dotenv fastapi uvicorn jq - -Persistent storage path: ./chromadb (can be moved to /mnt/data/lyra_rag_db) +### Temporarily Disabled +- **NeoMem disabled by default** - Being refined independently before full integration + - PostgreSQL + pgvector storage inactive + - Neo4j graph database inactive + - Memory persistence endpoints not active +- RAG service (Beta Lyrae) currently disabled in docker-compose.yml -**Import Chats** - - Chats need to be formatted into the correct format of - ``` - "messages": [ - { - "role:" "user", - "content": "Message here" - }, - "messages": [ - { - "role:" "assistant", - "content": "Message here" - },``` - - Organize the chats into categorical folders. This step is optional, but it helped me keep it straight. - - run "python3 rag_chat_import.py", chats will then be imported automatically. For reference, it took 32 Minutes to import 68 Chat logs (aprox 10.3MB). +### Standard Mode Limitations +- No reflection, reasoning, or refinement stages (by design) +- DeepSeek R1 not recommended for Standard Mode (generates reasoning artifacts) +- No RAG integration (same as Cortex Mode - currently disabled) +- No NeoMem memory storage (same as Cortex Mode - currently disabled) -**Build API Server** - - Run: rag_build.py, this automatically builds the chromaDB using data saved in the /chatlogs/ folder. (docs folder to be added in future.) - - Run: rag_api.py or ```uvicorn rag_api:app --host 0.0.0.0 --port 7090``` +### Session Management Limitations +- Sessions stored in container filesystem - requires volume mount for true persistence +- No session import/export functionality yet +- No session search or filtering +- Old localStorage sessions don't automatically migrate to server -**Query** - - Run: python3 rag_query.py "Question here?" - - For testing a curl command can reach it too - ``` - curl -X POST http://127.0.0.1:7090/rag/search \ - -H "Content-Type: application/json" \ - -d '{ - "query": "What is the current state of Cortex and Project Lyra?", - "where": {"category": "lyra"} - }' - ``` - -# Beta Lyrae - RAG System +### Operational Notes +- **Single-worker constraint**: Cortex must run with single Uvicorn worker to maintain SESSIONS state + - Multi-worker scaling requires migrating SESSIONS to Redis or shared storage +- Diagnostic endpoints (`/debug/sessions`, `/debug/summary`) available for troubleshooting +- Backend selection only affects Standard Mode - Cortex Mode uses environment-configured backends -## πŸ“– License -NeoMem is a derivative work based on the Mem0 OSS project (Apache 2.0). -This fork retains the original Apache 2.0 license and adds local modifications. +### Future Enhancements +- Re-enable NeoMem integration after pipeline refinement +- Full autonomy system maturation and optimization +- Re-enable RAG service integration +- Session import/export functionality +- Session search and filtering UI +- Migrate SESSIONS to Redis for multi-worker support +- Add request correlation IDs for tracing +- Comprehensive health checks across all services +- Enhanced pattern learning with long-term memory integration + +--- + +## Quick Start + +### Prerequisites +- Docker + Docker Compose +- At least one HTTP-accessible LLM endpoint (llama.cpp, Ollama, or OpenAI API key) + +### Setup +1. Copy `.env.example` to `.env` and configure your LLM backend URLs and API keys: + ```bash + # Required: Configure at least one LLM backend + LLM_PRIMARY_URL=http://10.0.0.44:8080 # llama.cpp + LLM_SECONDARY_URL=http://10.0.0.3:11434 # Ollama + OPENAI_API_KEY=sk-... # OpenAI + ``` + +2. Start all services with docker-compose: + ```bash + docker-compose up -d + ``` + +3. Check service health: + ```bash + # Relay health + curl http://localhost:7078/_health + + # Cortex health + curl http://localhost:7081/health + + # NeoMem health + curl http://localhost:7077/health + ``` + +4. Access the UI at `http://localhost:8081` + +### Using the UI + +**Mode Selection:** +- Use the **Mode** dropdown in the header to switch between: + - **Standard** - Simple chatbot for coding and practical tasks + - **Cortex** - Full reasoning pipeline with autonomy features + +**Settings Menu:** +1. Click the **βš™ Settings** button in the header +2. **Backend Selection** (Standard Mode only): + - Choose **SECONDARY** (Ollama/Qwen on 3090) - Fast, local + - Choose **OPENAI** (GPT-4o-mini) - Cloud-based, high quality + - Enter custom backend name for advanced configurations +3. **Session Management**: + - View all saved sessions with message counts and timestamps + - Click πŸ—‘οΈ to delete unwanted sessions +4. **Theme Toggle**: + - Click **πŸŒ™ Dark Mode** or **β˜€οΈ Light Mode** to switch themes + +**Session Management:** +- Sessions automatically save on every message +- Use the **Session** dropdown to switch between sessions +- Click **βž• New** to create a new session +- Click **✏️ Rename** to rename the current session +- Sessions persist across browsers and container restarts + +### Test + +**Test Standard Mode:** +```bash +curl -X POST http://localhost:7078/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{ + "mode": "standard", + "backend": "SECONDARY", + "messages": [{"role": "user", "content": "Hello!"}], + "sessionId": "test" + }' +``` + +**Test Cortex Mode (Full Reasoning):** +```bash +curl -X POST http://localhost:7078/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{ + "mode": "cortex", + "messages": [{"role": "user", "content": "Hello Lyra!"}], + "sessionId": "test" + }' +``` + +**Test Cortex /ingest endpoint:** +```bash +curl -X POST http://localhost:7081/ingest \ + -H "Content-Type: application/json" \ + -d '{ + "session_id": "test", + "user_msg": "Hello", + "assistant_msg": "Hi there!" + }' +``` + +**Inspect SESSIONS state:** +```bash +curl http://localhost:7081/debug/sessions +``` + +**Get summary for a session:** +```bash +curl "http://localhost:7081/debug/summary?session_id=test" +``` + +**List all sessions:** +```bash +curl http://localhost:7078/sessions +``` + +**Get session history:** +```bash +curl http://localhost:7078/sessions/sess-abc123 +``` + +**Delete a session:** +```bash +curl -X DELETE http://localhost:7078/sessions/sess-abc123 +``` + +All backend databases (PostgreSQL and Neo4j) are automatically started as part of the docker-compose stack. + +--- + +## Environment Variables + +### LLM Backend Configuration + +**Backend URLs (Full API endpoints):** +```bash +LLM_PRIMARY_URL=http://10.0.0.44:8080 # llama.cpp +LLM_PRIMARY_MODEL=/model + +LLM_SECONDARY_URL=http://10.0.0.3:11434 # Ollama +LLM_SECONDARY_MODEL=qwen2.5:7b-instruct-q4_K_M + +LLM_OPENAI_URL=https://api.openai.com/v1 +LLM_OPENAI_MODEL=gpt-4o-mini +OPENAI_API_KEY=sk-... +``` + +**Module-specific backend selection:** +```bash +CORTEX_LLM=SECONDARY # Use Ollama for reasoning +INTAKE_LLM=PRIMARY # Use llama.cpp for summarization +SPEAK_LLM=OPENAI # Use OpenAI for persona +NEOMEM_LLM=PRIMARY # Use llama.cpp for memory +UI_LLM=OPENAI # Use OpenAI for UI +RELAY_LLM=PRIMARY # Use llama.cpp for relay +STANDARD_MODE_LLM=SECONDARY # Default backend for Standard Mode (NEW in v0.7.0) +``` + +### Database Configuration +```bash +POSTGRES_USER=neomem +POSTGRES_PASSWORD=neomempass +POSTGRES_DB=neomem +POSTGRES_HOST=neomem-postgres +POSTGRES_PORT=5432 + +NEO4J_URI=bolt://neomem-neo4j:7687 +NEO4J_USERNAME=neo4j +NEO4J_PASSWORD=neomemgraph +``` + +### Service URLs (Internal Docker Network) +```bash +NEOMEM_API=http://neomem-api:7077 +CORTEX_API=http://cortex:7081 +CORTEX_REASON_URL=http://cortex:7081/reason +CORTEX_SIMPLE_URL=http://cortex:7081/simple # NEW in v0.7.0 +CORTEX_INGEST_URL=http://cortex:7081/ingest +RELAY_URL=http://relay:7078 +``` + +### Feature Flags +```bash +CORTEX_ENABLED=true +MEMORY_ENABLED=true +PERSONA_ENABLED=false +DEBUG_PROMPT=true +VERBOSE_DEBUG=true +ENABLE_TRILIUM=true # NEW in v0.9.0 +``` + +For complete environment variable reference, see [ENVIRONMENT_VARIABLES.md](ENVIRONMENT_VARIABLES.md). + +--- + +## Documentation + +- [CHANGELOG.md](CHANGELOG.md) - Detailed version history +- [PROJECT_SUMMARY.md](PROJECT_SUMMARY.md) - Comprehensive project overview for AI context +- [ENVIRONMENT_VARIABLES.md](ENVIRONMENT_VARIABLES.md) - Environment variable reference +- [DEPRECATED_FILES.md](DEPRECATED_FILES.md) - Deprecated files and migration guide + +--- + +## Troubleshooting + +### SESSIONS not persisting +**Symptom:** Intake buffer always shows 0 exchanges, summaries always empty. + +**Solution (Fixed in v0.5.1):** +- Ensure `cortex/intake/__init__.py` exists +- Check Cortex logs for `[Intake Module Init]` message showing SESSIONS object ID +- Verify single-worker mode (Dockerfile: `uvicorn main:app --workers 1`) +- Use `/debug/sessions` endpoint to inspect current state + +### Cortex connection errors +**Symptom:** Relay can't reach Cortex, 502 errors. + +**Solution:** +- Verify Cortex container is running: `docker ps | grep cortex` +- Check Cortex health: `curl http://localhost:7081/health` +- Verify environment variables: `CORTEX_REASON_URL=http://cortex:7081/reason` +- Check docker network: `docker network inspect lyra_net` + +### LLM backend timeouts +**Symptom:** Reasoning stage hangs or times out. + +**Solution:** +- Verify LLM backend is running and accessible +- Check LLM backend health: `curl http://10.0.0.44:8080/health` +- Increase timeout in llm_router.py if using slow models +- Check logs for specific backend errors + +--- + +## License + +NeoMem is a derivative work based on Mem0 OSS (Apache 2.0). Β© 2025 Terra-Mechanics / ServersDown Labs. All modifications released under Apache 2.0. +**Built with Claude Code** + +--- + +## Integration Notes + +- NeoMem API is compatible with Mem0 OSS endpoints (`/memories`, `/search`) +- All services communicate via Docker internal networking on the `lyra_net` bridge +- History and entity graphs are managed via PostgreSQL + Neo4j +- LLM backends are accessed via HTTP and configured in `.env` +- Intake module is imported internally by Cortex (no HTTP communication) +- SESSIONS state is maintained in-memory within Cortex container + +--- + +## Beta Lyrae - RAG Memory System (Currently Disabled) + +**Note:** The RAG service is currently disabled in docker-compose.yml + +### Requirements +- Python 3.10+ +- Dependencies: `chromadb openai tqdm python-dotenv fastapi uvicorn` +- Persistent storage: `./chromadb` or `/mnt/data/lyra_rag_db` + +### Setup +1. Import chat logs (must be in OpenAI message format): + ```bash + python3 rag/rag_chat_import.py + ``` + +2. Build and start the RAG API server: + ```bash + cd rag + python3 rag_build.py + uvicorn rag_api:app --host 0.0.0.0 --port 7090 + ``` + +3. Query the RAG system: + ```bash + curl -X POST http://127.0.0.1:7090/rag/search \ + -H "Content-Type: application/json" \ + -d '{ + "query": "What is the current state of Cortex?", + "where": {"category": "lyra"} + }' + ``` + +--- + +## Development Notes + +### Cortex Architecture (v0.6.0) +- Cortex contains embedded Intake module at `cortex/intake/` +- Intake is imported as: `from intake.intake import add_exchange_internal, SESSIONS` +- SESSIONS is a module-level global dictionary (singleton pattern) +- Single-worker constraint required to maintain SESSIONS state +- Diagnostic endpoints available for debugging: `/debug/sessions`, `/debug/summary` +- **NEW:** Autonomy system integrated at `cortex/autonomy/` + - Executive, decision, action, learning, and monitoring layers + - Self-state persistence in `cortex/data/self_state.json` + - Coordinated via orchestrator with flexible service controls + +### Adding New LLM Backends +1. Add backend URL to `.env`: + ```bash + LLM_CUSTOM_URL=http://your-backend:port + LLM_CUSTOM_MODEL=model-name + ``` + +2. Configure module to use new backend: + ```bash + CORTEX_LLM=CUSTOM + ``` + +3. Restart Cortex container: + ```bash + docker-compose restart cortex + ``` + +### Debugging Tips +- Enable verbose logging: `VERBOSE_DEBUG=true` in `.env` +- Check Cortex logs: `docker logs cortex -f` +- Check Relay logs: `docker logs relay -f` +- Inspect SESSIONS: `curl http://localhost:7081/debug/sessions` +- Test summarization: `curl "http://localhost:7081/debug/summary?session_id=test"` +- List sessions: `curl http://localhost:7078/sessions` +- Test Standard Mode: `curl -X POST http://localhost:7078/v1/chat/completions -H "Content-Type: application/json" -d '{"mode":"standard","backend":"SECONDARY","messages":[{"role":"user","content":"test"}],"sessionId":"test"}'` +- Monitor Docker network: `docker network inspect lyra_net` +- Check session files: `ls -la core/relay/sessions/` diff --git a/THINKING_STREAM.md b/THINKING_STREAM.md new file mode 100644 index 0000000..69bfdba --- /dev/null +++ b/THINKING_STREAM.md @@ -0,0 +1,163 @@ +# "Show Your Work" - Thinking Stream Feature + +Real-time Server-Sent Events (SSE) stream that broadcasts the internal thinking process during tool calling operations. + +## What It Does + +When Lyra uses tools to answer a question, you can now watch her "think" in real-time through a parallel stream: + +- πŸ€” **Thinking** - When she's planning what to do +- πŸ”§ **Tool Calls** - When she decides to use a tool +- πŸ“Š **Tool Results** - The results from tool execution +- βœ… **Done** - When she has the final answer +- ❌ **Errors** - If something goes wrong + +## How To Use + +### 1. Open the SSE Stream + +Connect to the thinking stream for a session: + +```bash +curl -N http://localhost:7081/stream/thinking/{session_id} +``` + +The stream will send Server-Sent Events in this format: + +``` +data: {"type": "thinking", "data": {"message": "πŸ€” Thinking... (iteration 1/5)"}} + +data: {"type": "tool_call", "data": {"tool": "execute_code", "args": {...}, "message": "πŸ”§ Using tool: execute_code"}} + +data: {"type": "tool_result", "data": {"tool": "execute_code", "result": {...}, "message": "πŸ“Š Result: ..."}} + +data: {"type": "done", "data": {"message": "βœ… Complete!", "final_answer": "The result is..."}} +``` + +### 2. Send a Request + +In parallel, send a request to `/simple` with the same `session_id`: + +```bash +curl -X POST http://localhost:7081/simple \ + -H "Content-Type: application/json" \ + -d '{ + "session_id": "your-session-id", + "user_prompt": "Calculate 50/2 using Python", + "backend": "SECONDARY" + }' +``` + +### 3. Watch the Stream + +As the request processes, you'll see real-time events showing: +- Each thinking iteration +- Every tool call being made +- The results from each tool +- The final answer + +## Event Types + +| Event Type | Description | Data Fields | +|-----------|-------------|-------------| +| `connected` | Initial connection | `session_id` | +| `thinking` | LLM is processing | `message` | +| `tool_call` | Tool is being invoked | `tool`, `args`, `message` | +| `tool_result` | Tool execution completed | `tool`, `result`, `message` | +| `done` | Process complete | `message`, `final_answer` | +| `error` | Something went wrong | `message` | + +## Demo Page + +A demo HTML page is included at [test_thinking_stream.html](../test_thinking_stream.html): + +```bash +# Serve the demo page +python3 -m http.server 8000 +``` + +Then open http://localhost:8000/test_thinking_stream.html in your browser. + +The demo shows: +- **Left panel**: Chat interface +- **Right panel**: Real-time thinking stream +- **Mobile**: Swipe between panels + +## Architecture + +### Components + +1. **ToolStreamManager** (`autonomy/tools/stream_events.py`) + - Manages SSE subscriptions per session + - Broadcasts events to all connected clients + - Handles automatic cleanup + +2. **FunctionCaller** (`autonomy/tools/function_caller.py`) + - Enhanced with event emission at each step + - Checks for active subscribers before emitting + - Passes `session_id` through the call chain + +3. **SSE Endpoint** (`/stream/thinking/{session_id}`) + - FastAPI streaming response + - 30-second keepalive for connection maintenance + - Automatic reconnection on client side + +### Event Flow + +``` +Client SSE Endpoint FunctionCaller Tools + | | | | + |--- Connect SSE -------->| | | + |<-- connected ----------| | | + | | | | + |--- POST /simple --------| | | + | | | | + | |<-- emit("thinking") ---| | + |<-- thinking ------------| | | + | | | | + | |<-- emit("tool_call") ---| | + |<-- tool_call -----------| | | + | | |-- execute ------>| + | | |<-- result -------| + | |<-- emit("tool_result")--| | + |<-- tool_result ---------| | | + | | | | + | |<-- emit("done") --------| | + |<-- done ---------------| | | + | | | | +``` + +## Configuration + +No additional configuration needed! The feature works automatically when: +1. `STANDARD_MODE_ENABLE_TOOLS=true` (already set) +2. A client connects to the SSE stream BEFORE sending the request + +## Example Output + +``` +🟒 Connected to thinking stream +βœ“ Connected (Session: thinking-demo-1735177234567) +πŸ€” Thinking... (iteration 1/5) +πŸ”§ Using tool: execute_code +πŸ“Š Result: {'stdout': '12.0\n', 'stderr': '', 'exit_code': 0, 'execution_time': 0.04} +πŸ€” Thinking... (iteration 2/5) +βœ… Complete! +``` + +## Use Cases + +- **Debugging**: See exactly what tools are being called and why +- **Transparency**: Show users what the AI is doing behind the scenes +- **Education**: Learn how the system breaks down complex tasks +- **UI Enhancement**: Create engaging "thinking" animations +- **Mobile App**: Separate tab for "Show Your Work" view + +## Future Enhancements + +Potential additions: +- Token usage per iteration +- Estimated time remaining +- Tool execution duration +- Intermediate reasoning steps +- Visual progress indicators diff --git a/TRILIUM_SETUP.md b/TRILIUM_SETUP.md new file mode 100644 index 0000000..077dab2 --- /dev/null +++ b/TRILIUM_SETUP.md @@ -0,0 +1,159 @@ +# Trilium ETAPI Integration Setup + +This guide will help you enable Lyra's integration with your Trilium notes using the ETAPI (External API). + +## What You Can Do with Trilium Integration + +Once enabled, Lyra can help you: +- πŸ” Search through your notes +- πŸ“ Create new notes from conversations +- πŸ”„ Find duplicate or similar notes +- 🏷️ Suggest better organization and tags +- πŸ“Š Summarize and update existing notes + +## Prerequisites + +- Trilium Notes installed and running +- Access to Trilium's web interface +- Lyra running on the same network as Trilium + +## Step 1: Generate ETAPI Token in Trilium + +1. **Open Trilium** in your web browser (e.g., `http://10.0.0.2:4292`) + +2. **Navigate to Options**: + - Click the menu icon (≑) in the top-left corner + - Select **"Options"** from the menu + +3. **Go to ETAPI Section**: + - In the Options sidebar, find and click **"ETAPI"** + - This section manages external API access + +4. **Generate a New Token**: + - Look for the **"Create New Token"** or **"Generate Token"** button + - Click it to create a new ETAPI token + - You may be asked to provide a name/description for the token (e.g., "Lyra Integration") + +5. **Copy the Token**: + - Once generated, you'll see a long string of characters (this is your token) + - **IMPORTANT**: Copy this token immediately - Trilium stores it hashed and you won't see it again! + - The token message will say: "ETAPI token created, copy the created token into the clipboard" + - Example format: `3ZOIydvNps3R_fZEE+kOFXiJlJ7vaeXHMEW6QuRYQm3+6qpjVxFwp9LE=` + +6. **Save the Token Securely**: + - Store it temporarily in a secure place (password manager or secure note) + - You'll need to paste it into Lyra's configuration in the next step + +## Step 2: Configure Lyra + +1. **Edit the Environment File**: + ```bash + nano /home/serversdown/project-lyra/.env + ``` + +2. **Add/Update Trilium Configuration**: + Find or add these lines: + ```env + # Trilium ETAPI Integration + ENABLE_TRILIUM=true + TRILIUM_URL=http://10.0.0.2:4292 + TRILIUM_ETAPI_TOKEN=your_token_here + + # Enable tools in standard mode (if not already set) + STANDARD_MODE_ENABLE_TOOLS=true + ``` + +3. **Replace `your_token_here`** with the actual token you copied from Trilium + +4. **Save and exit** (Ctrl+O, Enter, Ctrl+X in nano) + +## Step 3: Restart Cortex Service + +For the changes to take effect, restart the Cortex service: + +```bash +cd /home/serversdown/project-lyra +docker-compose restart cortex +``` + +Or if running with Docker directly: +```bash +docker restart cortex +``` + +## Step 4: Test the Integration + +Once restarted, try these example queries in Lyra (using Cortex mode): + +1. **Test Search**: + - "Search my Trilium notes for topics about AI" + - "Find notes containing 'project planning'" + +2. **Test Create Note**: + - "Create a note in Trilium titled 'Meeting Notes' with a summary of our conversation" + - "Save this to my Trilium as a new note" + +3. **Watch the Thinking Stream**: + - Open the thinking stream panel (🧠 Show Work) + - You should see tool calls to `search_notes` and `create_note` + +## Troubleshooting + +### "Connection refused" or "Cannot reach Trilium" +- Verify Trilium is running: `curl http://10.0.0.2:4292` +- Check that Cortex can access Trilium's network +- Ensure the URL in `.env` is correct + +### "Authentication failed" or "Invalid token" +- Double-check the token was copied correctly (no extra spaces) +- Generate a new token in Trilium if needed +- Verify `TRILIUM_ETAPI_TOKEN` in `.env` is set correctly + +### "No results found" when searching +- Verify you have notes in Trilium +- Try a broader search query +- Check Trilium's search functionality works directly + +### Tools not appearing in Cortex mode +- Verify `ENABLE_TRILIUM=true` is set +- Restart Cortex after changing `.env` +- Check Cortex logs: `docker logs cortex` + +## Security Notes + +⚠️ **Important Security Considerations**: + +- The ETAPI token provides **full access** to your Trilium notes +- Keep the token secure - do not share or commit to git +- The `.env` file should be in `.gitignore` (already configured) +- Consider using a dedicated token for Lyra (you can create multiple tokens) +- Revoke tokens you no longer use from Trilium's ETAPI settings + +## Available Functions + +Currently enabled functions: + +### `search_notes(query, limit)` +Search through your Trilium notes by keyword or phrase. + +**Example**: "Search my notes for 'machine learning' and show the top 5 results" + +### `create_note(title, content, parent_note_id)` +Create a new note in Trilium with specified title and content. + +**Example**: "Create a note called 'Ideas from Today' with this summary: [content]" + +**Optional**: Specify a parent note ID to nest the new note under an existing note. + +## Future Enhancements + +Potential additions to the integration: +- Update existing notes +- Retrieve full note content by ID +- Manage tags and attributes +- Clone/duplicate notes +- Export notes in various formats + +--- + +**Need Help?** Check the Cortex logs or open an issue on the project repository. diff --git a/UI_THINKING_STREAM.md b/UI_THINKING_STREAM.md new file mode 100644 index 0000000..f1975a0 --- /dev/null +++ b/UI_THINKING_STREAM.md @@ -0,0 +1,109 @@ +# Thinking Stream UI Integration + +## What Was Added + +Added a "🧠 Show Work" button to the main chat interface that opens a dedicated thinking stream window. + +## Changes Made + +### 1. Main Chat Interface ([core/ui/index.html](core/ui/index.html)) + +Added button to session selector: +```html + +``` + +Added event listener to open stream window: +```javascript +document.getElementById("thinkingStreamBtn").addEventListener("click", () => { + const streamUrl = `/thinking-stream.html?session=${currentSession}`; + const windowFeatures = "width=600,height=800,menubar=no,toolbar=no,location=no,status=no"; + window.open(streamUrl, `thinking_${currentSession}`, windowFeatures); +}); +``` + +### 2. Thinking Stream Window ([core/ui/thinking-stream.html](core/ui/thinking-stream.html)) + +New dedicated page for the thinking stream: +- **Header**: Shows connection status with live indicator +- **Events Area**: Scrollable list of thinking events +- **Footer**: Clear button and session info + +Features: +- Auto-reconnecting SSE connection +- Color-coded event types +- Slide-in animations for new events +- Automatic scrolling to latest event +- Session ID from URL parameter + +### 3. Styling ([core/ui/style.css](core/ui/style.css)) + +Added purple/violet theme for the thinking button: +```css +#thinkingStreamBtn { + background: rgba(138, 43, 226, 0.2); + border-color: #8a2be2; +} +``` + +## How To Use + +1. **Open Chat Interface** + - Navigate to http://localhost:7078 (relay) + - Select or create a session + +2. **Open Thinking Stream** + - Click the "🧠 Show Work" button + - A new window opens showing the thinking stream + +3. **Send a Message** + - Type a message that requires tools (e.g., "Calculate 50/2 in Python") + - Watch the thinking stream window for real-time updates + +4. **Observe Events** + - πŸ€” Thinking iterations + - πŸ”§ Tool calls + - πŸ“Š Tool results + - βœ… Completion + +## Event Types & Colors + +| Event | Icon | Color | Description | +|-------|------|-------|-------------| +| Connected | βœ“ | Green | Stream established | +| Thinking | πŸ€” | Light Green | LLM processing | +| Tool Call | πŸ”§ | Orange | Tool invocation | +| Tool Result | πŸ“Š | Blue | Tool output | +| Done | βœ… | Purple | Task complete | +| Error | ❌ | Red | Something failed | + +## Architecture + +``` +User clicks "Show Work" + ↓ +Opens thinking-stream.html?session=xxx + ↓ +Connects to SSE: /stream/thinking/{session} + ↓ +User sends message in main chat + ↓ +FunctionCaller emits events + ↓ +Events appear in thinking stream window +``` + +## Mobile Support + +The thinking stream window is responsive: +- Desktop: Side-by-side windows +- Mobile: Use browser's tab switcher to swap between chat and thinking stream + +## Future Enhancements + +Potential improvements: +- **Embedded panel**: Option to show thinking stream in a split panel within main UI +- **Event filtering**: Toggle event types on/off +- **Export**: Download thinking trace as JSON +- **Replay**: Replay past thinking sessions +- **Statistics**: Show timing, token usage per step diff --git a/core/PROJECT_SUMMARY.md b/core/PROJECT_SUMMARY.md deleted file mode 100644 index e4c7ddd..0000000 --- a/core/PROJECT_SUMMARY.md +++ /dev/null @@ -1,71 +0,0 @@ -# Lyra Core β€” Project Summary - -## v0.4 (2025-10-03) - -### 🧠 High-Level Architecture -- **Lyra Core (v0.3.1)** β€” Orchestration layer. - - Accepts chat requests (`/v1/chat/completions`). - - Routes through Cortex for subconscious annotation. - - Stores everything in Mem0 (no discard). - - Fetches persona + relevant memories. - - Injects context back into LLM. - -- **Cortex (v0.3.0)** β€” Subconscious annotator. - - Runs locally via `llama.cpp` (Phi-3.5-mini Q4_K_M). - - Strict JSON schema: - ```json - { - "sentiment": "positive" | "neutral" | "negative", - "novelty": 0.0–1.0, - "tags": ["keyword", "keyword"], - "notes": "short string" - } - ``` - - Normalizes keys (lowercase). - - Strips Markdown fences before parsing. - - Configurable via `.env` (`CORTEX_ENABLED=true|false`). - - Currently generates annotations, but not yet persisted into Mem0 payloads (stored as empty `{cortex:{}}`). - -- **Mem0 (v0.4.0)** β€” Persistent memory layer. - - Handles embeddings, graph storage, and retrieval. - - Dual embedder support: - - **OpenAI Cloud** (`text-embedding-3-small`, 1536-dim). - - **HuggingFace TEI** (gte-Qwen2-1.5B-instruct, 1536-dim, hosted on 3090). - - Environment toggle for provider (`.env.openai` vs `.env.3090`). - - Memory persistence in Postgres (`payload` JSON). - - CSV export pipeline confirmed (id, user_id, data, created_at). - -- **Persona Sidecar** - - Provides personality, style, and protocol instructions. - - Injected at runtime into Core prompt building. - ---- - -### πŸš€ Recent Changes -- **Mem0** - - Added HuggingFace TEI integration (local 3090 embedder). - - Enabled dual-mode environment switch (OpenAI cloud ↔ local TEI). - - Fixed `.env` line ending mismatch (CRLF vs LF). - - Added memory dump/export commands for Postgres. - -- **Core/Relay** - - No major changes since v0.3.1 (still routing input β†’ Cortex β†’ Mem0). - -- **Cortex** - - Still outputs annotations, but not yet persisted into Mem0 payloads. - ---- - -### πŸ“ˆ Versioning -- **Lyra Core** β†’ v0.3.1 -- **Cortex** β†’ v0.3.0 -- **Mem0** β†’ v0.4.0 - ---- - -### πŸ“‹ Next Steps -- [ ] Wire Cortex annotations into Mem0 payloads (`cortex` object). -- [ ] Add β€œexport all memories” script to standard workflow. -- [ ] Consider async embedding for faster `mem.add`. -- [ ] Build visual diagram of data flow (Core ↔ Cortex ↔ Mem0 ↔ Persona). -- [ ] Explore larger LLMs for Cortex (Qwen2-7B, etc.) for richer subconscious annotation. diff --git a/core/backups/mem0_20250927_221040.sql b/core/backups/mem0_20250927_221040.sql deleted file mode 100644 index 11300d4..0000000 --- a/core/backups/mem0_20250927_221040.sql +++ /dev/null @@ -1,133 +0,0 @@ --- --- PostgreSQL database dump --- - --- Dumped from database version 15.4 (Debian 15.4-2.pgdg120+1) --- Dumped by pg_dump version 15.4 (Debian 15.4-2.pgdg120+1) - -SET statement_timeout = 0; -SET lock_timeout = 0; -SET idle_in_transaction_session_timeout = 0; -SET client_encoding = 'UTF8'; -SET standard_conforming_strings = on; -SELECT pg_catalog.set_config('search_path', '', false); -SET check_function_bodies = false; -SET xmloption = content; -SET client_min_messages = warning; -SET row_security = off; - --- --- Name: vector; Type: EXTENSION; Schema: -; Owner: - --- - -CREATE EXTENSION IF NOT EXISTS vector WITH SCHEMA public; - - --- --- Name: EXTENSION vector; Type: COMMENT; Schema: -; Owner: --- - -COMMENT ON EXTENSION vector IS 'vector data type and ivfflat and hnsw access methods'; - - -SET default_tablespace = ''; - -SET default_table_access_method = heap; - --- --- Name: mem0migrations; Type: TABLE; Schema: public; Owner: mem0 --- - -CREATE TABLE public.mem0migrations ( - id uuid NOT NULL, - vector public.vector(1536), - payload jsonb -); - - -ALTER TABLE public.mem0migrations OWNER TO mem0; - --- --- Name: memories; Type: TABLE; Schema: public; Owner: mem0 --- - -CREATE TABLE public.memories ( - id uuid NOT NULL, - vector public.vector(1536), - payload jsonb -); - - -ALTER TABLE public.memories OWNER TO mem0; - --- --- Data for Name: mem0migrations; Type: TABLE DATA; Schema: public; Owner: mem0 --- - -COPY public.mem0migrations (id, vector, payload) FROM stdin; -9169baba-44dd-4ca8-b8a3-f5da66f57711 [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "9169baba-44dd-4ca8-b8a3-f5da66f57711"} -47563c3c-dbcb-4bcc-86b3-b424ad4b3a8e [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "47563c3c-dbcb-4bcc-86b3-b424ad4b3a8e"} -319b8d13-e309-4209-ab7e-f88b78e42b92 [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "319b8d13-e309-4209-ab7e-f88b78e42b92"} -62b93698-4b11-412a-9afc-6f6878ac542a [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "62b93698-4b11-412a-9afc-6f6878ac542a"} -8d64df79-fda4-498f-839f-777882e09a93 [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "8d64df79-fda4-498f-839f-777882e09a93"} -329f3ebf-0462-4c7a-b124-05bbb607b5bd [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "329f3ebf-0462-4c7a-b124-05bbb607b5bd"} -60410a79-05de-41e1-9699-6d0f15ac78ca [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "60410a79-05de-41e1-9699-6d0f15ac78ca"} -508736a5-2f98-4aeb-a0dc-76f6f5615c56 [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "508736a5-2f98-4aeb-a0dc-76f6f5615c56"} -689233da-edae-411e-a5f7-9c5a25e7b7e0 [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "689233da-edae-411e-a5f7-9c5a25e7b7e0"} -e7dd9004-58b8-4347-acc5-548d7cc873d2 [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "e7dd9004-58b8-4347-acc5-548d7cc873d2"} -b5d5272f-8010-489d-9b9e-f444545b60b7 [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "b5d5272f-8010-489d-9b9e-f444545b60b7"} -1c00dc8b-9216-4ee4-903e-07263e1341b9 [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "1c00dc8b-9216-4ee4-903e-07263e1341b9"} -f141aa2c-027d-4d22-b23e-ed20c149d96c [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "f141aa2c-027d-4d22-b23e-ed20c149d96c"} -1357021d-b707-418f-89b3-1e4763dd07ff [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "1357021d-b707-418f-89b3-1e4763dd07ff"} -7914c3af-0c9c-40d1-8041-1e94ad355e78 [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "7914c3af-0c9c-40d1-8041-1e94ad355e78"} -4f3b49f3-f4c9-4add-991e-caa43adaa09f [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "4f3b49f3-f4c9-4add-991e-caa43adaa09f"} -b2c48f12-76b2-465b-aee3-5a3caec10d2c [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "b2c48f12-76b2-465b-aee3-5a3caec10d2c"} -fbad421b-4500-4903-b85d-46f5f36236e0 [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "fbad421b-4500-4903-b85d-46f5f36236e0"} -3b3170e6-da35-4998-88b7-83d71a588cbc [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "3b3170e6-da35-4998-88b7-83d71a588cbc"} -34dff05f-09a3-4806-a981-f8d3b10df618 [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "34dff05f-09a3-4806-a981-f8d3b10df618"} -55604df0-0242-4e9a-8aed-cfd14e5a3cb9 [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1] {"type": "user_identity", "user_id": "55604df0-0242-4e9a-8aed-cfd14e5a3cb9"} -\. - - --- --- Data for Name: memories; Type: TABLE DATA; Schema: public; Owner: mem0 --- - -COPY public.memories (id, vector, payload) FROM stdin; -\. - - --- --- Name: mem0migrations mem0migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: mem0 --- - -ALTER TABLE ONLY public.mem0migrations - ADD CONSTRAINT mem0migrations_pkey PRIMARY KEY (id); - - --- --- Name: memories memories_pkey; Type: CONSTRAINT; Schema: public; Owner: mem0 --- - -ALTER TABLE ONLY public.memories - ADD CONSTRAINT memories_pkey PRIMARY KEY (id); - - --- --- Name: mem0migrations_hnsw_idx; Type: INDEX; Schema: public; Owner: mem0 --- - -CREATE INDEX mem0migrations_hnsw_idx ON public.mem0migrations USING hnsw (vector public.vector_cosine_ops); - - --- --- Name: memories_hnsw_idx; Type: INDEX; Schema: public; Owner: mem0 --- - -CREATE INDEX memories_hnsw_idx ON public.memories USING hnsw (vector public.vector_cosine_ops); - - --- --- PostgreSQL database dump complete --- - diff --git a/core/backups/mem0_history_20250927_220925.tgz b/core/backups/mem0_history_20250927_220925.tgz deleted file mode 100644 index bfb78b1..0000000 Binary files a/core/backups/mem0_history_20250927_220925.tgz and /dev/null differ diff --git a/core/docker-compose.yml b/core/docker-compose.yml deleted file mode 100644 index 67321b7..0000000 --- a/core/docker-compose.yml +++ /dev/null @@ -1,43 +0,0 @@ -services: - relay: - build: - context: ./relay - container_name: relay - restart: always - ports: - - "7078:7078" - env_file: - - .env - volumes: - - ./relay/sessions:/app/sessions - networks: - - lyra-net - - # persona-sidecar: - # build: - # context: ./persona-sidecar - # container_name: persona-sidecar - # env_file: - # - .env - # ports: - # - "7080:7080" - # volumes: - # - ./persona-sidecar/personas.json:/app/personas.json:rw - # restart: unless-stopped - # networks: - # - lyra-net - - lyra-ui: - image: nginx:alpine - container_name: lyra-ui - restart: unless-stopped - ports: - - "8081:80" - volumes: - - ./ui:/usr/share/nginx/html:ro - networks: - - lyra-net - -networks: - lyra-net: - external: true diff --git a/core/relay/lib/llm.js b/core/relay/lib/llm.js index 6e49927..01a382a 100644 --- a/core/relay/lib/llm.js +++ b/core/relay/lib/llm.js @@ -38,6 +38,8 @@ async function tryBackend(backend, messages) { // 🧩 Normalize replies let reply = ""; + let parsedData = null; + try { if (isOllama) { // Ollama sometimes returns NDJSON lines; merge them @@ -49,21 +51,75 @@ async function tryBackend(backend, messages) { .join(""); reply = merged.trim(); } else { - const data = JSON.parse(raw); - console.log("πŸ” RAW LLM RESPONSE:", JSON.stringify(data, null, 2)); + parsedData = JSON.parse(raw); reply = - data?.choices?.[0]?.text?.trim() || - data?.choices?.[0]?.message?.content?.trim() || - data?.message?.content?.trim() || + parsedData?.choices?.[0]?.text?.trim() || + parsedData?.choices?.[0]?.message?.content?.trim() || + parsedData?.message?.content?.trim() || ""; - - } } catch (err) { reply = `[parse error: ${err.message}]`; } - return { reply, raw, backend: backend.key }; + return { reply, raw, parsedData, backend: backend.key }; +} + +// ------------------------------------ +// Structured logging helper +// ------------------------------------ +const LOG_DETAIL = process.env.LOG_DETAIL_LEVEL || "summary"; // minimal | summary | detailed | verbose + +function logLLMCall(backend, messages, result, error = null) { + const timestamp = new Date().toISOString().split('T')[1].slice(0, -1); + + if (error) { + // Always log errors + console.warn(`⚠️ [LLM] ${backend.key.toUpperCase()} failed | ${timestamp} | ${error.message}`); + return; + } + + // Success - log based on detail level + if (LOG_DETAIL === "minimal") { + return; // Don't log successful calls in minimal mode + } + + if (LOG_DETAIL === "summary") { + console.log(`βœ… [LLM] ${backend.key.toUpperCase()} | ${timestamp} | Reply: ${result.reply.substring(0, 80)}...`); + return; + } + + // Detailed or verbose + console.log(`\n${'─'.repeat(100)}`); + console.log(`🧠 LLM CALL | Backend: ${backend.key.toUpperCase()} | ${timestamp}`); + console.log(`${'─'.repeat(100)}`); + + // Show prompt preview + const lastMsg = messages[messages.length - 1]; + const promptPreview = (lastMsg?.content || '').substring(0, 150); + console.log(`πŸ“ Prompt: ${promptPreview}...`); + + // Show parsed reply + console.log(`πŸ’¬ Reply: ${result.reply.substring(0, 200)}...`); + + // Show raw response only in verbose mode + if (LOG_DETAIL === "verbose" && result.parsedData) { + console.log(`\n╭─ RAW RESPONSE ────────────────────────────────────────────────────────────────────────────`); + const jsonStr = JSON.stringify(result.parsedData, null, 2); + const lines = jsonStr.split('\n'); + const maxLines = 50; + + lines.slice(0, maxLines).forEach(line => { + console.log(`β”‚ ${line}`); + }); + + if (lines.length > maxLines) { + console.log(`β”‚ ... (${lines.length - maxLines} more lines - check raw field for full response)`); + } + console.log(`β•°${'─'.repeat(95)}`); + } + + console.log(`${'─'.repeat(100)}\n`); } // ------------------------------------ @@ -77,17 +133,29 @@ export async function callSpeechLLM(messages) { { key: "fallback", type: "llamacpp", url: process.env.LLM_FALLBACK_URL, model: process.env.LLM_FALLBACK_MODEL }, ]; + const failedBackends = []; + for (const b of backends) { if (!b.url || !b.model) continue; + try { - console.log(`🧠 Trying backend: ${b.key.toUpperCase()} (${b.url})`); const out = await tryBackend(b, messages); - console.log(`βœ… Success via ${b.key.toUpperCase()}`); + logLLMCall(b, messages, out); return out; } catch (err) { - console.warn(`⚠️ ${b.key.toUpperCase()} failed: ${err.message}`); + logLLMCall(b, messages, null, err); + failedBackends.push({ backend: b.key, error: err.message }); } } + // All backends failed - log summary + console.error(`\n${'='.repeat(100)}`); + console.error(`πŸ”΄ ALL LLM BACKENDS FAILED`); + console.error(`${'='.repeat(100)}`); + failedBackends.forEach(({ backend, error }) => { + console.error(` ${backend.toUpperCase()}: ${error}`); + }); + console.error(`${'='.repeat(100)}\n`); + throw new Error("all_backends_failed"); } diff --git a/core/relay/package-lock.json b/core/relay/package-lock.json index fa0ee01..6f869dd 100644 --- a/core/relay/package-lock.json +++ b/core/relay/package-lock.json @@ -1,7 +1,7 @@ { "name": "lyra-relay", "version": "0.1.0", - "lockfileVersion": 2, + "lockfileVersion": 3, "requires": true, "packages": { "": { @@ -10,7 +10,7 @@ "dependencies": { "cors": "^2.8.5", "dotenv": "^16.6.1", - "express": "^4.18.2", + "express": "^4.21.2", "mem0ai": "^2.1.38", "node-fetch": "^3.3.2" } @@ -19,6 +19,7 @@ "version": "0.40.1", "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.40.1.tgz", "integrity": "sha512-DJMWm8lTEM9Lk/MSFL+V+ugF7jKOn0M2Ujvb5fN8r2nY14aHbGPZ1k6sgjL+tpJ3VuOGJNG+4R83jEpOuYPv8w==", + "license": "MIT", "peer": true, "dependencies": { "@types/node": "^18.11.18", @@ -34,6 +35,7 @@ "version": "2.7.0", "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "license": "MIT", "peer": true, "dependencies": { "whatwg-url": "^5.0.0" @@ -54,6 +56,7 @@ "version": "7.27.1", "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "license": "MIT", "peer": true, "dependencies": { "@babel/helper-validator-identifier": "^7.27.1", @@ -68,6 +71,7 @@ "version": "7.28.5", "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "license": "MIT", "peer": true, "engines": { "node": ">=6.9.0" @@ -77,18 +81,21 @@ "version": "4.1.1", "resolved": "https://registry.npmjs.org/@cfworker/json-schema/-/json-schema-4.1.1.tgz", "integrity": "sha512-gAmrUZSGtKc3AiBL71iNWxDsyUC5uMaKKGdvzYsBoTW/xi42JQHl7eKV2OYzCUqvc+D2RCcf7EXY2iCyFIk6og==", + "license": "MIT", "peer": true }, "node_modules/@cloudflare/workers-types": { - "version": "4.20251014.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workers-types/-/workers-types-4.20251014.0.tgz", - "integrity": "sha512-tEW98J/kOa0TdylIUOrLKRdwkUw0rvvYVlo+Ce0mqRH3c8kSoxLzUH9gfCvwLe0M89z1RkzFovSKAW2Nwtyn3w==", + "version": "4.20251128.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workers-types/-/workers-types-4.20251128.0.tgz", + "integrity": "sha512-gQxQvxLRsFb+mDlaBKGoJwEHWt+ox9telZZEuRMbNUAD6v78XYqZepTI4yPDdKhkRTlqYcDqDhIdAI3HrsGk7w==", + "license": "MIT OR Apache-2.0", "peer": true }, "node_modules/@fastify/busboy": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/@fastify/busboy/-/busboy-2.1.1.tgz", "integrity": "sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==", + "license": "MIT", "peer": true, "engines": { "node": ">=14" @@ -98,13 +105,15 @@ "version": "1.1.3", "resolved": "https://registry.npmjs.org/@gar/promisify/-/promisify-1.1.3.tgz", "integrity": "sha512-k2Ty1JcVojjJFwrg/ThKi2ujJ7XNLYaFGNB/bWT9wGR+oSMJHMa5w+CUq6p/pVrKeNNgA7pCqEcjSnHVoqJQFw==", + "license": "MIT", "optional": true, "peer": true }, "node_modules/@google/genai": { - "version": "1.28.0", - "resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.28.0.tgz", - "integrity": "sha512-0pfZ1EWQsM9kINsL+mFKJvpzM6NRHS9t360S1MzKq4JtIwTj/RbsPpC/K5wpKiPy9PC+J+bsz/9gvaL51++KrA==", + "version": "1.30.0", + "resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.30.0.tgz", + "integrity": "sha512-3MRcgczBFbUat1wIlZoLJ0vCCfXgm7Qxjh59cZi2X08RgWLtm9hKOspzp7TOg1TV2e26/MLxR2GR5yD5GmBV2w==", + "license": "Apache-2.0", "peer": true, "dependencies": { "google-auth-library": "^10.3.0", @@ -126,6 +135,7 @@ "version": "8.0.2", "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "license": "ISC", "peer": true, "dependencies": { "string-width": "^5.1.2", @@ -143,6 +153,7 @@ "version": "29.7.0", "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz", "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==", + "license": "MIT", "peer": true, "dependencies": { "jest-get-type": "^29.6.3" @@ -155,6 +166,7 @@ "version": "29.6.3", "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "license": "MIT", "peer": true, "dependencies": { "@sinclair/typebox": "^0.27.8" @@ -167,6 +179,7 @@ "version": "29.6.3", "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "license": "MIT", "peer": true, "dependencies": { "@jest/schemas": "^29.6.3", @@ -184,6 +197,7 @@ "version": "0.3.79", "resolved": "https://registry.npmjs.org/@langchain/core/-/core-0.3.79.tgz", "integrity": "sha512-ZLAs5YMM5N2UXN3kExMglltJrKKoW7hs3KMZFlXUnD7a5DFKBYxPFMeXA4rT+uvTxuJRZPCYX0JKI5BhyAWx4A==", + "license": "MIT", "peer": true, "dependencies": { "@cfworker/json-schema": "^4.0.2", @@ -217,6 +231,7 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-1.1.1.tgz", "integrity": "sha512-8KG5RD0GVP4ydEzRn/I4BNDuxDtqVbOdm8675T49OIG/NGhaK0pjPX7ZcDlvKYbA+ulvVK3ztfcF4uBdOxuJbQ==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -229,6 +244,7 @@ "resolved": "https://registry.npmjs.org/@npmcli/move-file/-/move-file-1.1.2.tgz", "integrity": "sha512-1SUf/Cg2GzGDyaf15aR9St9TWlb+XvbZXWpDx8YKs7MLzMH/BCeopv+y9vzrzgkfykCGuWOlSu3mZhj2+FQcrg==", "deprecated": "This functionality has been moved to @npmcli/fs", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -243,6 +259,7 @@ "version": "1.1.12", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -255,6 +272,7 @@ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", "deprecated": "Glob versions prior to v9 are no longer supported", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -276,6 +294,7 @@ "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -290,6 +309,7 @@ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", "deprecated": "Rimraf versions prior to v4 are no longer supported", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -306,6 +326,7 @@ "version": "0.11.0", "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "license": "MIT", "optional": true, "peer": true, "engines": { @@ -316,6 +337,7 @@ "version": "1.13.0", "resolved": "https://registry.npmjs.org/@qdrant/js-client-rest/-/js-client-rest-1.13.0.tgz", "integrity": "sha512-bewMtnXlGvhhnfXsp0sLoLXOGvnrCM15z9lNlG0Snp021OedNAnRtKkerjk5vkOcbQWUmJHXYCuxDfcT93aSkA==", + "license": "Apache-2.0", "peer": true, "dependencies": { "@qdrant/openapi-typescript-fetch": "1.2.6", @@ -334,6 +356,7 @@ "version": "1.2.6", "resolved": "https://registry.npmjs.org/@qdrant/openapi-typescript-fetch/-/openapi-typescript-fetch-1.2.6.tgz", "integrity": "sha512-oQG/FejNpItrxRHoyctYvT3rwGZOnK4jr3JdppO/c78ktDvkWiPXPHNsrDf33K9sZdRb6PR7gi4noIapu5q4HA==", + "license": "MIT", "peer": true, "engines": { "node": ">=18.0.0", @@ -344,6 +367,7 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.2.0.tgz", "integrity": "sha512-HG2DFjYKbpNmVXsa0keLHp/3leGJz1mjh09f2RLGGLQZzSHpkmZWuwJbAvo3QcRY8p80m5+ZdXZdYOSBLlp7Cg==", + "license": "MIT", "peer": true, "peerDependencies": { "@redis/client": "^1.0.0" @@ -353,6 +377,7 @@ "version": "1.6.1", "resolved": "https://registry.npmjs.org/@redis/client/-/client-1.6.1.tgz", "integrity": "sha512-/KCsg3xSlR+nCK8/8ZYSknYxvXHwubJrU82F3Lm1Fp6789VQ0/3RJKfsmRXjqfaTA++23CvC3hqmqe/2GEt6Kw==", + "license": "MIT", "peer": true, "dependencies": { "cluster-key-slot": "1.1.2", @@ -367,6 +392,7 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.1.1.tgz", "integrity": "sha512-FEMTcTHZozZciLRl6GiiIB4zGm5z5F3F6a6FZCyrfxdKOhFlGkiAqlexWMBzCi4DcRoyiOsuLfW+cjlGWyExOw==", + "license": "MIT", "peer": true, "peerDependencies": { "@redis/client": "^1.0.0" @@ -376,6 +402,7 @@ "version": "1.0.7", "resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.7.tgz", "integrity": "sha512-6UyXfjVaTBTJtKNG4/9Z8PSpKE6XgSyEb8iwaqDcy+uKrd/DGYHTWkUdnQDyzm727V7p21WUMhsqz5oy65kPcQ==", + "license": "MIT", "peer": true, "peerDependencies": { "@redis/client": "^1.0.0" @@ -385,6 +412,7 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/@redis/search/-/search-1.2.0.tgz", "integrity": "sha512-tYoDBbtqOVigEDMAcTGsRlMycIIjwMCgD8eR2t0NANeQmgK/lvxNAvYyb6bZDD4frHRhIHkJu2TBRvB0ERkOmw==", + "license": "MIT", "peer": true, "peerDependencies": { "@redis/client": "^1.0.0" @@ -394,6 +422,7 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.1.0.tgz", "integrity": "sha512-c1Q99M5ljsIuc4YdaCwfUEXsofakb9c8+Zse2qxTadu8TalLXuAESzLvFAvNVbkmSlvlzIQOLpBCmWI9wTOt+g==", + "license": "MIT", "peer": true, "peerDependencies": { "@redis/client": "^1.0.0" @@ -403,97 +432,107 @@ "version": "0.5.0", "resolved": "https://registry.npmjs.org/@sevinf/maybe/-/maybe-0.5.0.tgz", "integrity": "sha512-ARhyoYDnY1LES3vYI0fiG6e9esWfTNcXcO6+MPJJXcnyMV3bim4lnFt45VXouV7y82F4x3YH8nOQ6VztuvUiWg==", + "license": "MIT", "peer": true }, "node_modules/@sinclair/typebox": { "version": "0.27.8", "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "license": "MIT", "peer": true }, "node_modules/@supabase/auth-js": { - "version": "2.78.0", - "resolved": "https://registry.npmjs.org/@supabase/auth-js/-/auth-js-2.78.0.tgz", - "integrity": "sha512-cXDtu1U0LeZj/xfnFoV7yCze37TcbNo8FCxy1FpqhMbB9u9QxxDSW6pA5gm/07Ei7m260Lof4CZx67Cu6DPeig==", + "version": "2.86.0", + "resolved": "https://registry.npmjs.org/@supabase/auth-js/-/auth-js-2.86.0.tgz", + "integrity": "sha512-3xPqMvBWC6Haqpr6hEWmSUqDq+6SA1BAEdbiaHdAZM9QjZ5uiQJ+6iD9pZOzOa6MVXZh4GmwjhC9ObIG0K1NcA==", + "license": "MIT", "peer": true, "dependencies": { - "@supabase/node-fetch": "2.6.15", "tslib": "2.8.1" + }, + "engines": { + "node": ">=20.0.0" } }, "node_modules/@supabase/functions-js": { - "version": "2.78.0", - "resolved": "https://registry.npmjs.org/@supabase/functions-js/-/functions-js-2.78.0.tgz", - "integrity": "sha512-t1jOvArBsOINyqaRee1xJ3gryXLvkBzqnKfi6q3YRzzhJbGS6eXz0pXR5fqmJeB01fLC+1njpf3YhMszdPEF7g==", + "version": "2.86.0", + "resolved": "https://registry.npmjs.org/@supabase/functions-js/-/functions-js-2.86.0.tgz", + "integrity": "sha512-AlOoVfeaq9XGlBFIyXTmb+y+CZzxNO4wWbfgRM6iPpNU5WCXKawtQYSnhivi3UVxS7GA0rWovY4d6cIAxZAojA==", + "license": "MIT", "peer": true, "dependencies": { - "@supabase/node-fetch": "2.6.15", "tslib": "2.8.1" - } - }, - "node_modules/@supabase/node-fetch": { - "version": "2.6.15", - "resolved": "https://registry.npmjs.org/@supabase/node-fetch/-/node-fetch-2.6.15.tgz", - "integrity": "sha512-1ibVeYUacxWYi9i0cf5efil6adJ9WRyZBLivgjs+AUpewx1F3xPi7gLgaASI2SmIQxPoCEjAsLAzKPgMJVgOUQ==", - "peer": true, - "dependencies": { - "whatwg-url": "^5.0.0" }, "engines": { - "node": "4.x || >=6.0.0" + "node": ">=20.0.0" } }, "node_modules/@supabase/postgrest-js": { - "version": "2.78.0", - "resolved": "https://registry.npmjs.org/@supabase/postgrest-js/-/postgrest-js-2.78.0.tgz", - "integrity": "sha512-AwhpYlSvJ+PSnPmIK8sHj7NGDyDENYfQGKrMtpVIEzQA2ApUjgpUGxzXWN4Z0wEtLQsvv7g4y9HVad9Hzo1TNA==", + "version": "2.86.0", + "resolved": "https://registry.npmjs.org/@supabase/postgrest-js/-/postgrest-js-2.86.0.tgz", + "integrity": "sha512-QVf+wIXILcZJ7IhWhWn+ozdf8B+oO0Ulizh2AAPxD/6nQL+x3r9lJ47a+fpc/jvAOGXMbkeW534Kw6jz7e8iIA==", + "license": "MIT", "peer": true, "dependencies": { - "@supabase/node-fetch": "2.6.15", "tslib": "2.8.1" + }, + "engines": { + "node": ">=20.0.0" } }, "node_modules/@supabase/realtime-js": { - "version": "2.78.0", - "resolved": "https://registry.npmjs.org/@supabase/realtime-js/-/realtime-js-2.78.0.tgz", - "integrity": "sha512-rCs1zmLe7of7hj4s7G9z8rTqzWuNVtmwDr3FiCRCJFawEoa+RQO1xpZGbdeuVvVmKDyVN6b542Okci+117y/LQ==", + "version": "2.86.0", + "resolved": "https://registry.npmjs.org/@supabase/realtime-js/-/realtime-js-2.86.0.tgz", + "integrity": "sha512-dyS8bFoP29R/sj5zLi0AP3JfgG8ar1nuImcz5jxSx7UIW7fbFsXhUCVrSY2Ofo0+Ev6wiATiSdBOzBfWaiFyPA==", + "license": "MIT", "peer": true, "dependencies": { - "@supabase/node-fetch": "2.6.15", "@types/phoenix": "^1.6.6", "@types/ws": "^8.18.1", "tslib": "2.8.1", "ws": "^8.18.2" + }, + "engines": { + "node": ">=20.0.0" } }, "node_modules/@supabase/storage-js": { - "version": "2.78.0", - "resolved": "https://registry.npmjs.org/@supabase/storage-js/-/storage-js-2.78.0.tgz", - "integrity": "sha512-n17P0JbjHOlxqJpkaGFOn97i3EusEKPEbWOpuk1r4t00Wg06B8Z4GUiq0O0n1vUpjiMgJUkLIMuBVp+bEgunzQ==", + "version": "2.86.0", + "resolved": "https://registry.npmjs.org/@supabase/storage-js/-/storage-js-2.86.0.tgz", + "integrity": "sha512-PM47jX/Mfobdtx7NNpoj9EvlrkapAVTQBZgGGslEXD6NS70EcGjhgRPBItwHdxZPM5GwqQ0cGMN06uhjeY2mHQ==", + "license": "MIT", "peer": true, "dependencies": { - "@supabase/node-fetch": "2.6.15", + "iceberg-js": "^0.8.0", "tslib": "2.8.1" + }, + "engines": { + "node": ">=20.0.0" } }, "node_modules/@supabase/supabase-js": { - "version": "2.78.0", - "resolved": "https://registry.npmjs.org/@supabase/supabase-js/-/supabase-js-2.78.0.tgz", - "integrity": "sha512-xYMRNBFmKp2m1gMuwcp/gr/HlfZKqjye1Ib8kJe29XJNsgwsfO/f8skxnWiscFKTlkOKLuBexNgl5L8dzGt6vA==", + "version": "2.86.0", + "resolved": "https://registry.npmjs.org/@supabase/supabase-js/-/supabase-js-2.86.0.tgz", + "integrity": "sha512-BaC9sv5+HGNy1ulZwY8/Ev7EjfYYmWD4fOMw9bDBqTawEj6JHAiOHeTwXLRzVaeSay4p17xYLN2NSCoGgXMQnw==", + "license": "MIT", "peer": true, "dependencies": { - "@supabase/auth-js": "2.78.0", - "@supabase/functions-js": "2.78.0", - "@supabase/node-fetch": "2.6.15", - "@supabase/postgrest-js": "2.78.0", - "@supabase/realtime-js": "2.78.0", - "@supabase/storage-js": "2.78.0" + "@supabase/auth-js": "2.86.0", + "@supabase/functions-js": "2.86.0", + "@supabase/postgrest-js": "2.86.0", + "@supabase/realtime-js": "2.86.0", + "@supabase/storage-js": "2.86.0" + }, + "engines": { + "node": ">=20.0.0" } }, "node_modules/@tootallnate/once": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-1.1.2.tgz", "integrity": "sha512-RbzJvlNzmRq5c3O09UipeuXno4tA1FE6ikOjxZK0tuxVv3412l64l5t1W5pj4+rJq9vpkm/kwiR07aZXnsKPxw==", + "license": "MIT", "optional": true, "peer": true, "engines": { @@ -504,12 +543,14 @@ "version": "2.0.6", "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", + "license": "MIT", "peer": true }, "node_modules/@types/istanbul-lib-report": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", + "license": "MIT", "peer": true, "dependencies": { "@types/istanbul-lib-coverage": "*" @@ -519,6 +560,7 @@ "version": "3.0.4", "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", + "license": "MIT", "peer": true, "dependencies": { "@types/istanbul-lib-report": "*" @@ -528,6 +570,7 @@ "version": "29.5.14", "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.14.tgz", "integrity": "sha512-ZN+4sdnLUbo8EVvVc2ao0GFW6oVrQRPn4K2lglySj7APvSrgzxHiNNK99us4WDMi57xxA2yggblIAMNhXOotLQ==", + "license": "MIT", "peer": true, "dependencies": { "expect": "^29.0.0", @@ -538,6 +581,7 @@ "version": "18.19.130", "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.130.tgz", "integrity": "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==", + "license": "MIT", "dependencies": { "undici-types": "~5.26.4" } @@ -546,6 +590,7 @@ "version": "2.6.13", "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.13.tgz", "integrity": "sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==", + "license": "MIT", "dependencies": { "@types/node": "*", "form-data": "^4.0.4" @@ -555,6 +600,7 @@ "version": "8.11.0", "resolved": "https://registry.npmjs.org/@types/pg/-/pg-8.11.0.tgz", "integrity": "sha512-sDAlRiBNthGjNFfvt0k6mtotoVYVQ63pA8R4EMWka7crawSR60waVYR0HAgmPRs/e2YaeJTD/43OoZ3PFw80pw==", + "license": "MIT", "peer": true, "dependencies": { "@types/node": "*", @@ -566,18 +612,21 @@ "version": "1.6.6", "resolved": "https://registry.npmjs.org/@types/phoenix/-/phoenix-1.6.6.tgz", "integrity": "sha512-PIzZZlEppgrpoT2QgbnDU+MMzuR6BbCjllj0bM70lWoejMeNJAxCchxnv7J3XFkI8MpygtRpzXrIlmWUBclP5A==", + "license": "MIT", "peer": true }, "node_modules/@types/retry": { "version": "0.12.0", "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==", + "license": "MIT", "peer": true }, "node_modules/@types/sqlite3": { "version": "3.1.11", "resolved": "https://registry.npmjs.org/@types/sqlite3/-/sqlite3-3.1.11.tgz", "integrity": "sha512-KYF+QgxAnnAh7DWPdNDroxkDI3/MspH1NMx6m/N/6fT1G6+jvsw4/ZePt8R8cr7ta58aboeTfYFBDxTJ5yv15w==", + "license": "MIT", "peer": true, "dependencies": { "@types/node": "*" @@ -587,27 +636,31 @@ "version": "2.0.3", "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz", "integrity": "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==", + "license": "MIT", "peer": true }, "node_modules/@types/uuid": { "version": "10.0.0", "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-10.0.0.tgz", "integrity": "sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==", + "license": "MIT", "peer": true }, "node_modules/@types/ws": { "version": "8.18.1", "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.18.1.tgz", "integrity": "sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==", + "license": "MIT", "peer": true, "dependencies": { "@types/node": "*" } }, "node_modules/@types/yargs": { - "version": "17.0.34", - "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.34.tgz", - "integrity": "sha512-KExbHVa92aJpw9WDQvzBaGVE2/Pz+pLZQloT2hjL8IqsZnV62rlPOYvNnLmf/L2dyllfVUOVBj64M0z/46eR2A==", + "version": "17.0.35", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.35.tgz", + "integrity": "sha512-qUHkeCyQFxMXg79wQfTtfndEC+N9ZZg76HJftDJp+qH2tV7Gj4OJi7l+PiWwJ+pWtW8GwSmqsDj/oymhrTWXjg==", + "license": "MIT", "peer": true, "dependencies": { "@types/yargs-parser": "*" @@ -617,12 +670,14 @@ "version": "21.0.3", "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", + "license": "MIT", "peer": true }, "node_modules/abbrev": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", + "license": "ISC", "optional": true, "peer": true }, @@ -630,6 +685,7 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", + "license": "MIT", "dependencies": { "event-target-shim": "^5.0.0" }, @@ -641,6 +697,7 @@ "version": "1.3.8", "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "license": "MIT", "dependencies": { "mime-types": "~2.1.34", "negotiator": "0.6.3" @@ -653,6 +710,7 @@ "version": "7.1.4", "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + "license": "MIT", "peer": true, "engines": { "node": ">= 14" @@ -662,6 +720,7 @@ "version": "4.6.0", "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz", "integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==", + "license": "MIT", "dependencies": { "humanize-ms": "^1.2.1" }, @@ -673,6 +732,7 @@ "version": "3.1.0", "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -687,6 +747,7 @@ "version": "6.2.2", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "license": "MIT", "peer": true, "engines": { "node": ">=12" @@ -699,6 +760,7 @@ "version": "5.2.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "license": "MIT", "peer": true, "engines": { "node": ">=10" @@ -711,6 +773,7 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/aproba/-/aproba-2.1.0.tgz", "integrity": "sha512-tLIEcj5GuR2RSTnxNKdkK0dJ/GrC7P38sUkiDmDuHfsHmbagTFAxDVIBltoklXEVIQ/f14IL8IMJ5pn9Hez1Ew==", + "license": "ISC", "optional": true, "peer": true }, @@ -719,6 +782,7 @@ "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-3.0.1.tgz", "integrity": "sha512-QZW4EDmGwlYur0Yyf/b2uGucHQMa8aFUP7eu9ddR73vvhFyt4V0Vl3QHPcTNJ8l6qYOBdxgXdnBXQrHilfRQBg==", "deprecated": "This package is no longer supported.", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -732,17 +796,20 @@ "node_modules/array-flatten": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", - "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==", + "license": "MIT" }, "node_modules/asynckit": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "license": "MIT" }, "node_modules/axios": { "version": "1.7.7", "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.7.tgz", "integrity": "sha512-S4kL7XrjgBmvdGut0sN3yJxqYzrDOnivkBiN0OFs6hLiUam3UPvswUo0kqGyhqUZGEOytHyumEdXsAkgCOUf3Q==", + "license": "MIT", "dependencies": { "follow-redirects": "^1.15.6", "form-data": "^4.0.0", @@ -753,6 +820,7 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "license": "MIT", "peer": true }, "node_modules/base-64": { @@ -779,12 +847,14 @@ "url": "https://feross.org/support" } ], + "license": "MIT", "peer": true }, "node_modules/bignumber.js": { "version": "9.3.1", "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.3.1.tgz", "integrity": "sha512-Ko0uX15oIUS7wJ3Rb30Fs6SkVbLmPBAKdlm7q9+ak9bbIeFf0MwuBsQV6z7+X768/cHsfg+WlysDWJcmthjsjQ==", + "license": "MIT", "peer": true, "engines": { "node": "*" @@ -794,6 +864,7 @@ "version": "1.5.0", "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", + "license": "MIT", "peer": true, "dependencies": { "file-uri-to-path": "1.0.0" @@ -803,6 +874,7 @@ "version": "4.1.0", "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", + "license": "MIT", "peer": true, "dependencies": { "buffer": "^5.5.0", @@ -828,6 +900,7 @@ "url": "https://feross.org/support" } ], + "license": "MIT", "peer": true, "dependencies": { "base64-js": "^1.3.1", @@ -838,6 +911,7 @@ "version": "1.20.3", "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", + "license": "MIT", "dependencies": { "bytes": "3.1.2", "content-type": "~1.0.5", @@ -861,6 +935,7 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "license": "MIT", "peer": true, "dependencies": { "balanced-match": "^1.0.0" @@ -870,6 +945,7 @@ "version": "3.0.3", "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "license": "MIT", "peer": true, "dependencies": { "fill-range": "^7.1.1" @@ -896,6 +972,7 @@ "url": "https://feross.org/support" } ], + "license": "MIT", "peer": true, "dependencies": { "base64-js": "^1.3.1", @@ -906,12 +983,14 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==", + "license": "BSD-3-Clause", "peer": true }, "node_modules/buffer-writer": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/buffer-writer/-/buffer-writer-2.0.0.tgz", "integrity": "sha512-a7ZpuTZU1TRtnwyCNW3I5dc0wWNC3VR9S++Ewyk2HHZdrO3CQJqSpd+95Us590V6AL7JqUAH2IwZ/398PmNFgw==", + "license": "MIT", "peer": true, "engines": { "node": ">=4" @@ -921,6 +1000,7 @@ "version": "3.1.2", "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", "engines": { "node": ">= 0.8" } @@ -929,6 +1009,7 @@ "version": "15.3.0", "resolved": "https://registry.npmjs.org/cacache/-/cacache-15.3.0.tgz", "integrity": "sha512-VVdYzXEn+cnbXpFgWs5hTT7OScegHVmLhJIR8Ufqk3iFD6A6j5iSX1KuBTfNEv4tdJWE2PzA6IVFtcLC7fN9wQ==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -959,6 +1040,7 @@ "version": "1.1.12", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -971,6 +1053,7 @@ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", "deprecated": "Glob versions prior to v9 are no longer supported", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -992,6 +1075,7 @@ "version": "6.0.0", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -1005,6 +1089,7 @@ "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -1018,6 +1103,7 @@ "version": "3.3.6", "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -1032,6 +1118,7 @@ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", "deprecated": "Rimraf versions prior to v4 are no longer supported", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -1048,6 +1135,7 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", "dependencies": { "es-errors": "^1.3.0", "function-bind": "^1.1.2" @@ -1060,6 +1148,7 @@ "version": "1.0.4", "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "license": "MIT", "dependencies": { "call-bind-apply-helpers": "^1.0.2", "get-intrinsic": "^1.3.0" @@ -1075,6 +1164,7 @@ "version": "6.3.0", "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "license": "MIT", "peer": true, "engines": { "node": ">=10" @@ -1087,6 +1177,7 @@ "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "license": "MIT", "peer": true, "dependencies": { "ansi-styles": "^4.1.0", @@ -1103,6 +1194,7 @@ "version": "4.3.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", "peer": true, "dependencies": { "color-convert": "^2.0.1" @@ -1118,6 +1210,7 @@ "version": "0.0.2", "resolved": "https://registry.npmjs.org/charenc/-/charenc-0.0.2.tgz", "integrity": "sha512-yrLQ/yVUFXkzg7EDQsPieE/53+0RlaWTs+wBrvW36cyilJ2SaDWfl4Yj7MtLTXleV9uEKefbAGUPv2/iWSooRA==", + "license": "BSD-3-Clause", "peer": true, "engines": { "node": "*" @@ -1127,6 +1220,7 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", + "license": "ISC", "peer": true, "engines": { "node": ">=10" @@ -1142,6 +1236,7 @@ "url": "https://github.com/sponsors/sibiraj-s" } ], + "license": "MIT", "peer": true, "engines": { "node": ">=8" @@ -1151,6 +1246,7 @@ "version": "2.2.0", "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "license": "MIT", "optional": true, "peer": true, "engines": { @@ -1161,6 +1257,7 @@ "version": "4.5.0", "resolved": "https://registry.npmjs.org/cloudflare/-/cloudflare-4.5.0.tgz", "integrity": "sha512-fPcbPKx4zF45jBvQ0z7PCdgejVAPBBCZxwqk1k7krQNfpM07Cfj97/Q6wBzvYqlWXx/zt1S9+m8vnfCe06umbQ==", + "license": "Apache-2.0", "peer": true, "dependencies": { "@types/node": "^18.11.18", @@ -1176,6 +1273,7 @@ "version": "2.7.0", "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "license": "MIT", "peer": true, "dependencies": { "whatwg-url": "^5.0.0" @@ -1196,6 +1294,7 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.2.tgz", "integrity": "sha512-RMr0FhtfXemyinomL4hrWcYJxmX6deFdCxpJzhDttxgO1+bcCnkk+9drydLVDmAMG7NE6aN/fl4F7ucU/90gAA==", + "license": "Apache-2.0", "peer": true, "engines": { "node": ">=0.10.0" @@ -1205,6 +1304,7 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", "peer": true, "dependencies": { "color-name": "~1.1.4" @@ -1217,12 +1317,14 @@ "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "license": "MIT", "peer": true }, "node_modules/color-support": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/color-support/-/color-support-1.1.3.tgz", "integrity": "sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==", + "license": "ISC", "optional": true, "peer": true, "bin": { @@ -1233,6 +1335,7 @@ "version": "1.0.8", "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "license": "MIT", "dependencies": { "delayed-stream": "~1.0.0" }, @@ -1244,6 +1347,7 @@ "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "license": "MIT", "optional": true, "peer": true }, @@ -1251,6 +1355,7 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", "integrity": "sha512-ty/fTekppD2fIwRvnZAVdeOiGd1c7YXEixbgJTNzqcxJWKQnjJ/V1bNEEE6hygpM3WjwHFUVK6HTjWSzV4a8sQ==", + "license": "ISC", "optional": true, "peer": true }, @@ -1258,6 +1363,7 @@ "version": "2.15.0", "resolved": "https://registry.npmjs.org/console-table-printer/-/console-table-printer-2.15.0.tgz", "integrity": "sha512-SrhBq4hYVjLCkBVOWaTzceJalvn5K1Zq5aQA6wXC/cYjI3frKWNPEMK3sZsJfNNQApvCQmgBcc13ZKmFj8qExw==", + "license": "MIT", "peer": true, "dependencies": { "simple-wcswidth": "^1.1.2" @@ -1267,6 +1373,7 @@ "version": "0.5.4", "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "license": "MIT", "dependencies": { "safe-buffer": "5.2.1" }, @@ -1278,6 +1385,7 @@ "version": "1.0.5", "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -1286,6 +1394,7 @@ "version": "0.7.1", "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz", "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -1293,12 +1402,14 @@ "node_modules/cookie-signature": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", - "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" + "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==", + "license": "MIT" }, "node_modules/cors": { "version": "2.8.5", "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz", "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==", + "license": "MIT", "dependencies": { "object-assign": "^4", "vary": "^1" @@ -1311,6 +1422,7 @@ "version": "7.0.6", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "license": "MIT", "peer": true, "dependencies": { "path-key": "^3.1.0", @@ -1325,6 +1437,7 @@ "version": "0.0.2", "resolved": "https://registry.npmjs.org/crypt/-/crypt-0.0.2.tgz", "integrity": "sha512-mCxBlsHFYh9C+HVpiEacem8FEBnMXgU9gy4zmNC+SXAZNB/1idgp/aulFJ4FgCi7GPEVbfyng092GqL2k2rmow==", + "license": "BSD-3-Clause", "peer": true, "engines": { "node": "*" @@ -1334,6 +1447,7 @@ "version": "4.0.1", "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz", "integrity": "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==", + "license": "MIT", "engines": { "node": ">= 12" } @@ -1342,6 +1456,7 @@ "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", "dependencies": { "ms": "2.0.0" } @@ -1350,6 +1465,7 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", + "license": "MIT", "peer": true, "engines": { "node": ">=0.10.0" @@ -1359,6 +1475,7 @@ "version": "6.0.0", "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "license": "MIT", "peer": true, "dependencies": { "mimic-response": "^3.1.0" @@ -1374,6 +1491,7 @@ "version": "0.6.0", "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "license": "MIT", "peer": true, "engines": { "node": ">=4.0.0" @@ -1383,6 +1501,7 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "license": "MIT", "engines": { "node": ">=0.4.0" } @@ -1391,6 +1510,7 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", "integrity": "sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==", + "license": "MIT", "optional": true, "peer": true }, @@ -1398,6 +1518,7 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "license": "MIT", "engines": { "node": ">= 0.8" } @@ -1406,6 +1527,7 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "license": "MIT", "engines": { "node": ">= 0.8", "npm": "1.2.8000 || >= 1.4.16" @@ -1415,6 +1537,7 @@ "version": "2.1.2", "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz", "integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==", + "license": "Apache-2.0", "peer": true, "engines": { "node": ">=8" @@ -1424,6 +1547,7 @@ "version": "29.6.3", "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", + "license": "MIT", "peer": true, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" @@ -1433,6 +1557,7 @@ "version": "1.3.0", "resolved": "https://registry.npmjs.org/digest-fetch/-/digest-fetch-1.3.0.tgz", "integrity": "sha512-CGJuv6iKNM7QyZlM2T3sPAdZWd/p9zQiRNS9G+9COUCwzWFTs0Xp8NF5iePx7wtvhDykReiRRrSeNb4oMmB8lA==", + "license": "ISC", "peer": true, "dependencies": { "base-64": "^0.1.0", @@ -1443,6 +1568,7 @@ "version": "16.6.1", "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.6.1.tgz", "integrity": "sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==", + "license": "BSD-2-Clause", "engines": { "node": ">=12" }, @@ -1454,6 +1580,7 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-errors": "^1.3.0", @@ -1467,12 +1594,14 @@ "version": "0.2.0", "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "license": "MIT", "peer": true }, "node_modules/ecdsa-sig-formatter": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", + "license": "Apache-2.0", "peer": true, "dependencies": { "safe-buffer": "^5.0.1" @@ -1481,18 +1610,21 @@ "node_modules/ee-first": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", + "license": "MIT" }, "node_modules/emoji-regex": { "version": "9.2.2", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "license": "MIT", "peer": true }, "node_modules/encodeurl": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "license": "MIT", "engines": { "node": ">= 0.8" } @@ -1501,6 +1633,7 @@ "version": "0.1.13", "resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.13.tgz", "integrity": "sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -1511,6 +1644,7 @@ "version": "0.6.3", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -1524,6 +1658,7 @@ "version": "1.4.5", "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.5.tgz", "integrity": "sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==", + "license": "MIT", "peer": true, "dependencies": { "once": "^1.4.0" @@ -1533,6 +1668,7 @@ "version": "2.2.1", "resolved": "https://registry.npmjs.org/env-paths/-/env-paths-2.2.1.tgz", "integrity": "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==", + "license": "MIT", "optional": true, "peer": true, "engines": { @@ -1543,6 +1679,7 @@ "version": "2.0.3", "resolved": "https://registry.npmjs.org/err-code/-/err-code-2.0.3.tgz", "integrity": "sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==", + "license": "MIT", "optional": true, "peer": true }, @@ -1550,6 +1687,7 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", "engines": { "node": ">= 0.4" } @@ -1558,6 +1696,7 @@ "version": "1.3.0", "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", "engines": { "node": ">= 0.4" } @@ -1566,6 +1705,7 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", "dependencies": { "es-errors": "^1.3.0" }, @@ -1577,6 +1717,7 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "license": "MIT", "dependencies": { "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6", @@ -1590,12 +1731,14 @@ "node_modules/escape-html": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", - "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", + "license": "MIT" }, "node_modules/escape-string-regexp": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "license": "MIT", "peer": true, "engines": { "node": ">=8" @@ -1605,6 +1748,7 @@ "version": "1.8.1", "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -1613,6 +1757,7 @@ "version": "5.0.1", "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", + "license": "MIT", "engines": { "node": ">=6" } @@ -1621,12 +1766,14 @@ "version": "4.0.7", "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", + "license": "MIT", "peer": true }, "node_modules/expand-template": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/expand-template/-/expand-template-2.0.3.tgz", "integrity": "sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==", + "license": "(MIT OR WTFPL)", "peer": true, "engines": { "node": ">=6" @@ -1636,6 +1783,7 @@ "version": "29.7.0", "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==", + "license": "MIT", "peer": true, "dependencies": { "@jest/expect-utils": "^29.7.0", @@ -1652,6 +1800,7 @@ "version": "4.21.2", "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz", "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==", + "license": "MIT", "dependencies": { "accepts": "~1.3.8", "array-flatten": "1.1.1", @@ -1697,6 +1846,7 @@ "version": "3.0.2", "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "license": "MIT", "peer": true }, "node_modules/fetch-blob": { @@ -1713,6 +1863,7 @@ "url": "https://paypal.me/jimmywarting" } ], + "license": "MIT", "dependencies": { "node-domexception": "^1.0.0", "web-streams-polyfill": "^3.0.3" @@ -1725,6 +1876,7 @@ "version": "3.3.3", "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", + "license": "MIT", "engines": { "node": ">= 8" } @@ -1733,12 +1885,14 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", + "license": "MIT", "peer": true }, "node_modules/fill-range": { "version": "7.1.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "license": "MIT", "peer": true, "dependencies": { "to-regex-range": "^5.0.1" @@ -1751,6 +1905,7 @@ "version": "1.3.1", "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", + "license": "MIT", "dependencies": { "debug": "2.6.9", "encodeurl": "~2.0.0", @@ -1774,6 +1929,7 @@ "url": "https://github.com/sponsors/RubenVerborgh" } ], + "license": "MIT", "engines": { "node": ">=4.0" }, @@ -1787,6 +1943,7 @@ "version": "3.3.1", "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz", "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==", + "license": "ISC", "peer": true, "dependencies": { "cross-spawn": "^7.0.6", @@ -1800,9 +1957,10 @@ } }, "node_modules/form-data": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz", - "integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==", + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", + "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", + "license": "MIT", "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", @@ -1817,12 +1975,14 @@ "node_modules/form-data-encoder": { "version": "1.7.2", "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz", - "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==" + "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==", + "license": "MIT" }, "node_modules/formdata-node": { "version": "4.4.1", "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz", "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==", + "license": "MIT", "dependencies": { "node-domexception": "1.0.0", "web-streams-polyfill": "4.0.0-beta.3" @@ -1835,6 +1995,7 @@ "version": "4.0.10", "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", "integrity": "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==", + "license": "MIT", "dependencies": { "fetch-blob": "^3.1.2" }, @@ -1846,6 +2007,7 @@ "version": "0.2.0", "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -1854,6 +2016,7 @@ "version": "0.5.2", "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -1862,12 +2025,14 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==", + "license": "MIT", "peer": true }, "node_modules/fs-minipass": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", + "license": "ISC", "peer": true, "dependencies": { "minipass": "^3.0.0" @@ -1880,6 +2045,7 @@ "version": "3.3.6", "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "license": "ISC", "peer": true, "dependencies": { "yallist": "^4.0.0" @@ -1892,6 +2058,7 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "license": "ISC", "optional": true, "peer": true }, @@ -1899,6 +2066,7 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -1908,6 +2076,7 @@ "resolved": "https://registry.npmjs.org/gauge/-/gauge-4.0.4.tgz", "integrity": "sha512-f9m+BEN5jkg6a0fZjleidjN51VE1X+mPFQ2DJ0uv1V39oCLCbsGe6yjbBnp7eK7z/+GAon99a3nHuqbuuthyPg==", "deprecated": "This package is no longer supported.", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -1928,6 +2097,7 @@ "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", "optional": true, "peer": true, "engines": { @@ -1938,6 +2108,7 @@ "version": "8.0.0", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT", "optional": true, "peer": true }, @@ -1945,6 +2116,7 @@ "version": "3.0.7", "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "license": "ISC", "optional": true, "peer": true }, @@ -1952,6 +2124,7 @@ "version": "4.2.3", "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -1967,6 +2140,7 @@ "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -1980,6 +2154,7 @@ "version": "7.1.3", "resolved": "https://registry.npmjs.org/gaxios/-/gaxios-7.1.3.tgz", "integrity": "sha512-YGGyuEdVIjqxkxVH1pUTMY/XtmmsApXrCVv5EU25iX6inEPbV+VakJfLealkBtJN69AQmh1eGOdCl9Sm1UP6XQ==", + "license": "Apache-2.0", "peer": true, "dependencies": { "extend": "^3.0.2", @@ -1995,6 +2170,7 @@ "version": "8.1.2", "resolved": "https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-8.1.2.tgz", "integrity": "sha512-zV/5HKTfCeKWnxG0Dmrw51hEWFGfcF2xiXqcA3+J90WDuP0SvoiSO5ORvcBsifmx/FoIjgQN3oNOGaQ5PhLFkg==", + "license": "Apache-2.0", "peer": true, "dependencies": { "gaxios": "^7.0.0", @@ -2009,6 +2185,7 @@ "version": "3.9.0", "resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.9.0.tgz", "integrity": "sha512-hymDOu5B53XvN4QT9dBmZxPX4CWhBPPLguTZ9MMFeFa/Kg0xWVfylOVNlJji/E7yTZWFd/q9GO5TxDLq156D7g==", + "license": "MIT", "peer": true, "engines": { "node": ">= 4" @@ -2018,6 +2195,7 @@ "version": "1.3.0", "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", "dependencies": { "call-bind-apply-helpers": "^1.0.2", "es-define-property": "^1.0.1", @@ -2041,6 +2219,7 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", "dependencies": { "dunder-proto": "^1.0.1", "es-object-atoms": "^1.0.0" @@ -2053,12 +2232,14 @@ "version": "0.0.0", "resolved": "https://registry.npmjs.org/github-from-package/-/github-from-package-0.0.0.tgz", "integrity": "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==", + "license": "MIT", "peer": true }, "node_modules/glob": { - "version": "10.4.5", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", - "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", + "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", + "license": "ISC", "peer": true, "dependencies": { "foreground-child": "^3.1.0", @@ -2079,6 +2260,7 @@ "version": "10.5.0", "resolved": "https://registry.npmjs.org/google-auth-library/-/google-auth-library-10.5.0.tgz", "integrity": "sha512-7ABviyMOlX5hIVD60YOfHw4/CxOfBhyduaYB+wbFWCWoni4N7SLcV46hrVRktuBbZjFC9ONyqamZITN7q3n32w==", + "license": "Apache-2.0", "peer": true, "dependencies": { "base64-js": "^1.3.0", @@ -2094,9 +2276,10 @@ } }, "node_modules/google-logging-utils": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/google-logging-utils/-/google-logging-utils-1.1.2.tgz", - "integrity": "sha512-YsFPGVgDFf4IzSwbwIR0iaFJQFmR5Jp7V1WuYSjuRgAm9yWqsMhKE9YPlL+wvFLnc/wMiFV4SQUD9Y/JMpxIxQ==", + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/google-logging-utils/-/google-logging-utils-1.1.3.tgz", + "integrity": "sha512-eAmLkjDjAFCVXg7A1unxHsLf961m6y17QFqXqAXGj/gVkKFrEICfStRfwUlGNfeCEjNRa32JEWOUTlYXPyyKvA==", + "license": "Apache-2.0", "peer": true, "engines": { "node": ">=14" @@ -2106,6 +2289,7 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", "engines": { "node": ">= 0.4" }, @@ -2117,12 +2301,14 @@ "version": "4.2.11", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "license": "ISC", "peer": true }, "node_modules/groq-sdk": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/groq-sdk/-/groq-sdk-0.3.0.tgz", "integrity": "sha512-Cdgjh4YoSBE2X4S9sxPGXaAy1dlN4bRtAaDZ3cnq+XsxhhN9WSBeHF64l7LWwuD5ntmw7YC5Vf4Ff1oHCg1LOg==", + "license": "Apache-2.0", "peer": true, "dependencies": { "@types/node": "^18.11.18", @@ -2140,6 +2326,7 @@ "version": "2.7.0", "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "license": "MIT", "peer": true, "dependencies": { "whatwg-url": "^5.0.0" @@ -2160,6 +2347,7 @@ "version": "3.3.3", "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", + "license": "MIT", "peer": true, "engines": { "node": ">= 8" @@ -2169,6 +2357,7 @@ "version": "8.0.0", "resolved": "https://registry.npmjs.org/gtoken/-/gtoken-8.0.0.tgz", "integrity": "sha512-+CqsMbHPiSTdtSO14O51eMNlrp9N79gmeqmXeouJOhfucAedHw9noVe/n5uJk3tbKE6a+6ZCQg3RPhVhHByAIw==", + "license": "MIT", "peer": true, "dependencies": { "gaxios": "^7.0.0", @@ -2182,6 +2371,7 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "license": "MIT", "peer": true, "engines": { "node": ">=8" @@ -2191,6 +2381,7 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", "engines": { "node": ">= 0.4" }, @@ -2202,6 +2393,7 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "license": "MIT", "dependencies": { "has-symbols": "^1.0.3" }, @@ -2216,6 +2408,7 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz", "integrity": "sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ==", + "license": "ISC", "optional": true, "peer": true }, @@ -2223,6 +2416,7 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", "dependencies": { "function-bind": "^1.1.2" }, @@ -2234,6 +2428,7 @@ "version": "4.2.0", "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.2.0.tgz", "integrity": "sha512-dTxcvPXqPvXBQpq5dUr6mEMJX4oIEFv6bwom3FDwKRDsuIjjJGANqhBuoAn9c1RQJIdAKav33ED65E2ys+87QQ==", + "license": "BSD-2-Clause", "optional": true, "peer": true }, @@ -2241,6 +2436,7 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "license": "MIT", "dependencies": { "depd": "2.0.0", "inherits": "2.0.4", @@ -2256,6 +2452,7 @@ "version": "4.0.1", "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-4.0.1.tgz", "integrity": "sha512-k0zdNgqWTGA6aeIRVpvfVob4fL52dTfaehylg0Y4UvSySvOq/Y+BOyPrgpUrA7HylqvU8vIZGsRuXmspskV0Tg==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -2271,6 +2468,7 @@ "version": "6.0.2", "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -2284,6 +2482,7 @@ "version": "4.4.3", "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -2302,6 +2501,7 @@ "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT", "optional": true, "peer": true }, @@ -2309,6 +2509,7 @@ "version": "7.0.6", "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "license": "MIT", "peer": true, "dependencies": { "agent-base": "^7.1.2", @@ -2322,6 +2523,7 @@ "version": "4.4.3", "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", "peer": true, "dependencies": { "ms": "^2.1.3" @@ -2339,20 +2541,33 @@ "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT", "peer": true }, "node_modules/humanize-ms": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==", + "license": "MIT", "dependencies": { "ms": "^2.0.0" } }, + "node_modules/iceberg-js": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/iceberg-js/-/iceberg-js-0.8.0.tgz", + "integrity": "sha512-kmgmea2nguZEvRqW79gDqNXyxA3OS5WIgMVffrHpqXV4F/J4UmNIw2vstixioLTNSkd5rFB8G0s3Lwzogm6OFw==", + "license": "MIT", + "peer": true, + "engines": { + "node": ">=20.0.0" + } + }, "node_modules/iconv-lite": { "version": "0.4.24", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "license": "MIT", "dependencies": { "safer-buffer": ">= 2.1.2 < 3" }, @@ -2378,12 +2593,14 @@ "url": "https://feross.org/support" } ], + "license": "BSD-3-Clause", "peer": true }, "node_modules/imurmurhash": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "license": "MIT", "optional": true, "peer": true, "engines": { @@ -2394,6 +2611,7 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "license": "MIT", "optional": true, "peer": true, "engines": { @@ -2404,6 +2622,7 @@ "version": "1.0.4", "resolved": "https://registry.npmjs.org/infer-owner/-/infer-owner-1.0.4.tgz", "integrity": "sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A==", + "license": "ISC", "optional": true, "peer": true }, @@ -2412,6 +2631,7 @@ "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -2422,18 +2642,21 @@ "node_modules/inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" }, "node_modules/ini": { "version": "1.3.8", "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "license": "ISC", "peer": true }, "node_modules/ip-address": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-10.0.1.tgz", - "integrity": "sha512-NWv9YLW4PoW2B7xtzaS3NCot75m6nK7Icdv0o3lfMceJVRfSoQwqD4wEH5rLwoKJwUiZ/rfpiVBhnaF0FK4HoA==", + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-10.1.0.tgz", + "integrity": "sha512-XXADHxXmvT9+CRxhXg56LJovE+bmWnEWB78LB83VZTprKTmaC5QfruXocxzTZ2Kl0DNwKuBdlIhjL8LeY8Sf8Q==", + "license": "MIT", "optional": true, "peer": true, "engines": { @@ -2444,6 +2667,7 @@ "version": "1.9.1", "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "license": "MIT", "engines": { "node": ">= 0.10" } @@ -2452,12 +2676,14 @@ "version": "1.1.6", "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", + "license": "MIT", "peer": true }, "node_modules/is-fullwidth-code-point": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "license": "MIT", "peer": true, "engines": { "node": ">=8" @@ -2467,6 +2693,7 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/is-lambda/-/is-lambda-1.0.1.tgz", "integrity": "sha512-z7CMFGNrENq5iFB9Bqo64Xk6Y9sg+epq1myIcdHaGnbMTYOxvzsEtdYqQUylB7LxfkvgrrjP32T6Ywciio9UIQ==", + "license": "MIT", "optional": true, "peer": true }, @@ -2474,6 +2701,7 @@ "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "license": "MIT", "peer": true, "engines": { "node": ">=0.12.0" @@ -2483,12 +2711,14 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "license": "ISC", "peer": true }, "node_modules/jackspeak": { "version": "3.4.3", "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "license": "BlueOak-1.0.0", "peer": true, "dependencies": { "@isaacs/cliui": "^8.0.2" @@ -2504,6 +2734,7 @@ "version": "29.7.0", "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", + "license": "MIT", "peer": true, "dependencies": { "chalk": "^4.0.0", @@ -2519,6 +2750,7 @@ "version": "29.6.3", "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", + "license": "MIT", "peer": true, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" @@ -2528,6 +2760,7 @@ "version": "29.7.0", "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", + "license": "MIT", "peer": true, "dependencies": { "chalk": "^4.0.0", @@ -2543,6 +2776,7 @@ "version": "29.7.0", "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "license": "MIT", "peer": true, "dependencies": { "@babel/code-frame": "^7.12.13", @@ -2563,6 +2797,7 @@ "version": "29.7.0", "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "license": "MIT", "peer": true, "dependencies": { "@jest/types": "^29.6.3", @@ -2580,6 +2815,7 @@ "version": "1.0.21", "resolved": "https://registry.npmjs.org/js-tiktoken/-/js-tiktoken-1.0.21.tgz", "integrity": "sha512-biOj/6M5qdgx5TKjDnFT1ymSpM5tbd3ylwDtrQvFQSu0Z7bBYko2dF+W/aUkXUPuk6IVpRxk/3Q2sHOzGlS36g==", + "license": "MIT", "peer": true, "dependencies": { "base64-js": "^1.5.1" @@ -2589,12 +2825,14 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT", "peer": true }, "node_modules/json-bigint": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/json-bigint/-/json-bigint-1.0.0.tgz", "integrity": "sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ==", + "license": "MIT", "peer": true, "dependencies": { "bignumber.js": "^9.0.0" @@ -2604,6 +2842,7 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.1.tgz", "integrity": "sha512-hRF04fqJIP8Abbkq5NKGN0Bbr3JxlQ+qhZufXVr0DvujKy93ZCbXZMHDL4EOtodSbCWxOqR8MS1tXA5hwqCXDg==", + "license": "MIT", "peer": true, "dependencies": { "buffer-equal-constant-time": "^1.0.1", @@ -2615,6 +2854,7 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.0.tgz", "integrity": "sha512-KDncfTmOZoOMTFG4mBlG0qUIOlc03fmzH+ru6RgYVZhPkyiy/92Owlt/8UEN+a4TXR1FQetfIpJE8ApdvdVxTg==", + "license": "MIT", "peer": true, "dependencies": { "jwa": "^2.0.0", @@ -2622,16 +2862,16 @@ } }, "node_modules/langsmith": { - "version": "0.3.77", - "resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.3.77.tgz", - "integrity": "sha512-wbS/9IX/hOAsOEOtPj8kCS8H0tFHaelwQ97gTONRtIfoPPLd9MMUmhk0KQB5DdsGAI5abg966+f0dZ/B+YRRzg==", + "version": "0.3.82", + "resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.3.82.tgz", + "integrity": "sha512-RTcxtRm0zp2lV+pMesMW7EZSsIlqN7OmR2F6sZ/sOFQwmcLVl+VErMPV4VkX4Sycs4/EIAFT5hpr36EqiHoikQ==", + "license": "MIT", "peer": true, "dependencies": { "@types/uuid": "^10.0.0", "chalk": "^4.1.2", "console-table-printer": "^2.12.1", "p-queue": "^6.6.2", - "p-retry": "4", "semver": "^7.6.3", "uuid": "^10.0.0" }, @@ -2660,12 +2900,14 @@ "version": "10.4.3", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "license": "ISC", "peer": true }, "node_modules/make-fetch-happen": { "version": "9.1.0", "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-9.1.0.tgz", "integrity": "sha512-+zopwDy7DNknmwPQplem5lAZX/eCOzSvSNNcSKm5eVwTkOBzoktEfXsa9L23J/GIRhxRsaxzkPEhrJEpE2F4Gg==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -2694,6 +2936,7 @@ "version": "6.0.2", "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -2707,6 +2950,7 @@ "version": "4.4.3", "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -2725,6 +2969,7 @@ "version": "5.0.1", "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -2739,6 +2984,7 @@ "version": "6.0.0", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -2752,6 +2998,7 @@ "version": "3.3.6", "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -2765,6 +3012,7 @@ "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT", "optional": true, "peer": true }, @@ -2772,6 +3020,7 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", "engines": { "node": ">= 0.4" } @@ -2780,6 +3029,7 @@ "version": "2.3.0", "resolved": "https://registry.npmjs.org/md5/-/md5-2.3.0.tgz", "integrity": "sha512-T1GITYmFaKuO91vxyoQMFETst+O71VUPEU3ze5GNzDm0OWdP8v1ziTaAEPUr/3kLsY3Sftgz242A1SetQiDL7g==", + "license": "BSD-3-Clause", "peer": true, "dependencies": { "charenc": "0.0.2", @@ -2791,6 +3041,7 @@ "version": "0.3.0", "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -2799,6 +3050,7 @@ "version": "2.1.38", "resolved": "https://registry.npmjs.org/mem0ai/-/mem0ai-2.1.38.tgz", "integrity": "sha512-es8ffk0VbYJ1RDSblcoYzxaaafDMD8XgvyYTGb0HrKcDLj1rlvFqaV4K5IMBm4GGOAI+I0BwGh8d49z7vC/ajQ==", + "license": "Apache-2.0", "dependencies": { "axios": "1.7.7", "openai": "^4.93.0", @@ -2836,6 +3088,7 @@ "https://github.com/sponsors/broofa", "https://github.com/sponsors/ctavan" ], + "license": "MIT", "bin": { "uuid": "dist/bin/uuid" } @@ -2844,6 +3097,7 @@ "version": "1.0.3", "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "license": "MIT", "funding": { "url": "https://github.com/sponsors/sindresorhus" } @@ -2852,6 +3106,7 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -2860,6 +3115,7 @@ "version": "4.0.8", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "license": "MIT", "peer": true, "dependencies": { "braces": "^3.0.3", @@ -2873,6 +3129,7 @@ "version": "1.6.0", "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "license": "MIT", "bin": { "mime": "cli.js" }, @@ -2884,6 +3141,7 @@ "version": "1.52.0", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -2892,6 +3150,7 @@ "version": "2.1.35", "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", "dependencies": { "mime-db": "1.52.0" }, @@ -2903,6 +3162,7 @@ "version": "3.1.0", "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", + "license": "MIT", "peer": true, "engines": { "node": ">=10" @@ -2915,6 +3175,7 @@ "version": "9.0.5", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "license": "ISC", "peer": true, "dependencies": { "brace-expansion": "^2.0.1" @@ -2930,6 +3191,7 @@ "version": "1.2.8", "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "license": "MIT", "peer": true, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -2939,6 +3201,7 @@ "version": "7.1.2", "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "license": "ISC", "peer": true, "engines": { "node": ">=16 || 14 >=14.17" @@ -2948,6 +3211,7 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-1.0.2.tgz", "integrity": "sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -2961,6 +3225,7 @@ "version": "3.3.6", "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -2974,6 +3239,7 @@ "version": "1.4.1", "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-1.4.1.tgz", "integrity": "sha512-CGH1eblLq26Y15+Azk7ey4xh0J/XfJfrCox5LDJiKqI2Q2iwOLOKrlmIaODiSQS8d18jalF6y2K2ePUm0CmShw==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -2992,6 +3258,7 @@ "version": "3.3.6", "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -3005,6 +3272,7 @@ "version": "1.0.5", "resolved": "https://registry.npmjs.org/minipass-flush/-/minipass-flush-1.0.5.tgz", "integrity": "sha512-JmQSYYpPUqX5Jyn1mXaRwOda1uQ8HP5KAT/oDSLCzt1BYRhQU0/hDtsB1ufZfEEzMZ9aAVmsBw8+FWsIXlClWw==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -3018,6 +3286,7 @@ "version": "3.3.6", "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -3031,6 +3300,7 @@ "version": "1.2.4", "resolved": "https://registry.npmjs.org/minipass-pipeline/-/minipass-pipeline-1.2.4.tgz", "integrity": "sha512-xuIq7cIOt09RPRJ19gdi4b+RiNvDFYe5JH+ggNvBqGqpQXcru3PcRmOZuHBKWK1Txf9+cQ+HMVN4d6z46LZP7A==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -3044,6 +3314,7 @@ "version": "3.3.6", "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -3057,6 +3328,7 @@ "version": "1.0.3", "resolved": "https://registry.npmjs.org/minipass-sized/-/minipass-sized-1.0.3.tgz", "integrity": "sha512-MbkQQ2CTiBMlA2Dm/5cY+9SWFEN8pzzOXi6rlM5Xxq0Yqbda5ZQy9sU75a673FE9ZK0Zsbr6Y5iP6u9nktfg2g==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -3070,6 +3342,7 @@ "version": "3.3.6", "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -3083,6 +3356,7 @@ "version": "2.1.2", "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", + "license": "MIT", "peer": true, "dependencies": { "minipass": "^3.0.0", @@ -3096,6 +3370,7 @@ "version": "3.3.6", "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "license": "ISC", "peer": true, "dependencies": { "yallist": "^4.0.0" @@ -3108,6 +3383,7 @@ "version": "1.0.4", "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", + "license": "MIT", "peer": true, "bin": { "mkdirp": "bin/cmd.js" @@ -3120,17 +3396,20 @@ "version": "0.5.3", "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz", "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==", + "license": "MIT", "peer": true }, "node_modules/ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" }, "node_modules/mustache": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/mustache/-/mustache-4.2.0.tgz", "integrity": "sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==", + "license": "MIT", "peer": true, "bin": { "mustache": "bin/mustache" @@ -3140,12 +3419,14 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/napi-build-utils/-/napi-build-utils-2.0.0.tgz", "integrity": "sha512-GEbrYkbfF7MoNaoh2iGG84Mnf/WZfB0GdGEsM8wz7Expx/LlWf5U8t9nvJKXSp3qr5IsEbK04cBGhol/KwOsWA==", + "license": "MIT", "peer": true }, "node_modules/negotiator": { "version": "0.6.3", "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -3154,6 +3435,7 @@ "version": "5.28.2", "resolved": "https://registry.npmjs.org/neo4j-driver/-/neo4j-driver-5.28.2.tgz", "integrity": "sha512-nix4Canllf7Tl4FZL9sskhkKYoCp40fg7VsknSRTRgbm1JaE2F1Ej/c2nqlM06nqh3WrkI0ww3taVB+lem7w7w==", + "license": "Apache-2.0", "peer": true, "dependencies": { "neo4j-driver-bolt-connection": "5.28.2", @@ -3165,6 +3447,7 @@ "version": "5.28.2", "resolved": "https://registry.npmjs.org/neo4j-driver-bolt-connection/-/neo4j-driver-bolt-connection-5.28.2.tgz", "integrity": "sha512-dEX06iNPEo9iyCb0NssxJeA3REN+H+U/Y0MdAjJBEoil4tGz5PxBNZL6/+noQnu2pBJT5wICepakXCrN3etboA==", + "license": "Apache-2.0", "peer": true, "dependencies": { "buffer": "^6.0.3", @@ -3176,12 +3459,14 @@ "version": "5.28.2", "resolved": "https://registry.npmjs.org/neo4j-driver-core/-/neo4j-driver-core-5.28.2.tgz", "integrity": "sha512-fBMk4Ox379oOz4FcfdS6ZOxsTEypjkcAelNm9LcWQZ981xCdOnGMzlWL+qXECvL0qUwRfmZxoqbDlJzuzFrdvw==", + "license": "Apache-2.0", "peer": true }, "node_modules/node-abi": { - "version": "3.80.0", - "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.80.0.tgz", - "integrity": "sha512-LyPuZJcI9HVwzXK1GPxWNzrr+vr8Hp/3UqlmWxxh8p54U1ZbclOqbSog9lWHaCX+dBaiGi6n/hIX+mKu74GmPA==", + "version": "3.85.0", + "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.85.0.tgz", + "integrity": "sha512-zsFhmbkAzwhTft6nd3VxcG0cvJsT70rL+BIGHWVq5fi6MwGrHwzqKaxXE+Hl2GmnGItnDKPPkO5/LQqjVkIdFg==", + "license": "MIT", "peer": true, "dependencies": { "semver": "^7.3.5" @@ -3194,6 +3479,7 @@ "version": "7.1.1", "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-7.1.1.tgz", "integrity": "sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==", + "license": "MIT", "peer": true }, "node_modules/node-domexception": { @@ -3211,6 +3497,7 @@ "url": "https://paypal.me/jimmywarting" } ], + "license": "MIT", "engines": { "node": ">=10.5.0" } @@ -3219,6 +3506,7 @@ "version": "3.3.2", "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-3.3.2.tgz", "integrity": "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==", + "license": "MIT", "dependencies": { "data-uri-to-buffer": "^4.0.0", "fetch-blob": "^3.1.4", @@ -3236,6 +3524,7 @@ "version": "8.4.1", "resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-8.4.1.tgz", "integrity": "sha512-olTJRgUtAb/hOXG0E93wZDs5YiJlgbXxTwQAFHyNlRsXQnYzUaF2aGgujZbw+hR8aF4ZG/rST57bWMWD16jr9w==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -3261,6 +3550,7 @@ "version": "1.1.12", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -3273,6 +3563,7 @@ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", "deprecated": "Glob versions prior to v9 are no longer supported", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -3294,6 +3585,7 @@ "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -3308,6 +3600,7 @@ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", "deprecated": "Rimraf versions prior to v4 are no longer supported", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -3324,6 +3617,7 @@ "version": "5.0.0", "resolved": "https://registry.npmjs.org/nopt/-/nopt-5.0.0.tgz", "integrity": "sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -3341,6 +3635,7 @@ "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-6.0.2.tgz", "integrity": "sha512-/vBvz5Jfr9dT/aFWd0FIRf+T/Q2WBsLENygUaFUqstqsycmZAP/t5BvFJTK0viFmSUxiUKTUplWy5vt+rvKIxg==", "deprecated": "This package is no longer supported.", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -3357,6 +3652,7 @@ "version": "4.1.1", "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "license": "MIT", "engines": { "node": ">=0.10.0" } @@ -3365,6 +3661,7 @@ "version": "1.13.4", "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "license": "MIT", "engines": { "node": ">= 0.4" }, @@ -3376,12 +3673,14 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==", + "license": "MIT", "peer": true }, "node_modules/ollama": { "version": "0.5.18", "resolved": "https://registry.npmjs.org/ollama/-/ollama-0.5.18.tgz", "integrity": "sha512-lTFqTf9bo7Cd3hpF6CviBe/DEhewjoZYd9N/uCe7O20qYTvGqrNOFOBDj3lbZgFWHUgDv5EeyusYxsZSLS8nvg==", + "license": "MIT", "peer": true, "dependencies": { "whatwg-fetch": "^3.6.20" @@ -3391,6 +3690,7 @@ "version": "2.4.1", "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "license": "MIT", "dependencies": { "ee-first": "1.1.1" }, @@ -3402,6 +3702,7 @@ "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "license": "ISC", "peer": true, "dependencies": { "wrappy": "1" @@ -3411,6 +3712,7 @@ "version": "4.104.0", "resolved": "https://registry.npmjs.org/openai/-/openai-4.104.0.tgz", "integrity": "sha512-p99EFNsA/yX6UhVO93f5kJsDRLAg+CTA2RBqdHK4RtK8u5IJw32Hyb2dTGKbnnFmnuoBv5r7Z2CURI9sGZpSuA==", + "license": "Apache-2.0", "dependencies": { "@types/node": "^18.11.18", "@types/node-fetch": "^2.6.4", @@ -3440,6 +3742,7 @@ "version": "2.7.0", "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "license": "MIT", "dependencies": { "whatwg-url": "^5.0.0" }, @@ -3459,6 +3762,7 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", "integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==", + "license": "MIT", "peer": true, "engines": { "node": ">=4" @@ -3468,6 +3772,7 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -3484,6 +3789,7 @@ "version": "6.6.2", "resolved": "https://registry.npmjs.org/p-queue/-/p-queue-6.6.2.tgz", "integrity": "sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==", + "license": "MIT", "peer": true, "dependencies": { "eventemitter3": "^4.0.4", @@ -3500,6 +3806,7 @@ "version": "4.6.2", "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", + "license": "MIT", "peer": true, "dependencies": { "@types/retry": "0.12.0", @@ -3513,6 +3820,7 @@ "version": "3.2.0", "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-3.2.0.tgz", "integrity": "sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==", + "license": "MIT", "peer": true, "dependencies": { "p-finally": "^1.0.0" @@ -3525,18 +3833,21 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "license": "BlueOak-1.0.0", "peer": true }, "node_modules/packet-reader": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/packet-reader/-/packet-reader-1.0.0.tgz", "integrity": "sha512-HAKu/fG3HpHFO0AA8WE8q2g+gBJaZ9MG7fcKk+IJPLTGAD6Psw4443l+9DGRbOIh3/aXr7Phy0TjilYivJo5XQ==", + "license": "MIT", "peer": true }, "node_modules/parseurl": { "version": "1.3.3", "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "license": "MIT", "engines": { "node": ">= 0.8" } @@ -3545,6 +3856,7 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "license": "MIT", "optional": true, "peer": true, "engines": { @@ -3555,6 +3867,7 @@ "version": "3.1.1", "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "license": "MIT", "peer": true, "engines": { "node": ">=8" @@ -3564,6 +3877,7 @@ "version": "1.11.1", "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "license": "BlueOak-1.0.0", "peer": true, "dependencies": { "lru-cache": "^10.2.0", @@ -3579,12 +3893,14 @@ "node_modules/path-to-regexp": { "version": "0.1.12", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz", - "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==" + "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==", + "license": "MIT" }, "node_modules/pg": { "version": "8.11.3", "resolved": "https://registry.npmjs.org/pg/-/pg-8.11.3.tgz", "integrity": "sha512-+9iuvG8QfaaUrrph+kpF24cXkH1YOOUeArRNYIxq1viYHZagBxrTno7cecY1Fa44tJeZvaoG+Djpkc3JwehN5g==", + "license": "MIT", "peer": true, "dependencies": { "buffer-writer": "2.0.0", @@ -3614,6 +3930,7 @@ "version": "1.2.7", "resolved": "https://registry.npmjs.org/pg-cloudflare/-/pg-cloudflare-1.2.7.tgz", "integrity": "sha512-YgCtzMH0ptvZJslLM1ffsY4EuGaU0cx4XSdXLRFae8bPP4dS5xL1tNB3k2o/N64cHJpwU7dxKli/nZ2lUa5fLg==", + "license": "MIT", "optional": true, "peer": true }, @@ -3621,12 +3938,14 @@ "version": "2.9.1", "resolved": "https://registry.npmjs.org/pg-connection-string/-/pg-connection-string-2.9.1.tgz", "integrity": "sha512-nkc6NpDcvPVpZXxrreI/FOtX3XemeLl8E0qFr6F2Lrm/I8WOnaWNhIPK2Z7OHpw7gh5XJThi6j6ppgNoaT1w4w==", + "license": "MIT", "peer": true }, "node_modules/pg-int8": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/pg-int8/-/pg-int8-1.0.1.tgz", "integrity": "sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==", + "license": "ISC", "peer": true, "engines": { "node": ">=4.0.0" @@ -3636,6 +3955,7 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/pg-numeric/-/pg-numeric-1.0.2.tgz", "integrity": "sha512-BM/Thnrw5jm2kKLE5uJkXqqExRUY/toLHda65XgFTBTFYZyopbKjBe29Ii3RbkvlsMoFwD+tHeGaCjjv0gHlyw==", + "license": "ISC", "peer": true, "engines": { "node": ">=4" @@ -3645,6 +3965,7 @@ "version": "3.10.1", "resolved": "https://registry.npmjs.org/pg-pool/-/pg-pool-3.10.1.tgz", "integrity": "sha512-Tu8jMlcX+9d8+QVzKIvM/uJtp07PKr82IUOYEphaWcoBhIYkoHpLXN3qO59nAI11ripznDsEzEv8nUxBVWajGg==", + "license": "MIT", "peer": true, "peerDependencies": { "pg": ">=8.0" @@ -3654,12 +3975,14 @@ "version": "1.10.3", "resolved": "https://registry.npmjs.org/pg-protocol/-/pg-protocol-1.10.3.tgz", "integrity": "sha512-6DIBgBQaTKDJyxnXaLiLR8wBpQQcGWuAESkRBX/t6OwA8YsqP+iVSiond2EDy6Y/dsGk8rh/jtax3js5NeV7JQ==", + "license": "MIT", "peer": true }, "node_modules/pg-types": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/pg-types/-/pg-types-4.1.0.tgz", "integrity": "sha512-o2XFanIMy/3+mThw69O8d4n1E5zsLhdO+OPqswezu7Z5ekP4hYDqlDjlmOpYMbzY2Br0ufCwJLdDIXeNVwcWFg==", + "license": "MIT", "peer": true, "dependencies": { "pg-int8": "1.0.1", @@ -3678,6 +4001,7 @@ "version": "2.2.0", "resolved": "https://registry.npmjs.org/pg-types/-/pg-types-2.2.0.tgz", "integrity": "sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==", + "license": "MIT", "peer": true, "dependencies": { "pg-int8": "1.0.1", @@ -3694,6 +4018,7 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/postgres-array/-/postgres-array-2.0.0.tgz", "integrity": "sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==", + "license": "MIT", "peer": true, "engines": { "node": ">=4" @@ -3703,6 +4028,7 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-1.0.0.tgz", "integrity": "sha512-xy3pmLuQqRBZBXDULy7KbaitYqLcmxigw14Q5sj8QBVLqEwXfeybIKVWiqAXTlcvdvb0+xkOtDbfQMOf4lST1w==", + "license": "MIT", "peer": true, "engines": { "node": ">=0.10.0" @@ -3712,6 +4038,7 @@ "version": "1.0.7", "resolved": "https://registry.npmjs.org/postgres-date/-/postgres-date-1.0.7.tgz", "integrity": "sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==", + "license": "MIT", "peer": true, "engines": { "node": ">=0.10.0" @@ -3721,6 +4048,7 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/postgres-interval/-/postgres-interval-1.2.0.tgz", "integrity": "sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==", + "license": "MIT", "peer": true, "dependencies": { "xtend": "^4.0.0" @@ -3733,6 +4061,7 @@ "version": "1.0.5", "resolved": "https://registry.npmjs.org/pgpass/-/pgpass-1.0.5.tgz", "integrity": "sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==", + "license": "MIT", "peer": true, "dependencies": { "split2": "^4.1.0" @@ -3742,12 +4071,14 @@ "version": "1.1.1", "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC", "peer": true }, "node_modules/picomatch": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "license": "MIT", "peer": true, "engines": { "node": ">=8.6" @@ -3760,6 +4091,7 @@ "version": "3.0.4", "resolved": "https://registry.npmjs.org/postgres-array/-/postgres-array-3.0.4.tgz", "integrity": "sha512-nAUSGfSDGOaOAEGwqsRY27GPOea7CNipJPOA7lPbdEpx5Kg3qzdP0AaWC5MlhTWV9s4hFX39nomVZ+C4tnGOJQ==", + "license": "MIT", "peer": true, "engines": { "node": ">=12" @@ -3769,6 +4101,7 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-3.0.0.tgz", "integrity": "sha512-CNd4jim9RFPkObHSjVHlVrxoVQXz7quwNFpz7RY1okNNme49+sVyiTvTRobiLV548Hx/hb1BG+iE7h9493WzFw==", + "license": "MIT", "peer": true, "dependencies": { "obuf": "~1.1.2" @@ -3781,6 +4114,7 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/postgres-date/-/postgres-date-2.1.0.tgz", "integrity": "sha512-K7Juri8gtgXVcDfZttFKVmhglp7epKb1K4pgrkLxehjqkrgPhfG6OO8LHLkfaqkbpjNRnra018XwAr1yQFWGcA==", + "license": "MIT", "peer": true, "engines": { "node": ">=12" @@ -3790,6 +4124,7 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/postgres-interval/-/postgres-interval-3.0.0.tgz", "integrity": "sha512-BSNDnbyZCXSxgA+1f5UU2GmwhoI0aU5yMxRGO8CdFEcY2BQF9xm/7MqKnYoM1nJDk8nONNWDk9WeSmePFhQdlw==", + "license": "MIT", "peer": true, "engines": { "node": ">=12" @@ -3799,12 +4134,14 @@ "version": "1.1.4", "resolved": "https://registry.npmjs.org/postgres-range/-/postgres-range-1.1.4.tgz", "integrity": "sha512-i/hbxIE9803Alj/6ytL7UHQxRvZkI9O4Sy+J3HGc4F4oo/2eQAjTSNJ0bfxyse3bH0nuVesCk+3IRLaMtG3H6w==", + "license": "MIT", "peer": true }, "node_modules/prebuild-install": { "version": "7.1.3", "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.3.tgz", "integrity": "sha512-8Mf2cbV7x1cXPUILADGI3wuhfqWvtiLA1iclTDbFRZkgRQS0NqsPZphna9V+HyTEadheuPmjaJMsbzKQFOzLug==", + "license": "MIT", "peer": true, "dependencies": { "detect-libc": "^2.0.0", @@ -3831,6 +4168,7 @@ "version": "29.7.0", "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "license": "MIT", "peer": true, "dependencies": { "@jest/schemas": "^29.6.3", @@ -3845,6 +4183,7 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/promise-inflight/-/promise-inflight-1.0.1.tgz", "integrity": "sha512-6zWPyEOFaQBJYcGMHBKTKJ3u6TBsnMFOIZSa6ce1e/ZrrsOlnHRHbabMjLiBYKp+n44X9eUI6VUPaukCXHuG4g==", + "license": "ISC", "optional": true, "peer": true }, @@ -3852,6 +4191,7 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/promise-retry/-/promise-retry-2.0.1.tgz", "integrity": "sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -3866,6 +4206,7 @@ "version": "0.12.0", "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", "integrity": "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==", + "license": "MIT", "optional": true, "peer": true, "engines": { @@ -3876,6 +4217,7 @@ "version": "2.0.7", "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "license": "MIT", "dependencies": { "forwarded": "0.2.0", "ipaddr.js": "1.9.1" @@ -3887,12 +4229,14 @@ "node_modules/proxy-from-env": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", - "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", + "license": "MIT" }, "node_modules/pump": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.3.tgz", "integrity": "sha512-todwxLMY7/heScKmntwQG8CXVkWUOdYxIvY2s0VWAAMh/nd8SoYiRaKjlr7+iCs984f2P8zvrfWcDDYVb73NfA==", + "license": "MIT", "peer": true, "dependencies": { "end-of-stream": "^1.1.0", @@ -3903,6 +4247,7 @@ "version": "6.13.0", "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", + "license": "BSD-3-Clause", "dependencies": { "side-channel": "^1.0.6" }, @@ -3917,6 +4262,7 @@ "version": "1.2.1", "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -3925,6 +4271,7 @@ "version": "2.5.2", "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", + "license": "MIT", "dependencies": { "bytes": "3.1.2", "http-errors": "2.0.0", @@ -3939,6 +4286,7 @@ "version": "1.2.8", "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "license": "(BSD-2-Clause OR MIT OR Apache-2.0)", "peer": true, "dependencies": { "deep-extend": "^0.6.0", @@ -3954,12 +4302,14 @@ "version": "18.3.1", "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "license": "MIT", "peer": true }, "node_modules/readable-stream": { "version": "3.6.2", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "license": "MIT", "peer": true, "dependencies": { "inherits": "^2.0.3", @@ -3974,7 +4324,11 @@ "version": "4.7.1", "resolved": "https://registry.npmjs.org/redis/-/redis-4.7.1.tgz", "integrity": "sha512-S1bJDnqLftzHXHP8JsT5II/CtHWQrASX5K96REjWjlmWKrviSOLWmM7QnRLstAWsu1VBBV1ffV6DzCvxNP0UJQ==", + "license": "MIT", "peer": true, + "workspaces": [ + "./packages/*" + ], "dependencies": { "@redis/bloom": "1.2.0", "@redis/client": "1.6.1", @@ -3988,6 +4342,7 @@ "version": "0.13.1", "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", + "license": "MIT", "peer": true, "engines": { "node": ">= 4" @@ -3997,6 +4352,7 @@ "version": "5.0.10", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-5.0.10.tgz", "integrity": "sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ==", + "license": "ISC", "peer": true, "dependencies": { "glob": "^10.3.7" @@ -4012,6 +4368,7 @@ "version": "7.8.2", "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.2.tgz", "integrity": "sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==", + "license": "Apache-2.0", "peer": true, "dependencies": { "tslib": "^2.1.0" @@ -4034,17 +4391,20 @@ "type": "consulting", "url": "https://feross.org/support" } - ] + ], + "license": "MIT" }, "node_modules/safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "license": "MIT" }, "node_modules/semver": { "version": "7.7.3", "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "license": "ISC", "peer": true, "bin": { "semver": "bin/semver.js" @@ -4057,6 +4417,7 @@ "version": "0.19.0", "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", + "license": "MIT", "dependencies": { "debug": "2.6.9", "depd": "2.0.0", @@ -4080,6 +4441,7 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "license": "MIT", "engines": { "node": ">= 0.8" } @@ -4087,12 +4449,14 @@ "node_modules/send/node_modules/ms": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" }, "node_modules/serve-static": { "version": "1.16.2", "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", + "license": "MIT", "dependencies": { "encodeurl": "~2.0.0", "escape-html": "~1.0.3", @@ -4107,18 +4471,21 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==", + "license": "ISC", "optional": true, "peer": true }, "node_modules/setprototypeof": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", - "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", + "license": "ISC" }, "node_modules/shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "license": "MIT", "peer": true, "dependencies": { "shebang-regex": "^3.0.0" @@ -4131,6 +4498,7 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "license": "MIT", "peer": true, "engines": { "node": ">=8" @@ -4140,6 +4508,7 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "license": "MIT", "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3", @@ -4158,6 +4527,7 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "license": "MIT", "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3" @@ -4173,6 +4543,7 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "license": "MIT", "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", @@ -4190,6 +4561,7 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "license": "MIT", "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", @@ -4208,6 +4580,7 @@ "version": "4.1.0", "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "license": "ISC", "peer": true, "engines": { "node": ">=14" @@ -4234,6 +4607,7 @@ "url": "https://feross.org/support" } ], + "license": "MIT", "peer": true }, "node_modules/simple-get": { @@ -4254,6 +4628,7 @@ "url": "https://feross.org/support" } ], + "license": "MIT", "peer": true, "dependencies": { "decompress-response": "^6.0.0", @@ -4265,12 +4640,14 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/simple-wcswidth/-/simple-wcswidth-1.1.2.tgz", "integrity": "sha512-j7piyCjAeTDSjzTSQ7DokZtMNwNlEAyxqSZeCS+CXH7fJ4jx3FuJ/mTW3mE+6JLs4VJBbcll0Kjn+KXI5t21Iw==", + "license": "MIT", "peer": true }, "node_modules/slash": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "license": "MIT", "peer": true, "engines": { "node": ">=8" @@ -4280,6 +4657,7 @@ "version": "4.2.0", "resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz", "integrity": "sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==", + "license": "MIT", "optional": true, "peer": true, "engines": { @@ -4291,6 +4669,7 @@ "version": "2.8.7", "resolved": "https://registry.npmjs.org/socks/-/socks-2.8.7.tgz", "integrity": "sha512-HLpt+uLy/pxB+bum/9DzAgiKS8CX1EvbWxI4zlmgGCExImLdiad2iCwXT5Z4c9c3Eq8rP2318mPW2c+QbtjK8A==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -4306,6 +4685,7 @@ "version": "6.2.1", "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-6.2.1.tgz", "integrity": "sha512-a6KW9G+6B3nWZ1yB8G7pJwL3ggLy1uTzKAgCb7ttblwqdz9fMGJUuTy3uFzEP48FAs9FLILlmzDlE2JJhVQaXQ==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -4321,6 +4701,7 @@ "version": "6.0.2", "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -4334,6 +4715,7 @@ "version": "4.4.3", "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -4352,6 +4734,7 @@ "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT", "optional": true, "peer": true }, @@ -4359,6 +4742,7 @@ "version": "4.2.0", "resolved": "https://registry.npmjs.org/split2/-/split2-4.2.0.tgz", "integrity": "sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==", + "license": "ISC", "peer": true, "engines": { "node": ">= 10.x" @@ -4369,6 +4753,7 @@ "resolved": "https://registry.npmjs.org/sqlite3/-/sqlite3-5.1.7.tgz", "integrity": "sha512-GGIyOiFaG+TUra3JIfkI/zGP8yZYLPQ0pl1bH+ODjiX57sPhrLU5sQJn1y9bDKZUFYkX1crlrPfSYt0BKKdkog==", "hasInstallScript": true, + "license": "BSD-3-Clause", "peer": true, "dependencies": { "bindings": "^1.5.0", @@ -4392,6 +4777,7 @@ "version": "8.0.1", "resolved": "https://registry.npmjs.org/ssri/-/ssri-8.0.1.tgz", "integrity": "sha512-97qShzy1AiyxvPNIkLWoGua7xoQzzPjQ0HAH4B0rWKo7SZ6USuPcrUiAFrws0UH8RrbWmgq3LMTObhPIHbbBeQ==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -4405,6 +4791,7 @@ "version": "3.3.6", "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -4418,6 +4805,7 @@ "version": "2.0.6", "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", + "license": "MIT", "peer": true, "dependencies": { "escape-string-regexp": "^2.0.0" @@ -4430,6 +4818,7 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "license": "MIT", "engines": { "node": ">= 0.8" } @@ -4438,6 +4827,7 @@ "version": "1.3.0", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "license": "MIT", "peer": true, "dependencies": { "safe-buffer": "~5.2.0" @@ -4447,6 +4837,7 @@ "version": "5.1.2", "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "license": "MIT", "peer": true, "dependencies": { "eastasianwidth": "^0.2.0", @@ -4465,6 +4856,7 @@ "version": "4.2.3", "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", "peer": true, "dependencies": { "emoji-regex": "^8.0.0", @@ -4479,6 +4871,7 @@ "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", "peer": true, "engines": { "node": ">=8" @@ -4488,12 +4881,14 @@ "version": "8.0.0", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT", "peer": true }, "node_modules/string-width-cjs/node_modules/strip-ansi": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", "peer": true, "dependencies": { "ansi-regex": "^5.0.1" @@ -4506,6 +4901,7 @@ "version": "7.1.2", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "license": "MIT", "peer": true, "dependencies": { "ansi-regex": "^6.0.1" @@ -4522,6 +4918,7 @@ "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", "peer": true, "dependencies": { "ansi-regex": "^5.0.1" @@ -4534,6 +4931,7 @@ "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", "peer": true, "engines": { "node": ">=8" @@ -4543,6 +4941,7 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "license": "MIT", "peer": true, "engines": { "node": ">=0.10.0" @@ -4552,6 +4951,7 @@ "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "license": "MIT", "peer": true, "dependencies": { "has-flag": "^4.0.0" @@ -4564,6 +4964,7 @@ "version": "6.2.1", "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", + "license": "ISC", "peer": true, "dependencies": { "chownr": "^2.0.0", @@ -4581,6 +4982,7 @@ "version": "2.1.4", "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.4.tgz", "integrity": "sha512-mDAjwmZdh7LTT6pNleZ05Yt65HC3E+NiQzl672vQG38jIrehtJk/J3mNwIg+vShQPcLF/LV7CMnDW6vjj6sfYQ==", + "license": "MIT", "peer": true, "dependencies": { "chownr": "^1.1.1", @@ -4593,12 +4995,14 @@ "version": "1.1.4", "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==", + "license": "ISC", "peer": true }, "node_modules/tar-stream": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz", "integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==", + "license": "MIT", "peer": true, "dependencies": { "bl": "^4.0.3", @@ -4615,6 +5019,7 @@ "version": "5.0.0", "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "license": "ISC", "peer": true, "engines": { "node": ">=8" @@ -4624,6 +5029,7 @@ "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "license": "MIT", "peer": true, "dependencies": { "is-number": "^7.0.0" @@ -4636,6 +5042,7 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "license": "MIT", "engines": { "node": ">=0.6" } @@ -4643,18 +5050,21 @@ "node_modules/tr46": { "version": "0.0.3", "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", - "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", + "license": "MIT" }, "node_modules/tslib": { "version": "2.8.1", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD", "peer": true }, "node_modules/tunnel-agent": { "version": "0.6.0", "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", + "license": "Apache-2.0", "peer": true, "dependencies": { "safe-buffer": "^5.0.1" @@ -4667,6 +5077,7 @@ "version": "1.6.18", "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "license": "MIT", "dependencies": { "media-typer": "0.3.0", "mime-types": "~2.1.24" @@ -4679,6 +5090,7 @@ "version": "5.9.3", "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "license": "Apache-2.0", "peer": true, "bin": { "tsc": "bin/tsc", @@ -4692,6 +5104,7 @@ "version": "5.28.5", "resolved": "https://registry.npmjs.org/undici/-/undici-5.28.5.tgz", "integrity": "sha512-zICwjrDrcrUE0pyyJc1I2QzBkLM8FINsgOrt6WjA+BgajVq9Nxu2PbFFXUrAggLfDXlZGZBVZYw7WNV5KiBiBA==", + "license": "MIT", "peer": true, "dependencies": { "@fastify/busboy": "^2.0.0" @@ -4703,12 +5116,14 @@ "node_modules/undici-types": { "version": "5.26.5", "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", - "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", + "license": "MIT" }, "node_modules/unique-filename": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-1.1.1.tgz", "integrity": "sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -4719,6 +5134,7 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-2.0.2.tgz", "integrity": "sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -4729,6 +5145,7 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "license": "MIT", "engines": { "node": ">= 0.8" } @@ -4737,12 +5154,14 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "license": "MIT", "peer": true }, "node_modules/utils-merge": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "license": "MIT", "engines": { "node": ">= 0.4.0" } @@ -4755,6 +5174,7 @@ "https://github.com/sponsors/broofa", "https://github.com/sponsors/ctavan" ], + "license": "MIT", "peer": true, "bin": { "uuid": "dist/bin/uuid" @@ -4764,6 +5184,7 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "license": "MIT", "engines": { "node": ">= 0.8" } @@ -4772,6 +5193,7 @@ "version": "4.0.0-beta.3", "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz", "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==", + "license": "MIT", "engines": { "node": ">= 14" } @@ -4779,18 +5201,21 @@ "node_modules/webidl-conversions": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", - "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", + "license": "BSD-2-Clause" }, "node_modules/whatwg-fetch": { "version": "3.6.20", "resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.6.20.tgz", "integrity": "sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg==", + "license": "MIT", "peer": true }, "node_modules/whatwg-url": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "license": "MIT", "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" @@ -4800,6 +5225,7 @@ "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "license": "ISC", "peer": true, "dependencies": { "isexe": "^2.0.0" @@ -4815,6 +5241,7 @@ "version": "1.1.5", "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.5.tgz", "integrity": "sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg==", + "license": "ISC", "optional": true, "peer": true, "dependencies": { @@ -4825,6 +5252,7 @@ "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", "optional": true, "peer": true, "engines": { @@ -4835,6 +5263,7 @@ "version": "8.0.0", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT", "optional": true, "peer": true }, @@ -4842,6 +5271,7 @@ "version": "4.2.3", "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -4857,6 +5287,7 @@ "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", "optional": true, "peer": true, "dependencies": { @@ -4870,6 +5301,7 @@ "version": "8.1.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "license": "MIT", "peer": true, "dependencies": { "ansi-styles": "^6.1.0", @@ -4888,6 +5320,7 @@ "version": "7.0.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "license": "MIT", "peer": true, "dependencies": { "ansi-styles": "^4.0.0", @@ -4905,6 +5338,7 @@ "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", "peer": true, "engines": { "node": ">=8" @@ -4914,6 +5348,7 @@ "version": "4.3.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", "peer": true, "dependencies": { "color-convert": "^2.0.1" @@ -4929,12 +5364,14 @@ "version": "8.0.0", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT", "peer": true }, "node_modules/wrap-ansi-cjs/node_modules/string-width": { "version": "4.2.3", "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", "peer": true, "dependencies": { "emoji-regex": "^8.0.0", @@ -4949,6 +5386,7 @@ "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", "peer": true, "dependencies": { "ansi-regex": "^5.0.1" @@ -4961,6 +5399,7 @@ "version": "6.2.3", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", + "license": "MIT", "peer": true, "engines": { "node": ">=12" @@ -4973,12 +5412,14 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "license": "ISC", "peer": true }, "node_modules/ws": { "version": "8.18.3", "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz", "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==", + "license": "MIT", "peer": true, "engines": { "node": ">=10.0.0" @@ -5000,6 +5441,7 @@ "version": "4.0.2", "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "license": "MIT", "peer": true, "engines": { "node": ">=0.4" @@ -5009,3791 +5451,27 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "license": "ISC", "peer": true }, "node_modules/zod": { "version": "3.25.76", "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", + "license": "MIT", "funding": { "url": "https://github.com/sponsors/colinhacks" } }, "node_modules/zod-to-json-schema": { - "version": "3.24.6", - "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.24.6.tgz", - "integrity": "sha512-h/z3PKvcTcTetyjl1fkj79MHNEjm+HpD6NXheWjzOekY7kV+lwDYnHw+ivHkijnCSMz1yJaWBD9vu/Fcmk+vEg==", + "version": "3.25.0", + "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.25.0.tgz", + "integrity": "sha512-HvWtU2UG41LALjajJrML6uQejQhNJx+JBO9IflpSja4R03iNWfKXrj6W2h7ljuLyc1nKS+9yDyL/9tD1U/yBnQ==", + "license": "ISC", "peer": true, "peerDependencies": { - "zod": "^3.24.1" + "zod": "^3.25 || ^4" } } - }, - "dependencies": { - "@anthropic-ai/sdk": { - "version": "0.40.1", - "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.40.1.tgz", - "integrity": "sha512-DJMWm8lTEM9Lk/MSFL+V+ugF7jKOn0M2Ujvb5fN8r2nY14aHbGPZ1k6sgjL+tpJ3VuOGJNG+4R83jEpOuYPv8w==", - "peer": true, - "requires": { - "@types/node": "^18.11.18", - "@types/node-fetch": "^2.6.4", - "abort-controller": "^3.0.0", - "agentkeepalive": "^4.2.1", - "form-data-encoder": "1.7.2", - "formdata-node": "^4.3.2", - "node-fetch": "^2.6.7" - }, - "dependencies": { - "node-fetch": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", - "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", - "peer": true, - "requires": { - "whatwg-url": "^5.0.0" - } - } - } - }, - "@babel/code-frame": { - "version": "7.27.1", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", - "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", - "peer": true, - "requires": { - "@babel/helper-validator-identifier": "^7.27.1", - "js-tokens": "^4.0.0", - "picocolors": "^1.1.1" - } - }, - "@babel/helper-validator-identifier": { - "version": "7.28.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", - "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", - "peer": true - }, - "@cfworker/json-schema": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/@cfworker/json-schema/-/json-schema-4.1.1.tgz", - "integrity": "sha512-gAmrUZSGtKc3AiBL71iNWxDsyUC5uMaKKGdvzYsBoTW/xi42JQHl7eKV2OYzCUqvc+D2RCcf7EXY2iCyFIk6og==", - "peer": true - }, - "@cloudflare/workers-types": { - "version": "4.20251014.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workers-types/-/workers-types-4.20251014.0.tgz", - "integrity": "sha512-tEW98J/kOa0TdylIUOrLKRdwkUw0rvvYVlo+Ce0mqRH3c8kSoxLzUH9gfCvwLe0M89z1RkzFovSKAW2Nwtyn3w==", - "peer": true - }, - "@fastify/busboy": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/@fastify/busboy/-/busboy-2.1.1.tgz", - "integrity": "sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==", - "peer": true - }, - "@gar/promisify": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@gar/promisify/-/promisify-1.1.3.tgz", - "integrity": "sha512-k2Ty1JcVojjJFwrg/ThKi2ujJ7XNLYaFGNB/bWT9wGR+oSMJHMa5w+CUq6p/pVrKeNNgA7pCqEcjSnHVoqJQFw==", - "optional": true, - "peer": true - }, - "@google/genai": { - "version": "1.28.0", - "resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.28.0.tgz", - "integrity": "sha512-0pfZ1EWQsM9kINsL+mFKJvpzM6NRHS9t360S1MzKq4JtIwTj/RbsPpC/K5wpKiPy9PC+J+bsz/9gvaL51++KrA==", - "peer": true, - "requires": { - "google-auth-library": "^10.3.0", - "ws": "^8.18.0" - } - }, - "@isaacs/cliui": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", - "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", - "peer": true, - "requires": { - "string-width": "^5.1.2", - "string-width-cjs": "npm:string-width@^4.2.0", - "strip-ansi": "^7.0.1", - "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", - "wrap-ansi": "^8.1.0", - "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" - } - }, - "@jest/expect-utils": { - "version": "29.7.0", - "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz", - "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==", - "peer": true, - "requires": { - "jest-get-type": "^29.6.3" - } - }, - "@jest/schemas": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", - "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", - "peer": true, - "requires": { - "@sinclair/typebox": "^0.27.8" - } - }, - "@jest/types": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", - "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", - "peer": true, - "requires": { - "@jest/schemas": "^29.6.3", - "@types/istanbul-lib-coverage": "^2.0.0", - "@types/istanbul-reports": "^3.0.0", - "@types/node": "*", - "@types/yargs": "^17.0.8", - "chalk": "^4.0.0" - } - }, - "@langchain/core": { - "version": "0.3.79", - "resolved": "https://registry.npmjs.org/@langchain/core/-/core-0.3.79.tgz", - "integrity": "sha512-ZLAs5YMM5N2UXN3kExMglltJrKKoW7hs3KMZFlXUnD7a5DFKBYxPFMeXA4rT+uvTxuJRZPCYX0JKI5BhyAWx4A==", - "peer": true, - "requires": { - "@cfworker/json-schema": "^4.0.2", - "ansi-styles": "^5.0.0", - "camelcase": "6", - "decamelize": "1.2.0", - "js-tiktoken": "^1.0.12", - "langsmith": "^0.3.67", - "mustache": "^4.2.0", - "p-queue": "^6.6.2", - "p-retry": "4", - "uuid": "^10.0.0", - "zod": "^3.25.32", - "zod-to-json-schema": "^3.22.3" - } - }, - "@mistralai/mistralai": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/@mistralai/mistralai/-/mistralai-1.10.0.tgz", - "integrity": "sha512-tdIgWs4Le8vpvPiUEWne6tK0qbVc+jMenujnvTqOjogrJUsCSQhus0tHTU1avDDh5//Rq2dFgP9mWRAdIEoBqg==", - "peer": true, - "requires": { - "zod": "^3.20.0", - "zod-to-json-schema": "^3.24.1" - } - }, - "@npmcli/fs": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-1.1.1.tgz", - "integrity": "sha512-8KG5RD0GVP4ydEzRn/I4BNDuxDtqVbOdm8675T49OIG/NGhaK0pjPX7ZcDlvKYbA+ulvVK3ztfcF4uBdOxuJbQ==", - "optional": true, - "peer": true, - "requires": { - "@gar/promisify": "^1.0.1", - "semver": "^7.3.5" - } - }, - "@npmcli/move-file": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@npmcli/move-file/-/move-file-1.1.2.tgz", - "integrity": "sha512-1SUf/Cg2GzGDyaf15aR9St9TWlb+XvbZXWpDx8YKs7MLzMH/BCeopv+y9vzrzgkfykCGuWOlSu3mZhj2+FQcrg==", - "optional": true, - "peer": true, - "requires": { - "mkdirp": "^1.0.4", - "rimraf": "^3.0.2" - }, - "dependencies": { - "brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", - "optional": true, - "peer": true, - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "optional": true, - "peer": true, - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "optional": true, - "peer": true, - "requires": { - "brace-expansion": "^1.1.7" - } - }, - "rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "optional": true, - "peer": true, - "requires": { - "glob": "^7.1.3" - } - } - } - }, - "@pkgjs/parseargs": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", - "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", - "optional": true, - "peer": true - }, - "@qdrant/js-client-rest": { - "version": "1.13.0", - "resolved": "https://registry.npmjs.org/@qdrant/js-client-rest/-/js-client-rest-1.13.0.tgz", - "integrity": "sha512-bewMtnXlGvhhnfXsp0sLoLXOGvnrCM15z9lNlG0Snp021OedNAnRtKkerjk5vkOcbQWUmJHXYCuxDfcT93aSkA==", - "peer": true, - "requires": { - "@qdrant/openapi-typescript-fetch": "1.2.6", - "@sevinf/maybe": "0.5.0", - "undici": "~5.28.4" - } - }, - "@qdrant/openapi-typescript-fetch": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/@qdrant/openapi-typescript-fetch/-/openapi-typescript-fetch-1.2.6.tgz", - "integrity": "sha512-oQG/FejNpItrxRHoyctYvT3rwGZOnK4jr3JdppO/c78ktDvkWiPXPHNsrDf33K9sZdRb6PR7gi4noIapu5q4HA==", - "peer": true - }, - "@redis/bloom": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.2.0.tgz", - "integrity": "sha512-HG2DFjYKbpNmVXsa0keLHp/3leGJz1mjh09f2RLGGLQZzSHpkmZWuwJbAvo3QcRY8p80m5+ZdXZdYOSBLlp7Cg==", - "peer": true, - "requires": {} - }, - "@redis/client": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/@redis/client/-/client-1.6.1.tgz", - "integrity": "sha512-/KCsg3xSlR+nCK8/8ZYSknYxvXHwubJrU82F3Lm1Fp6789VQ0/3RJKfsmRXjqfaTA++23CvC3hqmqe/2GEt6Kw==", - "peer": true, - "requires": { - "cluster-key-slot": "1.1.2", - "generic-pool": "3.9.0", - "yallist": "4.0.0" - } - }, - "@redis/graph": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.1.1.tgz", - "integrity": "sha512-FEMTcTHZozZciLRl6GiiIB4zGm5z5F3F6a6FZCyrfxdKOhFlGkiAqlexWMBzCi4DcRoyiOsuLfW+cjlGWyExOw==", - "peer": true, - "requires": {} - }, - "@redis/json": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.7.tgz", - "integrity": "sha512-6UyXfjVaTBTJtKNG4/9Z8PSpKE6XgSyEb8iwaqDcy+uKrd/DGYHTWkUdnQDyzm727V7p21WUMhsqz5oy65kPcQ==", - "peer": true, - "requires": {} - }, - "@redis/search": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@redis/search/-/search-1.2.0.tgz", - "integrity": "sha512-tYoDBbtqOVigEDMAcTGsRlMycIIjwMCgD8eR2t0NANeQmgK/lvxNAvYyb6bZDD4frHRhIHkJu2TBRvB0ERkOmw==", - "peer": true, - "requires": {} - }, - "@redis/time-series": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.1.0.tgz", - "integrity": "sha512-c1Q99M5ljsIuc4YdaCwfUEXsofakb9c8+Zse2qxTadu8TalLXuAESzLvFAvNVbkmSlvlzIQOLpBCmWI9wTOt+g==", - "peer": true, - "requires": {} - }, - "@sevinf/maybe": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/@sevinf/maybe/-/maybe-0.5.0.tgz", - "integrity": "sha512-ARhyoYDnY1LES3vYI0fiG6e9esWfTNcXcO6+MPJJXcnyMV3bim4lnFt45VXouV7y82F4x3YH8nOQ6VztuvUiWg==", - "peer": true - }, - "@sinclair/typebox": { - "version": "0.27.8", - "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", - "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", - "peer": true - }, - "@supabase/auth-js": { - "version": "2.78.0", - "resolved": "https://registry.npmjs.org/@supabase/auth-js/-/auth-js-2.78.0.tgz", - "integrity": "sha512-cXDtu1U0LeZj/xfnFoV7yCze37TcbNo8FCxy1FpqhMbB9u9QxxDSW6pA5gm/07Ei7m260Lof4CZx67Cu6DPeig==", - "peer": true, - "requires": { - "@supabase/node-fetch": "2.6.15", - "tslib": "2.8.1" - } - }, - "@supabase/functions-js": { - "version": "2.78.0", - "resolved": "https://registry.npmjs.org/@supabase/functions-js/-/functions-js-2.78.0.tgz", - "integrity": "sha512-t1jOvArBsOINyqaRee1xJ3gryXLvkBzqnKfi6q3YRzzhJbGS6eXz0pXR5fqmJeB01fLC+1njpf3YhMszdPEF7g==", - "peer": true, - "requires": { - "@supabase/node-fetch": "2.6.15", - "tslib": "2.8.1" - } - }, - "@supabase/node-fetch": { - "version": "2.6.15", - "resolved": "https://registry.npmjs.org/@supabase/node-fetch/-/node-fetch-2.6.15.tgz", - "integrity": "sha512-1ibVeYUacxWYi9i0cf5efil6adJ9WRyZBLivgjs+AUpewx1F3xPi7gLgaASI2SmIQxPoCEjAsLAzKPgMJVgOUQ==", - "peer": true, - "requires": { - "whatwg-url": "^5.0.0" - } - }, - "@supabase/postgrest-js": { - "version": "2.78.0", - "resolved": "https://registry.npmjs.org/@supabase/postgrest-js/-/postgrest-js-2.78.0.tgz", - "integrity": "sha512-AwhpYlSvJ+PSnPmIK8sHj7NGDyDENYfQGKrMtpVIEzQA2ApUjgpUGxzXWN4Z0wEtLQsvv7g4y9HVad9Hzo1TNA==", - "peer": true, - "requires": { - "@supabase/node-fetch": "2.6.15", - "tslib": "2.8.1" - } - }, - "@supabase/realtime-js": { - "version": "2.78.0", - "resolved": "https://registry.npmjs.org/@supabase/realtime-js/-/realtime-js-2.78.0.tgz", - "integrity": "sha512-rCs1zmLe7of7hj4s7G9z8rTqzWuNVtmwDr3FiCRCJFawEoa+RQO1xpZGbdeuVvVmKDyVN6b542Okci+117y/LQ==", - "peer": true, - "requires": { - "@supabase/node-fetch": "2.6.15", - "@types/phoenix": "^1.6.6", - "@types/ws": "^8.18.1", - "tslib": "2.8.1", - "ws": "^8.18.2" - } - }, - "@supabase/storage-js": { - "version": "2.78.0", - "resolved": "https://registry.npmjs.org/@supabase/storage-js/-/storage-js-2.78.0.tgz", - "integrity": "sha512-n17P0JbjHOlxqJpkaGFOn97i3EusEKPEbWOpuk1r4t00Wg06B8Z4GUiq0O0n1vUpjiMgJUkLIMuBVp+bEgunzQ==", - "peer": true, - "requires": { - "@supabase/node-fetch": "2.6.15", - "tslib": "2.8.1" - } - }, - "@supabase/supabase-js": { - "version": "2.78.0", - "resolved": "https://registry.npmjs.org/@supabase/supabase-js/-/supabase-js-2.78.0.tgz", - "integrity": "sha512-xYMRNBFmKp2m1gMuwcp/gr/HlfZKqjye1Ib8kJe29XJNsgwsfO/f8skxnWiscFKTlkOKLuBexNgl5L8dzGt6vA==", - "peer": true, - "requires": { - "@supabase/auth-js": "2.78.0", - "@supabase/functions-js": "2.78.0", - "@supabase/node-fetch": "2.6.15", - "@supabase/postgrest-js": "2.78.0", - "@supabase/realtime-js": "2.78.0", - "@supabase/storage-js": "2.78.0" - } - }, - "@tootallnate/once": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-1.1.2.tgz", - "integrity": "sha512-RbzJvlNzmRq5c3O09UipeuXno4tA1FE6ikOjxZK0tuxVv3412l64l5t1W5pj4+rJq9vpkm/kwiR07aZXnsKPxw==", - "optional": true, - "peer": true - }, - "@types/istanbul-lib-coverage": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", - "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", - "peer": true - }, - "@types/istanbul-lib-report": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", - "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", - "peer": true, - "requires": { - "@types/istanbul-lib-coverage": "*" - } - }, - "@types/istanbul-reports": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", - "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", - "peer": true, - "requires": { - "@types/istanbul-lib-report": "*" - } - }, - "@types/jest": { - "version": "29.5.14", - "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.14.tgz", - "integrity": "sha512-ZN+4sdnLUbo8EVvVc2ao0GFW6oVrQRPn4K2lglySj7APvSrgzxHiNNK99us4WDMi57xxA2yggblIAMNhXOotLQ==", - "peer": true, - "requires": { - "expect": "^29.0.0", - "pretty-format": "^29.0.0" - } - }, - "@types/node": { - "version": "18.19.130", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.130.tgz", - "integrity": "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==", - "requires": { - "undici-types": "~5.26.4" - } - }, - "@types/node-fetch": { - "version": "2.6.13", - "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.13.tgz", - "integrity": "sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==", - "requires": { - "@types/node": "*", - "form-data": "^4.0.4" - } - }, - "@types/pg": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/@types/pg/-/pg-8.11.0.tgz", - "integrity": "sha512-sDAlRiBNthGjNFfvt0k6mtotoVYVQ63pA8R4EMWka7crawSR60waVYR0HAgmPRs/e2YaeJTD/43OoZ3PFw80pw==", - "peer": true, - "requires": { - "@types/node": "*", - "pg-protocol": "*", - "pg-types": "^4.0.1" - } - }, - "@types/phoenix": { - "version": "1.6.6", - "resolved": "https://registry.npmjs.org/@types/phoenix/-/phoenix-1.6.6.tgz", - "integrity": "sha512-PIzZZlEppgrpoT2QgbnDU+MMzuR6BbCjllj0bM70lWoejMeNJAxCchxnv7J3XFkI8MpygtRpzXrIlmWUBclP5A==", - "peer": true - }, - "@types/retry": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", - "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==", - "peer": true - }, - "@types/sqlite3": { - "version": "3.1.11", - "resolved": "https://registry.npmjs.org/@types/sqlite3/-/sqlite3-3.1.11.tgz", - "integrity": "sha512-KYF+QgxAnnAh7DWPdNDroxkDI3/MspH1NMx6m/N/6fT1G6+jvsw4/ZePt8R8cr7ta58aboeTfYFBDxTJ5yv15w==", - "peer": true, - "requires": { - "@types/node": "*" - } - }, - "@types/stack-utils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz", - "integrity": "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==", - "peer": true - }, - "@types/uuid": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-10.0.0.tgz", - "integrity": "sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==", - "peer": true - }, - "@types/ws": { - "version": "8.18.1", - "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.18.1.tgz", - "integrity": "sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==", - "peer": true, - "requires": { - "@types/node": "*" - } - }, - "@types/yargs": { - "version": "17.0.34", - "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.34.tgz", - "integrity": "sha512-KExbHVa92aJpw9WDQvzBaGVE2/Pz+pLZQloT2hjL8IqsZnV62rlPOYvNnLmf/L2dyllfVUOVBj64M0z/46eR2A==", - "peer": true, - "requires": { - "@types/yargs-parser": "*" - } - }, - "@types/yargs-parser": { - "version": "21.0.3", - "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", - "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", - "peer": true - }, - "abbrev": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", - "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", - "optional": true, - "peer": true - }, - "abort-controller": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", - "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", - "requires": { - "event-target-shim": "^5.0.0" - } - }, - "accepts": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", - "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", - "requires": { - "mime-types": "~2.1.34", - "negotiator": "0.6.3" - } - }, - "agent-base": { - "version": "7.1.4", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", - "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", - "peer": true - }, - "agentkeepalive": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz", - "integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==", - "requires": { - "humanize-ms": "^1.2.1" - } - }, - "aggregate-error": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", - "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", - "optional": true, - "peer": true, - "requires": { - "clean-stack": "^2.0.0", - "indent-string": "^4.0.0" - } - }, - "ansi-regex": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", - "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", - "peer": true - }, - "ansi-styles": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", - "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", - "peer": true - }, - "aproba": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/aproba/-/aproba-2.1.0.tgz", - "integrity": "sha512-tLIEcj5GuR2RSTnxNKdkK0dJ/GrC7P38sUkiDmDuHfsHmbagTFAxDVIBltoklXEVIQ/f14IL8IMJ5pn9Hez1Ew==", - "optional": true, - "peer": true - }, - "are-we-there-yet": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-3.0.1.tgz", - "integrity": "sha512-QZW4EDmGwlYur0Yyf/b2uGucHQMa8aFUP7eu9ddR73vvhFyt4V0Vl3QHPcTNJ8l6qYOBdxgXdnBXQrHilfRQBg==", - "optional": true, - "peer": true, - "requires": { - "delegates": "^1.0.0", - "readable-stream": "^3.6.0" - } - }, - "array-flatten": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", - "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" - }, - "asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" - }, - "axios": { - "version": "1.7.7", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.7.tgz", - "integrity": "sha512-S4kL7XrjgBmvdGut0sN3yJxqYzrDOnivkBiN0OFs6hLiUam3UPvswUo0kqGyhqUZGEOytHyumEdXsAkgCOUf3Q==", - "requires": { - "follow-redirects": "^1.15.6", - "form-data": "^4.0.0", - "proxy-from-env": "^1.1.0" - } - }, - "balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "peer": true - }, - "base-64": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/base-64/-/base-64-0.1.0.tgz", - "integrity": "sha512-Y5gU45svrR5tI2Vt/X9GPd3L0HNIKzGu202EjxrXMpuc2V2CiKgemAbUUsqYmZJvPtCXoUKjNZwBJzsNScUbXA==", - "peer": true - }, - "base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", - "peer": true - }, - "bignumber.js": { - "version": "9.3.1", - "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.3.1.tgz", - "integrity": "sha512-Ko0uX15oIUS7wJ3Rb30Fs6SkVbLmPBAKdlm7q9+ak9bbIeFf0MwuBsQV6z7+X768/cHsfg+WlysDWJcmthjsjQ==", - "peer": true - }, - "bindings": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", - "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", - "peer": true, - "requires": { - "file-uri-to-path": "1.0.0" - } - }, - "bl": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", - "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", - "peer": true, - "requires": { - "buffer": "^5.5.0", - "inherits": "^2.0.4", - "readable-stream": "^3.4.0" - }, - "dependencies": { - "buffer": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", - "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", - "peer": true, - "requires": { - "base64-js": "^1.3.1", - "ieee754": "^1.1.13" - } - } - } - }, - "body-parser": { - "version": "1.20.3", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", - "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", - "requires": { - "bytes": "3.1.2", - "content-type": "~1.0.5", - "debug": "2.6.9", - "depd": "2.0.0", - "destroy": "1.2.0", - "http-errors": "2.0.0", - "iconv-lite": "0.4.24", - "on-finished": "2.4.1", - "qs": "6.13.0", - "raw-body": "2.5.2", - "type-is": "~1.6.18", - "unpipe": "1.0.0" - } - }, - "brace-expansion": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", - "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", - "peer": true, - "requires": { - "balanced-match": "^1.0.0" - } - }, - "braces": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", - "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", - "peer": true, - "requires": { - "fill-range": "^7.1.1" - } - }, - "buffer": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", - "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", - "peer": true, - "requires": { - "base64-js": "^1.3.1", - "ieee754": "^1.2.1" - } - }, - "buffer-equal-constant-time": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", - "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==", - "peer": true - }, - "buffer-writer": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/buffer-writer/-/buffer-writer-2.0.0.tgz", - "integrity": "sha512-a7ZpuTZU1TRtnwyCNW3I5dc0wWNC3VR9S++Ewyk2HHZdrO3CQJqSpd+95Us590V6AL7JqUAH2IwZ/398PmNFgw==", - "peer": true - }, - "bytes": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", - "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==" - }, - "cacache": { - "version": "15.3.0", - "resolved": "https://registry.npmjs.org/cacache/-/cacache-15.3.0.tgz", - "integrity": "sha512-VVdYzXEn+cnbXpFgWs5hTT7OScegHVmLhJIR8Ufqk3iFD6A6j5iSX1KuBTfNEv4tdJWE2PzA6IVFtcLC7fN9wQ==", - "optional": true, - "peer": true, - "requires": { - "@npmcli/fs": "^1.0.0", - "@npmcli/move-file": "^1.0.1", - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "glob": "^7.1.4", - "infer-owner": "^1.0.4", - "lru-cache": "^6.0.0", - "minipass": "^3.1.1", - "minipass-collect": "^1.0.2", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.2", - "mkdirp": "^1.0.3", - "p-map": "^4.0.0", - "promise-inflight": "^1.0.1", - "rimraf": "^3.0.2", - "ssri": "^8.0.1", - "tar": "^6.0.2", - "unique-filename": "^1.1.1" - }, - "dependencies": { - "brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", - "optional": true, - "peer": true, - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "optional": true, - "peer": true, - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "optional": true, - "peer": true, - "requires": { - "yallist": "^4.0.0" - } - }, - "minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "optional": true, - "peer": true, - "requires": { - "brace-expansion": "^1.1.7" - } - }, - "minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "optional": true, - "peer": true, - "requires": { - "yallist": "^4.0.0" - } - }, - "rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "optional": true, - "peer": true, - "requires": { - "glob": "^7.1.3" - } - } - } - }, - "call-bind-apply-helpers": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", - "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", - "requires": { - "es-errors": "^1.3.0", - "function-bind": "^1.1.2" - } - }, - "call-bound": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", - "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", - "requires": { - "call-bind-apply-helpers": "^1.0.2", - "get-intrinsic": "^1.3.0" - } - }, - "camelcase": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", - "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", - "peer": true - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "peer": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "peer": true, - "requires": { - "color-convert": "^2.0.1" - } - } - } - }, - "charenc": { - "version": "0.0.2", - "resolved": "https://registry.npmjs.org/charenc/-/charenc-0.0.2.tgz", - "integrity": "sha512-yrLQ/yVUFXkzg7EDQsPieE/53+0RlaWTs+wBrvW36cyilJ2SaDWfl4Yj7MtLTXleV9uEKefbAGUPv2/iWSooRA==", - "peer": true - }, - "chownr": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", - "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", - "peer": true - }, - "ci-info": { - "version": "3.9.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", - "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", - "peer": true - }, - "clean-stack": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", - "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", - "optional": true, - "peer": true - }, - "cloudflare": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/cloudflare/-/cloudflare-4.5.0.tgz", - "integrity": "sha512-fPcbPKx4zF45jBvQ0z7PCdgejVAPBBCZxwqk1k7krQNfpM07Cfj97/Q6wBzvYqlWXx/zt1S9+m8vnfCe06umbQ==", - "peer": true, - "requires": { - "@types/node": "^18.11.18", - "@types/node-fetch": "^2.6.4", - "abort-controller": "^3.0.0", - "agentkeepalive": "^4.2.1", - "form-data-encoder": "1.7.2", - "formdata-node": "^4.3.2", - "node-fetch": "^2.6.7" - }, - "dependencies": { - "node-fetch": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", - "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", - "peer": true, - "requires": { - "whatwg-url": "^5.0.0" - } - } - } - }, - "cluster-key-slot": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.2.tgz", - "integrity": "sha512-RMr0FhtfXemyinomL4hrWcYJxmX6deFdCxpJzhDttxgO1+bcCnkk+9drydLVDmAMG7NE6aN/fl4F7ucU/90gAA==", - "peer": true - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "peer": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "peer": true - }, - "color-support": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-support/-/color-support-1.1.3.tgz", - "integrity": "sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==", - "optional": true, - "peer": true - }, - "combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "requires": { - "delayed-stream": "~1.0.0" - } - }, - "concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", - "optional": true, - "peer": true - }, - "console-control-strings": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", - "integrity": "sha512-ty/fTekppD2fIwRvnZAVdeOiGd1c7YXEixbgJTNzqcxJWKQnjJ/V1bNEEE6hygpM3WjwHFUVK6HTjWSzV4a8sQ==", - "optional": true, - "peer": true - }, - "console-table-printer": { - "version": "2.15.0", - "resolved": "https://registry.npmjs.org/console-table-printer/-/console-table-printer-2.15.0.tgz", - "integrity": "sha512-SrhBq4hYVjLCkBVOWaTzceJalvn5K1Zq5aQA6wXC/cYjI3frKWNPEMK3sZsJfNNQApvCQmgBcc13ZKmFj8qExw==", - "peer": true, - "requires": { - "simple-wcswidth": "^1.1.2" - } - }, - "content-disposition": { - "version": "0.5.4", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", - "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", - "requires": { - "safe-buffer": "5.2.1" - } - }, - "content-type": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", - "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==" - }, - "cookie": { - "version": "0.7.1", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz", - "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==" - }, - "cookie-signature": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", - "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==" - }, - "cors": { - "version": "2.8.5", - "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz", - "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==", - "requires": { - "object-assign": "^4", - "vary": "^1" - } - }, - "cross-spawn": { - "version": "7.0.6", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", - "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", - "peer": true, - "requires": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - } - }, - "crypt": { - "version": "0.0.2", - "resolved": "https://registry.npmjs.org/crypt/-/crypt-0.0.2.tgz", - "integrity": "sha512-mCxBlsHFYh9C+HVpiEacem8FEBnMXgU9gy4zmNC+SXAZNB/1idgp/aulFJ4FgCi7GPEVbfyng092GqL2k2rmow==", - "peer": true - }, - "data-uri-to-buffer": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz", - "integrity": "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==" - }, - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - } - }, - "decamelize": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", - "peer": true - }, - "decompress-response": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", - "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", - "peer": true, - "requires": { - "mimic-response": "^3.1.0" - } - }, - "deep-extend": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", - "peer": true - }, - "delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==" - }, - "delegates": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", - "integrity": "sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==", - "optional": true, - "peer": true - }, - "depd": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", - "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==" - }, - "destroy": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", - "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==" - }, - "detect-libc": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz", - "integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==", - "peer": true - }, - "diff-sequences": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", - "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", - "peer": true - }, - "digest-fetch": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/digest-fetch/-/digest-fetch-1.3.0.tgz", - "integrity": "sha512-CGJuv6iKNM7QyZlM2T3sPAdZWd/p9zQiRNS9G+9COUCwzWFTs0Xp8NF5iePx7wtvhDykReiRRrSeNb4oMmB8lA==", - "peer": true, - "requires": { - "base-64": "^0.1.0", - "md5": "^2.3.0" - } - }, - "dotenv": { - "version": "16.6.1", - "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.6.1.tgz", - "integrity": "sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==" - }, - "dunder-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", - "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", - "requires": { - "call-bind-apply-helpers": "^1.0.1", - "es-errors": "^1.3.0", - "gopd": "^1.2.0" - } - }, - "eastasianwidth": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", - "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", - "peer": true - }, - "ecdsa-sig-formatter": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", - "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", - "peer": true, - "requires": { - "safe-buffer": "^5.0.1" - } - }, - "ee-first": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" - }, - "emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "peer": true - }, - "encodeurl": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", - "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==" - }, - "encoding": { - "version": "0.1.13", - "resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.13.tgz", - "integrity": "sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A==", - "optional": true, - "peer": true, - "requires": { - "iconv-lite": "^0.6.2" - }, - "dependencies": { - "iconv-lite": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", - "optional": true, - "peer": true, - "requires": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - } - } - } - }, - "end-of-stream": { - "version": "1.4.5", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.5.tgz", - "integrity": "sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==", - "peer": true, - "requires": { - "once": "^1.4.0" - } - }, - "env-paths": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/env-paths/-/env-paths-2.2.1.tgz", - "integrity": "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==", - "optional": true, - "peer": true - }, - "err-code": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/err-code/-/err-code-2.0.3.tgz", - "integrity": "sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==", - "optional": true, - "peer": true - }, - "es-define-property": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", - "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==" - }, - "es-errors": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", - "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==" - }, - "es-object-atoms": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", - "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", - "requires": { - "es-errors": "^1.3.0" - } - }, - "es-set-tostringtag": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", - "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", - "requires": { - "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.6", - "has-tostringtag": "^1.0.2", - "hasown": "^2.0.2" - } - }, - "escape-html": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", - "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" - }, - "escape-string-regexp": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", - "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", - "peer": true - }, - "etag": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", - "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==" - }, - "event-target-shim": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", - "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==" - }, - "eventemitter3": { - "version": "4.0.7", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", - "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", - "peer": true - }, - "expand-template": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/expand-template/-/expand-template-2.0.3.tgz", - "integrity": "sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==", - "peer": true - }, - "expect": { - "version": "29.7.0", - "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", - "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==", - "peer": true, - "requires": { - "@jest/expect-utils": "^29.7.0", - "jest-get-type": "^29.6.3", - "jest-matcher-utils": "^29.7.0", - "jest-message-util": "^29.7.0", - "jest-util": "^29.7.0" - } - }, - "express": { - "version": "4.21.2", - "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz", - "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==", - "requires": { - "accepts": "~1.3.8", - "array-flatten": "1.1.1", - "body-parser": "1.20.3", - "content-disposition": "0.5.4", - "content-type": "~1.0.4", - "cookie": "0.7.1", - "cookie-signature": "1.0.6", - "debug": "2.6.9", - "depd": "2.0.0", - "encodeurl": "~2.0.0", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "finalhandler": "1.3.1", - "fresh": "0.5.2", - "http-errors": "2.0.0", - "merge-descriptors": "1.0.3", - "methods": "~1.1.2", - "on-finished": "2.4.1", - "parseurl": "~1.3.3", - "path-to-regexp": "0.1.12", - "proxy-addr": "~2.0.7", - "qs": "6.13.0", - "range-parser": "~1.2.1", - "safe-buffer": "5.2.1", - "send": "0.19.0", - "serve-static": "1.16.2", - "setprototypeof": "1.2.0", - "statuses": "2.0.1", - "type-is": "~1.6.18", - "utils-merge": "1.0.1", - "vary": "~1.1.2" - } - }, - "extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", - "peer": true - }, - "fetch-blob": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/fetch-blob/-/fetch-blob-3.2.0.tgz", - "integrity": "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==", - "requires": { - "node-domexception": "^1.0.0", - "web-streams-polyfill": "^3.0.3" - }, - "dependencies": { - "web-streams-polyfill": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", - "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==" - } - } - }, - "file-uri-to-path": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", - "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", - "peer": true - }, - "fill-range": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", - "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", - "peer": true, - "requires": { - "to-regex-range": "^5.0.1" - } - }, - "finalhandler": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", - "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", - "requires": { - "debug": "2.6.9", - "encodeurl": "~2.0.0", - "escape-html": "~1.0.3", - "on-finished": "2.4.1", - "parseurl": "~1.3.3", - "statuses": "2.0.1", - "unpipe": "~1.0.0" - } - }, - "follow-redirects": { - "version": "1.15.11", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", - "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==" - }, - "foreground-child": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz", - "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==", - "peer": true, - "requires": { - "cross-spawn": "^7.0.6", - "signal-exit": "^4.0.1" - } - }, - "form-data": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz", - "integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==", - "requires": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "es-set-tostringtag": "^2.1.0", - "hasown": "^2.0.2", - "mime-types": "^2.1.12" - } - }, - "form-data-encoder": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz", - "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==" - }, - "formdata-node": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz", - "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==", - "requires": { - "node-domexception": "1.0.0", - "web-streams-polyfill": "4.0.0-beta.3" - } - }, - "formdata-polyfill": { - "version": "4.0.10", - "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", - "integrity": "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==", - "requires": { - "fetch-blob": "^3.1.2" - } - }, - "forwarded": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", - "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==" - }, - "fresh": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", - "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==" - }, - "fs-constants": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", - "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==", - "peer": true - }, - "fs-minipass": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", - "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", - "peer": true, - "requires": { - "minipass": "^3.0.0" - }, - "dependencies": { - "minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "peer": true, - "requires": { - "yallist": "^4.0.0" - } - } - } - }, - "fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", - "optional": true, - "peer": true - }, - "function-bind": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", - "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==" - }, - "gauge": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/gauge/-/gauge-4.0.4.tgz", - "integrity": "sha512-f9m+BEN5jkg6a0fZjleidjN51VE1X+mPFQ2DJ0uv1V39oCLCbsGe6yjbBnp7eK7z/+GAon99a3nHuqbuuthyPg==", - "optional": true, - "peer": true, - "requires": { - "aproba": "^1.0.3 || ^2.0.0", - "color-support": "^1.1.3", - "console-control-strings": "^1.1.0", - "has-unicode": "^2.0.1", - "signal-exit": "^3.0.7", - "string-width": "^4.2.3", - "strip-ansi": "^6.0.1", - "wide-align": "^1.1.5" - }, - "dependencies": { - "ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "optional": true, - "peer": true - }, - "emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "optional": true, - "peer": true - }, - "signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", - "optional": true, - "peer": true - }, - "string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "optional": true, - "peer": true, - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - } - }, - "strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "optional": true, - "peer": true, - "requires": { - "ansi-regex": "^5.0.1" - } - } - } - }, - "gaxios": { - "version": "7.1.3", - "resolved": "https://registry.npmjs.org/gaxios/-/gaxios-7.1.3.tgz", - "integrity": "sha512-YGGyuEdVIjqxkxVH1pUTMY/XtmmsApXrCVv5EU25iX6inEPbV+VakJfLealkBtJN69AQmh1eGOdCl9Sm1UP6XQ==", - "peer": true, - "requires": { - "extend": "^3.0.2", - "https-proxy-agent": "^7.0.1", - "node-fetch": "^3.3.2", - "rimraf": "^5.0.1" - } - }, - "gcp-metadata": { - "version": "8.1.2", - "resolved": "https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-8.1.2.tgz", - "integrity": "sha512-zV/5HKTfCeKWnxG0Dmrw51hEWFGfcF2xiXqcA3+J90WDuP0SvoiSO5ORvcBsifmx/FoIjgQN3oNOGaQ5PhLFkg==", - "peer": true, - "requires": { - "gaxios": "^7.0.0", - "google-logging-utils": "^1.0.0", - "json-bigint": "^1.0.0" - } - }, - "generic-pool": { - "version": "3.9.0", - "resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.9.0.tgz", - "integrity": "sha512-hymDOu5B53XvN4QT9dBmZxPX4CWhBPPLguTZ9MMFeFa/Kg0xWVfylOVNlJji/E7yTZWFd/q9GO5TxDLq156D7g==", - "peer": true - }, - "get-intrinsic": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", - "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", - "requires": { - "call-bind-apply-helpers": "^1.0.2", - "es-define-property": "^1.0.1", - "es-errors": "^1.3.0", - "es-object-atoms": "^1.1.1", - "function-bind": "^1.1.2", - "get-proto": "^1.0.1", - "gopd": "^1.2.0", - "has-symbols": "^1.1.0", - "hasown": "^2.0.2", - "math-intrinsics": "^1.1.0" - } - }, - "get-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", - "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", - "requires": { - "dunder-proto": "^1.0.1", - "es-object-atoms": "^1.0.0" - } - }, - "github-from-package": { - "version": "0.0.0", - "resolved": "https://registry.npmjs.org/github-from-package/-/github-from-package-0.0.0.tgz", - "integrity": "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==", - "peer": true - }, - "glob": { - "version": "10.4.5", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", - "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", - "peer": true, - "requires": { - "foreground-child": "^3.1.0", - "jackspeak": "^3.1.2", - "minimatch": "^9.0.4", - "minipass": "^7.1.2", - "package-json-from-dist": "^1.0.0", - "path-scurry": "^1.11.1" - } - }, - "google-auth-library": { - "version": "10.5.0", - "resolved": "https://registry.npmjs.org/google-auth-library/-/google-auth-library-10.5.0.tgz", - "integrity": "sha512-7ABviyMOlX5hIVD60YOfHw4/CxOfBhyduaYB+wbFWCWoni4N7SLcV46hrVRktuBbZjFC9ONyqamZITN7q3n32w==", - "peer": true, - "requires": { - "base64-js": "^1.3.0", - "ecdsa-sig-formatter": "^1.0.11", - "gaxios": "^7.0.0", - "gcp-metadata": "^8.0.0", - "google-logging-utils": "^1.0.0", - "gtoken": "^8.0.0", - "jws": "^4.0.0" - } - }, - "google-logging-utils": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/google-logging-utils/-/google-logging-utils-1.1.2.tgz", - "integrity": "sha512-YsFPGVgDFf4IzSwbwIR0iaFJQFmR5Jp7V1WuYSjuRgAm9yWqsMhKE9YPlL+wvFLnc/wMiFV4SQUD9Y/JMpxIxQ==", - "peer": true - }, - "gopd": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", - "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==" - }, - "graceful-fs": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", - "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", - "peer": true - }, - "groq-sdk": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/groq-sdk/-/groq-sdk-0.3.0.tgz", - "integrity": "sha512-Cdgjh4YoSBE2X4S9sxPGXaAy1dlN4bRtAaDZ3cnq+XsxhhN9WSBeHF64l7LWwuD5ntmw7YC5Vf4Ff1oHCg1LOg==", - "peer": true, - "requires": { - "@types/node": "^18.11.18", - "@types/node-fetch": "^2.6.4", - "abort-controller": "^3.0.0", - "agentkeepalive": "^4.2.1", - "digest-fetch": "^1.3.0", - "form-data-encoder": "1.7.2", - "formdata-node": "^4.3.2", - "node-fetch": "^2.6.7", - "web-streams-polyfill": "^3.2.1" - }, - "dependencies": { - "node-fetch": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", - "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", - "peer": true, - "requires": { - "whatwg-url": "^5.0.0" - } - }, - "web-streams-polyfill": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", - "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", - "peer": true - } - } - }, - "gtoken": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/gtoken/-/gtoken-8.0.0.tgz", - "integrity": "sha512-+CqsMbHPiSTdtSO14O51eMNlrp9N79gmeqmXeouJOhfucAedHw9noVe/n5uJk3tbKE6a+6ZCQg3RPhVhHByAIw==", - "peer": true, - "requires": { - "gaxios": "^7.0.0", - "jws": "^4.0.0" - } - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "peer": true - }, - "has-symbols": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", - "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==" - }, - "has-tostringtag": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", - "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", - "requires": { - "has-symbols": "^1.0.3" - } - }, - "has-unicode": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz", - "integrity": "sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ==", - "optional": true, - "peer": true - }, - "hasown": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", - "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", - "requires": { - "function-bind": "^1.1.2" - } - }, - "http-cache-semantics": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.2.0.tgz", - "integrity": "sha512-dTxcvPXqPvXBQpq5dUr6mEMJX4oIEFv6bwom3FDwKRDsuIjjJGANqhBuoAn9c1RQJIdAKav33ED65E2ys+87QQ==", - "optional": true, - "peer": true - }, - "http-errors": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", - "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", - "requires": { - "depd": "2.0.0", - "inherits": "2.0.4", - "setprototypeof": "1.2.0", - "statuses": "2.0.1", - "toidentifier": "1.0.1" - } - }, - "http-proxy-agent": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-4.0.1.tgz", - "integrity": "sha512-k0zdNgqWTGA6aeIRVpvfVob4fL52dTfaehylg0Y4UvSySvOq/Y+BOyPrgpUrA7HylqvU8vIZGsRuXmspskV0Tg==", - "optional": true, - "peer": true, - "requires": { - "@tootallnate/once": "1", - "agent-base": "6", - "debug": "4" - }, - "dependencies": { - "agent-base": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", - "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", - "optional": true, - "peer": true, - "requires": { - "debug": "4" - } - }, - "debug": { - "version": "4.4.3", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", - "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", - "optional": true, - "peer": true, - "requires": { - "ms": "^2.1.3" - } - }, - "ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "optional": true, - "peer": true - } - } - }, - "https-proxy-agent": { - "version": "7.0.6", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", - "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", - "peer": true, - "requires": { - "agent-base": "^7.1.2", - "debug": "4" - }, - "dependencies": { - "debug": { - "version": "4.4.3", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", - "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", - "peer": true, - "requires": { - "ms": "^2.1.3" - } - }, - "ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "peer": true - } - } - }, - "humanize-ms": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", - "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==", - "requires": { - "ms": "^2.0.0" - } - }, - "iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "requires": { - "safer-buffer": ">= 2.1.2 < 3" - } - }, - "ieee754": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", - "peer": true - }, - "imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", - "optional": true, - "peer": true - }, - "indent-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", - "optional": true, - "peer": true - }, - "infer-owner": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/infer-owner/-/infer-owner-1.0.4.tgz", - "integrity": "sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A==", - "optional": true, - "peer": true - }, - "inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", - "optional": true, - "peer": true, - "requires": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" - }, - "ini": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", - "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", - "peer": true - }, - "ip-address": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-10.0.1.tgz", - "integrity": "sha512-NWv9YLW4PoW2B7xtzaS3NCot75m6nK7Icdv0o3lfMceJVRfSoQwqD4wEH5rLwoKJwUiZ/rfpiVBhnaF0FK4HoA==", - "optional": true, - "peer": true - }, - "ipaddr.js": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", - "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==" - }, - "is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", - "peer": true - }, - "is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "peer": true - }, - "is-lambda": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-lambda/-/is-lambda-1.0.1.tgz", - "integrity": "sha512-z7CMFGNrENq5iFB9Bqo64Xk6Y9sg+epq1myIcdHaGnbMTYOxvzsEtdYqQUylB7LxfkvgrrjP32T6Ywciio9UIQ==", - "optional": true, - "peer": true - }, - "is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "peer": true - }, - "isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "peer": true - }, - "jackspeak": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", - "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", - "peer": true, - "requires": { - "@isaacs/cliui": "^8.0.2", - "@pkgjs/parseargs": "^0.11.0" - } - }, - "jest-diff": { - "version": "29.7.0", - "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", - "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", - "peer": true, - "requires": { - "chalk": "^4.0.0", - "diff-sequences": "^29.6.3", - "jest-get-type": "^29.6.3", - "pretty-format": "^29.7.0" - } - }, - "jest-get-type": { - "version": "29.6.3", - "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", - "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", - "peer": true - }, - "jest-matcher-utils": { - "version": "29.7.0", - "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", - "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", - "peer": true, - "requires": { - "chalk": "^4.0.0", - "jest-diff": "^29.7.0", - "jest-get-type": "^29.6.3", - "pretty-format": "^29.7.0" - } - }, - "jest-message-util": { - "version": "29.7.0", - "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", - "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", - "peer": true, - "requires": { - "@babel/code-frame": "^7.12.13", - "@jest/types": "^29.6.3", - "@types/stack-utils": "^2.0.0", - "chalk": "^4.0.0", - "graceful-fs": "^4.2.9", - "micromatch": "^4.0.4", - "pretty-format": "^29.7.0", - "slash": "^3.0.0", - "stack-utils": "^2.0.3" - } - }, - "jest-util": { - "version": "29.7.0", - "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", - "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", - "peer": true, - "requires": { - "@jest/types": "^29.6.3", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - } - }, - "js-tiktoken": { - "version": "1.0.21", - "resolved": "https://registry.npmjs.org/js-tiktoken/-/js-tiktoken-1.0.21.tgz", - "integrity": "sha512-biOj/6M5qdgx5TKjDnFT1ymSpM5tbd3ylwDtrQvFQSu0Z7bBYko2dF+W/aUkXUPuk6IVpRxk/3Q2sHOzGlS36g==", - "peer": true, - "requires": { - "base64-js": "^1.5.1" - } - }, - "js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", - "peer": true - }, - "json-bigint": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-bigint/-/json-bigint-1.0.0.tgz", - "integrity": "sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ==", - "peer": true, - "requires": { - "bignumber.js": "^9.0.0" - } - }, - "jwa": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.1.tgz", - "integrity": "sha512-hRF04fqJIP8Abbkq5NKGN0Bbr3JxlQ+qhZufXVr0DvujKy93ZCbXZMHDL4EOtodSbCWxOqR8MS1tXA5hwqCXDg==", - "peer": true, - "requires": { - "buffer-equal-constant-time": "^1.0.1", - "ecdsa-sig-formatter": "1.0.11", - "safe-buffer": "^5.0.1" - } - }, - "jws": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.0.tgz", - "integrity": "sha512-KDncfTmOZoOMTFG4mBlG0qUIOlc03fmzH+ru6RgYVZhPkyiy/92Owlt/8UEN+a4TXR1FQetfIpJE8ApdvdVxTg==", - "peer": true, - "requires": { - "jwa": "^2.0.0", - "safe-buffer": "^5.0.1" - } - }, - "langsmith": { - "version": "0.3.77", - "resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.3.77.tgz", - "integrity": "sha512-wbS/9IX/hOAsOEOtPj8kCS8H0tFHaelwQ97gTONRtIfoPPLd9MMUmhk0KQB5DdsGAI5abg966+f0dZ/B+YRRzg==", - "peer": true, - "requires": { - "@types/uuid": "^10.0.0", - "chalk": "^4.1.2", - "console-table-printer": "^2.12.1", - "p-queue": "^6.6.2", - "p-retry": "4", - "semver": "^7.6.3", - "uuid": "^10.0.0" - } - }, - "lru-cache": { - "version": "10.4.3", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", - "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", - "peer": true - }, - "make-fetch-happen": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-9.1.0.tgz", - "integrity": "sha512-+zopwDy7DNknmwPQplem5lAZX/eCOzSvSNNcSKm5eVwTkOBzoktEfXsa9L23J/GIRhxRsaxzkPEhrJEpE2F4Gg==", - "optional": true, - "peer": true, - "requires": { - "agentkeepalive": "^4.1.3", - "cacache": "^15.2.0", - "http-cache-semantics": "^4.1.0", - "http-proxy-agent": "^4.0.1", - "https-proxy-agent": "^5.0.0", - "is-lambda": "^1.0.1", - "lru-cache": "^6.0.0", - "minipass": "^3.1.3", - "minipass-collect": "^1.0.2", - "minipass-fetch": "^1.3.2", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "negotiator": "^0.6.2", - "promise-retry": "^2.0.1", - "socks-proxy-agent": "^6.0.0", - "ssri": "^8.0.0" - }, - "dependencies": { - "agent-base": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", - "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", - "optional": true, - "peer": true, - "requires": { - "debug": "4" - } - }, - "debug": { - "version": "4.4.3", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", - "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", - "optional": true, - "peer": true, - "requires": { - "ms": "^2.1.3" - } - }, - "https-proxy-agent": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", - "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", - "optional": true, - "peer": true, - "requires": { - "agent-base": "6", - "debug": "4" - } - }, - "lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "optional": true, - "peer": true, - "requires": { - "yallist": "^4.0.0" - } - }, - "minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "optional": true, - "peer": true, - "requires": { - "yallist": "^4.0.0" - } - }, - "ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "optional": true, - "peer": true - } - } - }, - "math-intrinsics": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", - "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==" - }, - "md5": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/md5/-/md5-2.3.0.tgz", - "integrity": "sha512-T1GITYmFaKuO91vxyoQMFETst+O71VUPEU3ze5GNzDm0OWdP8v1ziTaAEPUr/3kLsY3Sftgz242A1SetQiDL7g==", - "peer": true, - "requires": { - "charenc": "0.0.2", - "crypt": "0.0.2", - "is-buffer": "~1.1.6" - } - }, - "media-typer": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==" - }, - "mem0ai": { - "version": "2.1.38", - "resolved": "https://registry.npmjs.org/mem0ai/-/mem0ai-2.1.38.tgz", - "integrity": "sha512-es8ffk0VbYJ1RDSblcoYzxaaafDMD8XgvyYTGb0HrKcDLj1rlvFqaV4K5IMBm4GGOAI+I0BwGh8d49z7vC/ajQ==", - "requires": { - "axios": "1.7.7", - "openai": "^4.93.0", - "uuid": "9.0.1", - "zod": "^3.24.1" - }, - "dependencies": { - "uuid": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", - "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==" - } - } - }, - "merge-descriptors": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", - "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==" - }, - "methods": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", - "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==" - }, - "micromatch": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", - "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", - "peer": true, - "requires": { - "braces": "^3.0.3", - "picomatch": "^2.3.1" - } - }, - "mime": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", - "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==" - }, - "mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==" - }, - "mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "requires": { - "mime-db": "1.52.0" - } - }, - "mimic-response": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", - "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", - "peer": true - }, - "minimatch": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", - "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", - "peer": true, - "requires": { - "brace-expansion": "^2.0.1" - } - }, - "minimist": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", - "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", - "peer": true - }, - "minipass": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", - "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", - "peer": true - }, - "minipass-collect": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-1.0.2.tgz", - "integrity": "sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA==", - "optional": true, - "peer": true, - "requires": { - "minipass": "^3.0.0" - }, - "dependencies": { - "minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "optional": true, - "peer": true, - "requires": { - "yallist": "^4.0.0" - } - } - } - }, - "minipass-fetch": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-1.4.1.tgz", - "integrity": "sha512-CGH1eblLq26Y15+Azk7ey4xh0J/XfJfrCox5LDJiKqI2Q2iwOLOKrlmIaODiSQS8d18jalF6y2K2ePUm0CmShw==", - "optional": true, - "peer": true, - "requires": { - "encoding": "^0.1.12", - "minipass": "^3.1.0", - "minipass-sized": "^1.0.3", - "minizlib": "^2.0.0" - }, - "dependencies": { - "minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "optional": true, - "peer": true, - "requires": { - "yallist": "^4.0.0" - } - } - } - }, - "minipass-flush": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/minipass-flush/-/minipass-flush-1.0.5.tgz", - "integrity": "sha512-JmQSYYpPUqX5Jyn1mXaRwOda1uQ8HP5KAT/oDSLCzt1BYRhQU0/hDtsB1ufZfEEzMZ9aAVmsBw8+FWsIXlClWw==", - "optional": true, - "peer": true, - "requires": { - "minipass": "^3.0.0" - }, - "dependencies": { - "minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "optional": true, - "peer": true, - "requires": { - "yallist": "^4.0.0" - } - } - } - }, - "minipass-pipeline": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/minipass-pipeline/-/minipass-pipeline-1.2.4.tgz", - "integrity": "sha512-xuIq7cIOt09RPRJ19gdi4b+RiNvDFYe5JH+ggNvBqGqpQXcru3PcRmOZuHBKWK1Txf9+cQ+HMVN4d6z46LZP7A==", - "optional": true, - "peer": true, - "requires": { - "minipass": "^3.0.0" - }, - "dependencies": { - "minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "optional": true, - "peer": true, - "requires": { - "yallist": "^4.0.0" - } - } - } - }, - "minipass-sized": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/minipass-sized/-/minipass-sized-1.0.3.tgz", - "integrity": "sha512-MbkQQ2CTiBMlA2Dm/5cY+9SWFEN8pzzOXi6rlM5Xxq0Yqbda5ZQy9sU75a673FE9ZK0Zsbr6Y5iP6u9nktfg2g==", - "optional": true, - "peer": true, - "requires": { - "minipass": "^3.0.0" - }, - "dependencies": { - "minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "optional": true, - "peer": true, - "requires": { - "yallist": "^4.0.0" - } - } - } - }, - "minizlib": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", - "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", - "peer": true, - "requires": { - "minipass": "^3.0.0", - "yallist": "^4.0.0" - }, - "dependencies": { - "minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "peer": true, - "requires": { - "yallist": "^4.0.0" - } - } - } - }, - "mkdirp": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", - "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", - "peer": true - }, - "mkdirp-classic": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz", - "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==", - "peer": true - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - }, - "mustache": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/mustache/-/mustache-4.2.0.tgz", - "integrity": "sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==", - "peer": true - }, - "napi-build-utils": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/napi-build-utils/-/napi-build-utils-2.0.0.tgz", - "integrity": "sha512-GEbrYkbfF7MoNaoh2iGG84Mnf/WZfB0GdGEsM8wz7Expx/LlWf5U8t9nvJKXSp3qr5IsEbK04cBGhol/KwOsWA==", - "peer": true - }, - "negotiator": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", - "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==" - }, - "neo4j-driver": { - "version": "5.28.2", - "resolved": "https://registry.npmjs.org/neo4j-driver/-/neo4j-driver-5.28.2.tgz", - "integrity": "sha512-nix4Canllf7Tl4FZL9sskhkKYoCp40fg7VsknSRTRgbm1JaE2F1Ej/c2nqlM06nqh3WrkI0ww3taVB+lem7w7w==", - "peer": true, - "requires": { - "neo4j-driver-bolt-connection": "5.28.2", - "neo4j-driver-core": "5.28.2", - "rxjs": "^7.8.2" - } - }, - "neo4j-driver-bolt-connection": { - "version": "5.28.2", - "resolved": "https://registry.npmjs.org/neo4j-driver-bolt-connection/-/neo4j-driver-bolt-connection-5.28.2.tgz", - "integrity": "sha512-dEX06iNPEo9iyCb0NssxJeA3REN+H+U/Y0MdAjJBEoil4tGz5PxBNZL6/+noQnu2pBJT5wICepakXCrN3etboA==", - "peer": true, - "requires": { - "buffer": "^6.0.3", - "neo4j-driver-core": "5.28.2", - "string_decoder": "^1.3.0" - } - }, - "neo4j-driver-core": { - "version": "5.28.2", - "resolved": "https://registry.npmjs.org/neo4j-driver-core/-/neo4j-driver-core-5.28.2.tgz", - "integrity": "sha512-fBMk4Ox379oOz4FcfdS6ZOxsTEypjkcAelNm9LcWQZ981xCdOnGMzlWL+qXECvL0qUwRfmZxoqbDlJzuzFrdvw==", - "peer": true - }, - "node-abi": { - "version": "3.80.0", - "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.80.0.tgz", - "integrity": "sha512-LyPuZJcI9HVwzXK1GPxWNzrr+vr8Hp/3UqlmWxxh8p54U1ZbclOqbSog9lWHaCX+dBaiGi6n/hIX+mKu74GmPA==", - "peer": true, - "requires": { - "semver": "^7.3.5" - } - }, - "node-addon-api": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-7.1.1.tgz", - "integrity": "sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==", - "peer": true - }, - "node-domexception": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", - "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==" - }, - "node-fetch": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-3.3.2.tgz", - "integrity": "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==", - "requires": { - "data-uri-to-buffer": "^4.0.0", - "fetch-blob": "^3.1.4", - "formdata-polyfill": "^4.0.10" - } - }, - "node-gyp": { - "version": "8.4.1", - "resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-8.4.1.tgz", - "integrity": "sha512-olTJRgUtAb/hOXG0E93wZDs5YiJlgbXxTwQAFHyNlRsXQnYzUaF2aGgujZbw+hR8aF4ZG/rST57bWMWD16jr9w==", - "optional": true, - "peer": true, - "requires": { - "env-paths": "^2.2.0", - "glob": "^7.1.4", - "graceful-fs": "^4.2.6", - "make-fetch-happen": "^9.1.0", - "nopt": "^5.0.0", - "npmlog": "^6.0.0", - "rimraf": "^3.0.2", - "semver": "^7.3.5", - "tar": "^6.1.2", - "which": "^2.0.2" - }, - "dependencies": { - "brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", - "optional": true, - "peer": true, - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "optional": true, - "peer": true, - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "optional": true, - "peer": true, - "requires": { - "brace-expansion": "^1.1.7" - } - }, - "rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "optional": true, - "peer": true, - "requires": { - "glob": "^7.1.3" - } - } - } - }, - "nopt": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-5.0.0.tgz", - "integrity": "sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ==", - "optional": true, - "peer": true, - "requires": { - "abbrev": "1" - } - }, - "npmlog": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-6.0.2.tgz", - "integrity": "sha512-/vBvz5Jfr9dT/aFWd0FIRf+T/Q2WBsLENygUaFUqstqsycmZAP/t5BvFJTK0viFmSUxiUKTUplWy5vt+rvKIxg==", - "optional": true, - "peer": true, - "requires": { - "are-we-there-yet": "^3.0.0", - "console-control-strings": "^1.1.0", - "gauge": "^4.0.3", - "set-blocking": "^2.0.0" - } - }, - "object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==" - }, - "object-inspect": { - "version": "1.13.4", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", - "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==" - }, - "obuf": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", - "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==", - "peer": true - }, - "ollama": { - "version": "0.5.18", - "resolved": "https://registry.npmjs.org/ollama/-/ollama-0.5.18.tgz", - "integrity": "sha512-lTFqTf9bo7Cd3hpF6CviBe/DEhewjoZYd9N/uCe7O20qYTvGqrNOFOBDj3lbZgFWHUgDv5EeyusYxsZSLS8nvg==", - "peer": true, - "requires": { - "whatwg-fetch": "^3.6.20" - } - }, - "on-finished": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", - "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", - "requires": { - "ee-first": "1.1.1" - } - }, - "once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "peer": true, - "requires": { - "wrappy": "1" - } - }, - "openai": { - "version": "4.104.0", - "resolved": "https://registry.npmjs.org/openai/-/openai-4.104.0.tgz", - "integrity": "sha512-p99EFNsA/yX6UhVO93f5kJsDRLAg+CTA2RBqdHK4RtK8u5IJw32Hyb2dTGKbnnFmnuoBv5r7Z2CURI9sGZpSuA==", - "requires": { - "@types/node": "^18.11.18", - "@types/node-fetch": "^2.6.4", - "abort-controller": "^3.0.0", - "agentkeepalive": "^4.2.1", - "form-data-encoder": "1.7.2", - "formdata-node": "^4.3.2", - "node-fetch": "^2.6.7" - }, - "dependencies": { - "node-fetch": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", - "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", - "requires": { - "whatwg-url": "^5.0.0" - } - } - } - }, - "p-finally": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", - "integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==", - "peer": true - }, - "p-map": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", - "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", - "optional": true, - "peer": true, - "requires": { - "aggregate-error": "^3.0.0" - } - }, - "p-queue": { - "version": "6.6.2", - "resolved": "https://registry.npmjs.org/p-queue/-/p-queue-6.6.2.tgz", - "integrity": "sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==", - "peer": true, - "requires": { - "eventemitter3": "^4.0.4", - "p-timeout": "^3.2.0" - } - }, - "p-retry": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", - "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", - "peer": true, - "requires": { - "@types/retry": "0.12.0", - "retry": "^0.13.1" - } - }, - "p-timeout": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-3.2.0.tgz", - "integrity": "sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==", - "peer": true, - "requires": { - "p-finally": "^1.0.0" - } - }, - "package-json-from-dist": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", - "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", - "peer": true - }, - "packet-reader": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/packet-reader/-/packet-reader-1.0.0.tgz", - "integrity": "sha512-HAKu/fG3HpHFO0AA8WE8q2g+gBJaZ9MG7fcKk+IJPLTGAD6Psw4443l+9DGRbOIh3/aXr7Phy0TjilYivJo5XQ==", - "peer": true - }, - "parseurl": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", - "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==" - }, - "path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", - "optional": true, - "peer": true - }, - "path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "peer": true - }, - "path-scurry": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", - "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", - "peer": true, - "requires": { - "lru-cache": "^10.2.0", - "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" - } - }, - "path-to-regexp": { - "version": "0.1.12", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz", - "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==" - }, - "pg": { - "version": "8.11.3", - "resolved": "https://registry.npmjs.org/pg/-/pg-8.11.3.tgz", - "integrity": "sha512-+9iuvG8QfaaUrrph+kpF24cXkH1YOOUeArRNYIxq1viYHZagBxrTno7cecY1Fa44tJeZvaoG+Djpkc3JwehN5g==", - "peer": true, - "requires": { - "buffer-writer": "2.0.0", - "packet-reader": "1.0.0", - "pg-cloudflare": "^1.1.1", - "pg-connection-string": "^2.6.2", - "pg-pool": "^3.6.1", - "pg-protocol": "^1.6.0", - "pg-types": "^2.1.0", - "pgpass": "1.x" - }, - "dependencies": { - "pg-types": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/pg-types/-/pg-types-2.2.0.tgz", - "integrity": "sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==", - "peer": true, - "requires": { - "pg-int8": "1.0.1", - "postgres-array": "~2.0.0", - "postgres-bytea": "~1.0.0", - "postgres-date": "~1.0.4", - "postgres-interval": "^1.1.0" - } - }, - "postgres-array": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/postgres-array/-/postgres-array-2.0.0.tgz", - "integrity": "sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==", - "peer": true - }, - "postgres-bytea": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-1.0.0.tgz", - "integrity": "sha512-xy3pmLuQqRBZBXDULy7KbaitYqLcmxigw14Q5sj8QBVLqEwXfeybIKVWiqAXTlcvdvb0+xkOtDbfQMOf4lST1w==", - "peer": true - }, - "postgres-date": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/postgres-date/-/postgres-date-1.0.7.tgz", - "integrity": "sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==", - "peer": true - }, - "postgres-interval": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/postgres-interval/-/postgres-interval-1.2.0.tgz", - "integrity": "sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==", - "peer": true, - "requires": { - "xtend": "^4.0.0" - } - } - } - }, - "pg-cloudflare": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/pg-cloudflare/-/pg-cloudflare-1.2.7.tgz", - "integrity": "sha512-YgCtzMH0ptvZJslLM1ffsY4EuGaU0cx4XSdXLRFae8bPP4dS5xL1tNB3k2o/N64cHJpwU7dxKli/nZ2lUa5fLg==", - "optional": true, - "peer": true - }, - "pg-connection-string": { - "version": "2.9.1", - "resolved": "https://registry.npmjs.org/pg-connection-string/-/pg-connection-string-2.9.1.tgz", - "integrity": "sha512-nkc6NpDcvPVpZXxrreI/FOtX3XemeLl8E0qFr6F2Lrm/I8WOnaWNhIPK2Z7OHpw7gh5XJThi6j6ppgNoaT1w4w==", - "peer": true - }, - "pg-int8": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/pg-int8/-/pg-int8-1.0.1.tgz", - "integrity": "sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==", - "peer": true - }, - "pg-numeric": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/pg-numeric/-/pg-numeric-1.0.2.tgz", - "integrity": "sha512-BM/Thnrw5jm2kKLE5uJkXqqExRUY/toLHda65XgFTBTFYZyopbKjBe29Ii3RbkvlsMoFwD+tHeGaCjjv0gHlyw==", - "peer": true - }, - "pg-pool": { - "version": "3.10.1", - "resolved": "https://registry.npmjs.org/pg-pool/-/pg-pool-3.10.1.tgz", - "integrity": "sha512-Tu8jMlcX+9d8+QVzKIvM/uJtp07PKr82IUOYEphaWcoBhIYkoHpLXN3qO59nAI11ripznDsEzEv8nUxBVWajGg==", - "peer": true, - "requires": {} - }, - "pg-protocol": { - "version": "1.10.3", - "resolved": "https://registry.npmjs.org/pg-protocol/-/pg-protocol-1.10.3.tgz", - "integrity": "sha512-6DIBgBQaTKDJyxnXaLiLR8wBpQQcGWuAESkRBX/t6OwA8YsqP+iVSiond2EDy6Y/dsGk8rh/jtax3js5NeV7JQ==", - "peer": true - }, - "pg-types": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/pg-types/-/pg-types-4.1.0.tgz", - "integrity": "sha512-o2XFanIMy/3+mThw69O8d4n1E5zsLhdO+OPqswezu7Z5ekP4hYDqlDjlmOpYMbzY2Br0ufCwJLdDIXeNVwcWFg==", - "peer": true, - "requires": { - "pg-int8": "1.0.1", - "pg-numeric": "1.0.2", - "postgres-array": "~3.0.1", - "postgres-bytea": "~3.0.0", - "postgres-date": "~2.1.0", - "postgres-interval": "^3.0.0", - "postgres-range": "^1.1.1" - } - }, - "pgpass": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/pgpass/-/pgpass-1.0.5.tgz", - "integrity": "sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==", - "peer": true, - "requires": { - "split2": "^4.1.0" - } - }, - "picocolors": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", - "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", - "peer": true - }, - "picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "peer": true - }, - "postgres-array": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/postgres-array/-/postgres-array-3.0.4.tgz", - "integrity": "sha512-nAUSGfSDGOaOAEGwqsRY27GPOea7CNipJPOA7lPbdEpx5Kg3qzdP0AaWC5MlhTWV9s4hFX39nomVZ+C4tnGOJQ==", - "peer": true - }, - "postgres-bytea": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-3.0.0.tgz", - "integrity": "sha512-CNd4jim9RFPkObHSjVHlVrxoVQXz7quwNFpz7RY1okNNme49+sVyiTvTRobiLV548Hx/hb1BG+iE7h9493WzFw==", - "peer": true, - "requires": { - "obuf": "~1.1.2" - } - }, - "postgres-date": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/postgres-date/-/postgres-date-2.1.0.tgz", - "integrity": "sha512-K7Juri8gtgXVcDfZttFKVmhglp7epKb1K4pgrkLxehjqkrgPhfG6OO8LHLkfaqkbpjNRnra018XwAr1yQFWGcA==", - "peer": true - }, - "postgres-interval": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/postgres-interval/-/postgres-interval-3.0.0.tgz", - "integrity": "sha512-BSNDnbyZCXSxgA+1f5UU2GmwhoI0aU5yMxRGO8CdFEcY2BQF9xm/7MqKnYoM1nJDk8nONNWDk9WeSmePFhQdlw==", - "peer": true - }, - "postgres-range": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/postgres-range/-/postgres-range-1.1.4.tgz", - "integrity": "sha512-i/hbxIE9803Alj/6ytL7UHQxRvZkI9O4Sy+J3HGc4F4oo/2eQAjTSNJ0bfxyse3bH0nuVesCk+3IRLaMtG3H6w==", - "peer": true - }, - "prebuild-install": { - "version": "7.1.3", - "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.3.tgz", - "integrity": "sha512-8Mf2cbV7x1cXPUILADGI3wuhfqWvtiLA1iclTDbFRZkgRQS0NqsPZphna9V+HyTEadheuPmjaJMsbzKQFOzLug==", - "peer": true, - "requires": { - "detect-libc": "^2.0.0", - "expand-template": "^2.0.3", - "github-from-package": "0.0.0", - "minimist": "^1.2.3", - "mkdirp-classic": "^0.5.3", - "napi-build-utils": "^2.0.0", - "node-abi": "^3.3.0", - "pump": "^3.0.0", - "rc": "^1.2.7", - "simple-get": "^4.0.0", - "tar-fs": "^2.0.0", - "tunnel-agent": "^0.6.0" - } - }, - "pretty-format": { - "version": "29.7.0", - "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", - "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", - "peer": true, - "requires": { - "@jest/schemas": "^29.6.3", - "ansi-styles": "^5.0.0", - "react-is": "^18.0.0" - } - }, - "promise-inflight": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/promise-inflight/-/promise-inflight-1.0.1.tgz", - "integrity": "sha512-6zWPyEOFaQBJYcGMHBKTKJ3u6TBsnMFOIZSa6ce1e/ZrrsOlnHRHbabMjLiBYKp+n44X9eUI6VUPaukCXHuG4g==", - "optional": true, - "peer": true - }, - "promise-retry": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/promise-retry/-/promise-retry-2.0.1.tgz", - "integrity": "sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g==", - "optional": true, - "peer": true, - "requires": { - "err-code": "^2.0.2", - "retry": "^0.12.0" - }, - "dependencies": { - "retry": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", - "integrity": "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==", - "optional": true, - "peer": true - } - } - }, - "proxy-addr": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", - "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", - "requires": { - "forwarded": "0.2.0", - "ipaddr.js": "1.9.1" - } - }, - "proxy-from-env": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", - "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" - }, - "pump": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.3.tgz", - "integrity": "sha512-todwxLMY7/heScKmntwQG8CXVkWUOdYxIvY2s0VWAAMh/nd8SoYiRaKjlr7+iCs984f2P8zvrfWcDDYVb73NfA==", - "peer": true, - "requires": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "qs": { - "version": "6.13.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", - "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", - "requires": { - "side-channel": "^1.0.6" - } - }, - "range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==" - }, - "raw-body": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", - "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", - "requires": { - "bytes": "3.1.2", - "http-errors": "2.0.0", - "iconv-lite": "0.4.24", - "unpipe": "1.0.0" - } - }, - "rc": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", - "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", - "peer": true, - "requires": { - "deep-extend": "^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" - } - }, - "react-is": { - "version": "18.3.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", - "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", - "peer": true - }, - "readable-stream": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", - "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", - "peer": true, - "requires": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - } - }, - "redis": { - "version": "4.7.1", - "resolved": "https://registry.npmjs.org/redis/-/redis-4.7.1.tgz", - "integrity": "sha512-S1bJDnqLftzHXHP8JsT5II/CtHWQrASX5K96REjWjlmWKrviSOLWmM7QnRLstAWsu1VBBV1ffV6DzCvxNP0UJQ==", - "peer": true, - "requires": { - "@redis/bloom": "1.2.0", - "@redis/client": "1.6.1", - "@redis/graph": "1.1.1", - "@redis/json": "1.0.7", - "@redis/search": "1.2.0", - "@redis/time-series": "1.1.0" - } - }, - "retry": { - "version": "0.13.1", - "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", - "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", - "peer": true - }, - "rimraf": { - "version": "5.0.10", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-5.0.10.tgz", - "integrity": "sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ==", - "peer": true, - "requires": { - "glob": "^10.3.7" - } - }, - "rxjs": { - "version": "7.8.2", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.2.tgz", - "integrity": "sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==", - "peer": true, - "requires": { - "tslib": "^2.1.0" - } - }, - "safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - }, - "safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" - }, - "semver": { - "version": "7.7.3", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", - "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", - "peer": true - }, - "send": { - "version": "0.19.0", - "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", - "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", - "requires": { - "debug": "2.6.9", - "depd": "2.0.0", - "destroy": "1.2.0", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "fresh": "0.5.2", - "http-errors": "2.0.0", - "mime": "1.6.0", - "ms": "2.1.3", - "on-finished": "2.4.1", - "range-parser": "~1.2.1", - "statuses": "2.0.1" - }, - "dependencies": { - "encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==" - }, - "ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - } - } - }, - "serve-static": { - "version": "1.16.2", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", - "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", - "requires": { - "encodeurl": "~2.0.0", - "escape-html": "~1.0.3", - "parseurl": "~1.3.3", - "send": "0.19.0" - } - }, - "set-blocking": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", - "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==", - "optional": true, - "peer": true - }, - "setprototypeof": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", - "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" - }, - "shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "peer": true, - "requires": { - "shebang-regex": "^3.0.0" - } - }, - "shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "peer": true - }, - "side-channel": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", - "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", - "requires": { - "es-errors": "^1.3.0", - "object-inspect": "^1.13.3", - "side-channel-list": "^1.0.0", - "side-channel-map": "^1.0.1", - "side-channel-weakmap": "^1.0.2" - } - }, - "side-channel-list": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", - "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", - "requires": { - "es-errors": "^1.3.0", - "object-inspect": "^1.13.3" - } - }, - "side-channel-map": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", - "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", - "requires": { - "call-bound": "^1.0.2", - "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.5", - "object-inspect": "^1.13.3" - } - }, - "side-channel-weakmap": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", - "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", - "requires": { - "call-bound": "^1.0.2", - "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.5", - "object-inspect": "^1.13.3", - "side-channel-map": "^1.0.1" - } - }, - "signal-exit": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", - "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", - "peer": true - }, - "simple-concat": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz", - "integrity": "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==", - "peer": true - }, - "simple-get": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/simple-get/-/simple-get-4.0.1.tgz", - "integrity": "sha512-brv7p5WgH0jmQJr1ZDDfKDOSeWWg+OVypG99A/5vYGPqJ6pxiaHLy8nxtFjBA7oMa01ebA9gfh1uMCFqOuXxvA==", - "peer": true, - "requires": { - "decompress-response": "^6.0.0", - "once": "^1.3.1", - "simple-concat": "^1.0.0" - } - }, - "simple-wcswidth": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/simple-wcswidth/-/simple-wcswidth-1.1.2.tgz", - "integrity": "sha512-j7piyCjAeTDSjzTSQ7DokZtMNwNlEAyxqSZeCS+CXH7fJ4jx3FuJ/mTW3mE+6JLs4VJBbcll0Kjn+KXI5t21Iw==", - "peer": true - }, - "slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", - "peer": true - }, - "smart-buffer": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz", - "integrity": "sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==", - "optional": true, - "peer": true - }, - "socks": { - "version": "2.8.7", - "resolved": "https://registry.npmjs.org/socks/-/socks-2.8.7.tgz", - "integrity": "sha512-HLpt+uLy/pxB+bum/9DzAgiKS8CX1EvbWxI4zlmgGCExImLdiad2iCwXT5Z4c9c3Eq8rP2318mPW2c+QbtjK8A==", - "optional": true, - "peer": true, - "requires": { - "ip-address": "^10.0.1", - "smart-buffer": "^4.2.0" - } - }, - "socks-proxy-agent": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-6.2.1.tgz", - "integrity": "sha512-a6KW9G+6B3nWZ1yB8G7pJwL3ggLy1uTzKAgCb7ttblwqdz9fMGJUuTy3uFzEP48FAs9FLILlmzDlE2JJhVQaXQ==", - "optional": true, - "peer": true, - "requires": { - "agent-base": "^6.0.2", - "debug": "^4.3.3", - "socks": "^2.6.2" - }, - "dependencies": { - "agent-base": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", - "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", - "optional": true, - "peer": true, - "requires": { - "debug": "4" - } - }, - "debug": { - "version": "4.4.3", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", - "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", - "optional": true, - "peer": true, - "requires": { - "ms": "^2.1.3" - } - }, - "ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "optional": true, - "peer": true - } - } - }, - "split2": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/split2/-/split2-4.2.0.tgz", - "integrity": "sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==", - "peer": true - }, - "sqlite3": { - "version": "5.1.7", - "resolved": "https://registry.npmjs.org/sqlite3/-/sqlite3-5.1.7.tgz", - "integrity": "sha512-GGIyOiFaG+TUra3JIfkI/zGP8yZYLPQ0pl1bH+ODjiX57sPhrLU5sQJn1y9bDKZUFYkX1crlrPfSYt0BKKdkog==", - "peer": true, - "requires": { - "bindings": "^1.5.0", - "node-addon-api": "^7.0.0", - "node-gyp": "8.x", - "prebuild-install": "^7.1.1", - "tar": "^6.1.11" - } - }, - "ssri": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/ssri/-/ssri-8.0.1.tgz", - "integrity": "sha512-97qShzy1AiyxvPNIkLWoGua7xoQzzPjQ0HAH4B0rWKo7SZ6USuPcrUiAFrws0UH8RrbWmgq3LMTObhPIHbbBeQ==", - "optional": true, - "peer": true, - "requires": { - "minipass": "^3.1.1" - }, - "dependencies": { - "minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "optional": true, - "peer": true, - "requires": { - "yallist": "^4.0.0" - } - } - } - }, - "stack-utils": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", - "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", - "peer": true, - "requires": { - "escape-string-regexp": "^2.0.0" - } - }, - "statuses": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", - "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==" - }, - "string_decoder": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", - "peer": true, - "requires": { - "safe-buffer": "~5.2.0" - } - }, - "string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - "peer": true, - "requires": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - } - }, - "string-width-cjs": { - "version": "npm:string-width@4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "peer": true, - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "dependencies": { - "ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "peer": true - }, - "emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "peer": true - }, - "strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "peer": true, - "requires": { - "ansi-regex": "^5.0.1" - } - } - } - }, - "strip-ansi": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", - "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", - "peer": true, - "requires": { - "ansi-regex": "^6.0.1" - } - }, - "strip-ansi-cjs": { - "version": "npm:strip-ansi@6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "peer": true, - "requires": { - "ansi-regex": "^5.0.1" - }, - "dependencies": { - "ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "peer": true - } - } - }, - "strip-json-comments": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", - "peer": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "peer": true, - "requires": { - "has-flag": "^4.0.0" - } - }, - "tar": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", - "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", - "peer": true, - "requires": { - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "minipass": "^5.0.0", - "minizlib": "^2.1.1", - "mkdirp": "^1.0.3", - "yallist": "^4.0.0" - }, - "dependencies": { - "minipass": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", - "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", - "peer": true - } - } - }, - "tar-fs": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.4.tgz", - "integrity": "sha512-mDAjwmZdh7LTT6pNleZ05Yt65HC3E+NiQzl672vQG38jIrehtJk/J3mNwIg+vShQPcLF/LV7CMnDW6vjj6sfYQ==", - "peer": true, - "requires": { - "chownr": "^1.1.1", - "mkdirp-classic": "^0.5.2", - "pump": "^3.0.0", - "tar-stream": "^2.1.4" - }, - "dependencies": { - "chownr": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", - "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==", - "peer": true - } - } - }, - "tar-stream": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz", - "integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==", - "peer": true, - "requires": { - "bl": "^4.0.3", - "end-of-stream": "^1.4.1", - "fs-constants": "^1.0.0", - "inherits": "^2.0.3", - "readable-stream": "^3.1.1" - } - }, - "to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "peer": true, - "requires": { - "is-number": "^7.0.0" - } - }, - "toidentifier": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", - "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==" - }, - "tr46": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", - "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" - }, - "tslib": { - "version": "2.8.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", - "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", - "peer": true - }, - "tunnel-agent": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", - "peer": true, - "requires": { - "safe-buffer": "^5.0.1" - } - }, - "type-is": { - "version": "1.6.18", - "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", - "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", - "requires": { - "media-typer": "0.3.0", - "mime-types": "~2.1.24" - } - }, - "typescript": { - "version": "5.9.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", - "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", - "peer": true - }, - "undici": { - "version": "5.28.5", - "resolved": "https://registry.npmjs.org/undici/-/undici-5.28.5.tgz", - "integrity": "sha512-zICwjrDrcrUE0pyyJc1I2QzBkLM8FINsgOrt6WjA+BgajVq9Nxu2PbFFXUrAggLfDXlZGZBVZYw7WNV5KiBiBA==", - "peer": true, - "requires": { - "@fastify/busboy": "^2.0.0" - } - }, - "undici-types": { - "version": "5.26.5", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", - "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" - }, - "unique-filename": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-1.1.1.tgz", - "integrity": "sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==", - "optional": true, - "peer": true, - "requires": { - "unique-slug": "^2.0.0" - } - }, - "unique-slug": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-2.0.2.tgz", - "integrity": "sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==", - "optional": true, - "peer": true, - "requires": { - "imurmurhash": "^0.1.4" - } - }, - "unpipe": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", - "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==" - }, - "util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", - "peer": true - }, - "utils-merge": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", - "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==" - }, - "uuid": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-10.0.0.tgz", - "integrity": "sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==", - "peer": true - }, - "vary": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", - "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==" - }, - "web-streams-polyfill": { - "version": "4.0.0-beta.3", - "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz", - "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==" - }, - "webidl-conversions": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", - "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" - }, - "whatwg-fetch": { - "version": "3.6.20", - "resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.6.20.tgz", - "integrity": "sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg==", - "peer": true - }, - "whatwg-url": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", - "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", - "requires": { - "tr46": "~0.0.3", - "webidl-conversions": "^3.0.0" - } - }, - "which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "peer": true, - "requires": { - "isexe": "^2.0.0" - } - }, - "wide-align": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.5.tgz", - "integrity": "sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg==", - "optional": true, - "peer": true, - "requires": { - "string-width": "^1.0.2 || 2 || 3 || 4" - }, - "dependencies": { - "ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "optional": true, - "peer": true - }, - "emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "optional": true, - "peer": true - }, - "string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "optional": true, - "peer": true, - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - } - }, - "strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "optional": true, - "peer": true, - "requires": { - "ansi-regex": "^5.0.1" - } - } - } - }, - "wrap-ansi": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", - "peer": true, - "requires": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" - }, - "dependencies": { - "ansi-styles": { - "version": "6.2.3", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", - "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", - "peer": true - } - } - }, - "wrap-ansi-cjs": { - "version": "npm:wrap-ansi@7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "peer": true, - "requires": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "peer": true - }, - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "peer": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "peer": true - }, - "string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "peer": true, - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - } - }, - "strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "peer": true, - "requires": { - "ansi-regex": "^5.0.1" - } - } - } - }, - "wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "peer": true - }, - "ws": { - "version": "8.18.3", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz", - "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==", - "peer": true, - "requires": {} - }, - "xtend": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", - "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", - "peer": true - }, - "yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "peer": true - }, - "zod": { - "version": "3.25.76", - "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", - "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==" - }, - "zod-to-json-schema": { - "version": "3.24.6", - "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.24.6.tgz", - "integrity": "sha512-h/z3PKvcTcTetyjl1fkj79MHNEjm+HpD6NXheWjzOekY7kV+lwDYnHw+ivHkijnCSMz1yJaWBD9vu/Fcmk+vEg==", - "peer": true, - "requires": {} - } } } diff --git a/core/relay/package.json b/core/relay/package.json index 099a3c9..b24ae1e 100644 --- a/core/relay/package.json +++ b/core/relay/package.json @@ -9,7 +9,7 @@ "dependencies": { "cors": "^2.8.5", "dotenv": "^16.6.1", - "express": "^4.18.2", + "express": "^4.21.2", "mem0ai": "^2.1.38", "node-fetch": "^3.3.2" } diff --git a/core/relay/server.js b/core/relay/server.js index 47c6345..9d66ed0 100644 --- a/core/relay/server.js +++ b/core/relay/server.js @@ -1,350 +1,368 @@ +// relay v0.3.0 +// Core relay server for Lyra project +// Handles incoming chat requests and forwards them to Cortex services import express from "express"; import dotenv from "dotenv"; import cors from "cors"; -import fs from "fs"; +import fs from "fs/promises"; import path from "path"; -import { reflectWithCortex, ingestToCortex } from "./lib/cortex.js"; +import { fileURLToPath } from "url"; dotenv.config(); -const sessionsDir = path.join(process.cwd(), "sessions"); -if (!fs.existsSync(sessionsDir)) fs.mkdirSync(sessionsDir); +// ES module __dirname workaround +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); +const SESSIONS_DIR = path.join(__dirname, "sessions"); const app = express(); app.use(cors()); app.use(express.json()); -// Cache and normalize env flags/values once -const { - NEOMEM_API, - MEM0_API_KEY, - OPENAI_API_KEY, - OLLAMA_URL, - PERSONA_URL, - CORTEX_ENABLED, - PORT: PORT_ENV, - DEBUG_PROMPT, -} = process.env; +const PORT = Number(process.env.PORT || 7078); -const PORT = Number(PORT_ENV) || 7078; -const cortexEnabled = String(CORTEX_ENABLED).toLowerCase() === "true"; -const debugPrompt = String(DEBUG_PROMPT).toLowerCase() === "true"; +// Cortex endpoints +const CORTEX_REASON = process.env.CORTEX_REASON_URL || "http://cortex:7081/reason"; +const CORTEX_SIMPLE = process.env.CORTEX_SIMPLE_URL || "http://cortex:7081/simple"; -// Basic env validation warnings (non-fatal) -if (!NEOMEM_API || !MEM0_API_KEY) { - console.warn("⚠️ NeoMem configuration missing: NEOMEM_API or MEM0_API_KEY not set."); -} +// ----------------------------------------------------- +// Helper request wrapper +// ----------------------------------------------------- +async function postJSON(url, data) { + const resp = await fetch(url, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(data), + }); + + const raw = await resp.text(); + let json; -/* ------------------------------ - Helpers for NeoMem REST API ---------------------------------*/ -// Small helper for fetch with timeout + JSON + error detail -async function fetchJSON(url, options = {}, timeoutMs = 30000) { - const controller = new AbortController(); - const t = setTimeout(() => controller.abort(), timeoutMs); try { - const resp = await fetch(url, { ...options, signal: controller.signal }); - const text = await resp.text(); - const parsed = text ? JSON.parse(text) : null; - if (!resp.ok) { - const msg = parsed?.error || parsed?.message || text || resp.statusText; - throw new Error(`${resp.status} ${msg}`); - } - return parsed; - } finally { - clearTimeout(t); + json = raw ? JSON.parse(raw) : null; + } catch (e) { + throw new Error(`Non-JSON from ${url}: ${raw}`); } + + if (!resp.ok) { + throw new Error(json?.detail || json?.error || raw); + } + + return json; } -async function memAdd(content, userId, sessionId, cortexData) { - const url = `${NEOMEM_API}/memories`; +// ----------------------------------------------------- +// The unified chat handler +// ----------------------------------------------------- +async function handleChatRequest(session_id, user_msg, mode = "cortex", backend = null) { + let reason; + + // Determine which endpoint to use based on mode + const endpoint = mode === "standard" ? CORTEX_SIMPLE : CORTEX_REASON; + const modeName = mode === "standard" ? "simple" : "reason"; + + console.log(`Relay β†’ routing to Cortex.${modeName} (mode: ${mode}${backend ? `, backend: ${backend}` : ''})`); + + // Build request payload const payload = { - messages: [{ role: "user", content }], - user_id: userId, - // run_id: sessionId, - metadata: { source: "relay", cortex: cortexData }, + session_id, + user_prompt: user_msg }; - return fetchJSON(url, { - method: "POST", - headers: { - "Content-Type": "application/json", - Authorization: `Bearer ${MEM0_API_KEY}`, - }, - body: JSON.stringify(payload), - }); -} -async function memSearch(query, userId, sessionId) { - const url = `${NEOMEM_API}/search`; - const payload = { query, user_id: userId }; - return fetchJSON(url, { - method: "POST", - headers: { - "Content-Type": "application/json", - Authorization: `Bearer ${MEM0_API_KEY}`, - }, - body: JSON.stringify(payload), - }); -} - -/* ------------------------------ - Utility to time spans ---------------------------------*/ -async function span(name, fn) { - const start = Date.now(); - try { - return await fn(); - } finally { - console.log(`${name} took ${Date.now() - start}ms`); + // Add backend parameter if provided (only for standard mode) + if (backend && mode === "standard") { + payload.backend = backend; } + + // Call appropriate Cortex endpoint + try { + reason = await postJSON(endpoint, payload); + } catch (e) { + console.error(`Relay β†’ Cortex.${modeName} error:`, e.message); + throw new Error(`cortex_${modeName}_failed: ${e.message}`); + } + + // Correct persona field + const persona = + reason.persona || + reason.final_output || + "(no persona text)"; + + // Return final answer + return { + session_id, + reply: persona + }; } -/* ------------------------------ - Healthcheck ---------------------------------*/ -app.get("/_health", (req, res) => { - res.json({ ok: true, time: new Date().toISOString() }); -}); - -/* ------------------------------ - Sessions ---------------------------------*/ -// List all saved sessions -app.get("/sessions", (_, res) => { - const list = fs.readdirSync(sessionsDir) - .filter(f => f.endsWith(".json")) - .map(f => f.replace(".json", "")); - res.json(list); -}); - -// Load a single session -app.get("/sessions/:id", (req, res) => { - const file = path.join(sessionsDir, `${req.params.id}.json`); - if (!fs.existsSync(file)) return res.json([]); - res.json(JSON.parse(fs.readFileSync(file, "utf8"))); -}); - -// Save or update a session -app.post("/sessions/:id", (req, res) => { - const file = path.join(sessionsDir, `${req.params.id}.json`); - fs.writeFileSync(file, JSON.stringify(req.body, null, 2)); +// ----------------------------------------------------- +// HEALTHCHECK +// ----------------------------------------------------- +app.get("/_health", (_, res) => { res.json({ ok: true }); }); -/* ------------------------------ - Chat completion endpoint ---------------------------------*/ +// ----------------------------------------------------- +// OPENAI-COMPATIBLE ENDPOINT +// ----------------------------------------------------- app.post("/v1/chat/completions", async (req, res) => { try { - const { model, messages, sessionId: clientSessionId } = req.body || {}; - if (!Array.isArray(messages) || !messages.length) { - return res.status(400).json({ error: "invalid_messages" }); - } - if (!model || typeof model !== "string") { - return res.status(400).json({ error: "invalid_model" }); + const session_id = req.body.session_id || req.body.sessionId || req.body.user || "default"; + const messages = req.body.messages || []; + const lastMessage = messages[messages.length - 1]; + const user_msg = lastMessage?.content || ""; + const mode = req.body.mode || "cortex"; // Get mode from request, default to cortex + const backend = req.body.backend || null; // Get backend preference + + if (!user_msg) { + return res.status(400).json({ error: "No message content provided" }); } - const sessionId = clientSessionId || "default"; - const userId = "brian"; // fixed for now + console.log(`Relay (v1) β†’ received: "${user_msg}" [mode: ${mode}${backend ? `, backend: ${backend}` : ''}]`); - console.log(`πŸ›°οΈ Incoming request. Session: ${sessionId}`); + const result = await handleChatRequest(session_id, user_msg, mode, backend); - // Find last user message efficiently - const lastUserMsg = [...messages].reverse().find(m => m.role === "user")?.content; - if (!lastUserMsg) { - return res.status(400).json({ error: "no_user_message" }); - } - - // 1. Cortex Reflection (new pipeline) - /*let reflection = {}; - try { - console.log("🧠 Reflecting with Cortex..."); - const memoriesPreview = []; // we'll fill this in later with memSearch - reflection = await reflectWithCortex(lastUserMsg, memoriesPreview); - console.log("πŸ” Reflection:", reflection); - } catch (err) { - console.warn("⚠️ Cortex reflect failed:", err.message); - reflection = { error: err.message }; - }*/ - - // 2. Search memories - /* let memorySnippets = []; - await span("mem.search", async () => { - if (NEOMEM_API && MEM0_API_KEY) { - try { - const { results } = await memSearch(lastUserMsg, userId, sessionId); - if (results?.length) { - console.log(`πŸ“š Mem0 hits: ${results.length}`); - results.forEach((r, i) => - console.log(` ${i + 1}) ${r.memory} (score ${Number(r.score).toFixed(3)})`) - ); - memorySnippets = results.map((r, i) => `${i + 1}) ${r.memory}`); - } else { - console.log("😴 No memories found"); - } - } catch (e) { - console.warn("⚠️ mem.search failed:", e.message); - } + res.json({ + id: `chatcmpl-${Date.now()}`, + object: "chat.completion", + created: Math.floor(Date.now() / 1000), + model: "lyra", + choices: [{ + index: 0, + message: { + role: "assistant", + content: result.reply + }, + finish_reason: "stop" + }], + usage: { + prompt_tokens: 0, + completion_tokens: 0, + total_tokens: 0 } - });*/ - - // 3. Fetch persona -/* let personaText = "Persona: Lyra πŸ€– friendly, concise, poker-savvy."; - await span("persona.fetch", async () => { - try { - if (PERSONA_URL) { - const data = await fetchJSON(PERSONA_URL); - if (data?.persona) { - const name = data.persona.name ?? "Lyra"; - const style = data.persona.style ?? "friendly, concise"; - const protocols = Array.isArray(data.persona.protocols) ? data.persona.protocols.join(", ") : ""; - personaText = `Persona: ${name} πŸ€– ${style}. Protocols: ${protocols}`.trim(); - } - } - } catch (err) { - console.error("πŸ’₯ persona.fetch failed", err); - } - }); */ - - // 1. Ask Cortex to build the final prompt - let cortexPrompt = ""; - try { - console.log("🧠 Requesting prompt from Cortex..."); - const response = await fetch(`${process.env.CORTEX_API_URL || "http://10.0.0.41:7081"}/reason`, { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ - prompt: lastUserMsg, - session_id: sessionId, - user_id: userId - }) - }); - const data = await response.json(); - cortexPrompt = data.full_prompt || data.prompt || ""; - console.log("🧩 Cortex returned prompt"); - } catch (err) { - console.warn("⚠️ Cortex prompt build failed:", err.message); - } - - - // 4. Build final messages - const injectedMessages = [ - { role: "system", content: cortexPrompt || "You are Lyra." }, - ...messages, - ]; - - if (debugPrompt) { - console.log("\n==== Injected Prompt ===="); - console.log(JSON.stringify(injectedMessages, null, 2)); - console.log("=========================\n"); - } - - // 5. Call LLM (OpenAI or Ollama) - const isOllama = model.startsWith("ollama:"); - const llmUrl = isOllama - ? `${OLLAMA_URL}/api/chat` - : "https://api.openai.com/v1/chat/completions"; - - const llmHeaders = isOllama - ? { "Content-Type": "application/json" } - : { - "Content-Type": "application/json", - Authorization: `Bearer ${OPENAI_API_KEY}`, - }; - - const llmBody = { - model: isOllama ? model.replace("ollama:", "") : model, - messages: injectedMessages, // <-- make sure injectedMessages is defined above this section - stream: false, - }; - - const data = await fetchJSON(llmUrl, { - method: "POST", - headers: llmHeaders, - body: JSON.stringify(llmBody), - }); - - // define once for everything below - const assistantReply = isOllama - ? data?.message?.content - : data?.choices?.[0]?.message?.content || data?.choices?.[0]?.text || ""; - - // 🧠 Send exchange back to Cortex for ingest - try { - await ingestToCortex(lastUserMsg, assistantReply || "", {}, sessionId); - console.log("πŸ“€ Sent exchange back to Cortex ingest"); - } catch (err) { - console.warn("⚠️ Cortex ingest failed:", err.message); - } - - // πŸ’Ύ Save exchange to session log - try { - const logFile = path.join(sessionsDir, `${sessionId}.jsonl`); - const entry = JSON.stringify({ - ts: new Date().toISOString(), - turn: [ - { role: "user", content: lastUserMsg }, - { role: "assistant", content: assistantReply || "" } - ] - }) + "\n"; - fs.appendFileSync(logFile, entry, "utf8"); - console.log(`🧠 Logged session exchange β†’ ${logFile}`); - } catch (e) { - console.warn("⚠️ Session log write failed:", e.message); - } - - // πŸ”„ Forward user↔assistant exchange to Intake summarizer - if (process.env.INTAKE_API_URL) { - try { - const intakePayload = { - session_id: sessionId, - turns: [ - { role: "user", content: lastUserMsg }, - { role: "assistant", content: assistantReply || "" } - ] - }; - - await fetch(process.env.INTAKE_API_URL, { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify(intakePayload), - }); - - console.log("πŸ“¨ Sent exchange to Intake summarizer"); - } catch (err) { - console.warn("⚠️ Intake post failed:", err.message); - } - } - - - - if (isOllama) { - res.json({ - id: "ollama-" + Date.now(), - object: "chat.completion", - created: Math.floor(Date.now() / 1000), - model, - choices: [ - { - index: 0, - message: data?.message || { role: "assistant", content: "" }, - finish_reason: "stop", - }, - ], - }); - } else { - res.json(data); - } + }); } catch (err) { - console.error("πŸ’₯ relay error", err); - res.status(500).json({ error: "relay_failed", detail: err.message }); + console.error("Relay v1 fatal:", err); + res.status(500).json({ + error: { + message: err.message || String(err), + type: "server_error", + code: "relay_failed" + } + }); } }); -/* ------------------------------ - Start server ---------------------------------*/ -app.listen(PORT, () => { - console.log(`Relay listening on port ${PORT}`); +// ----------------------------------------------------- +// MAIN ENDPOINT (Lyra-native UI) +// ----------------------------------------------------- +app.post("/chat", async (req, res) => { + try { + const session_id = req.body.session_id || "default"; + const user_msg = req.body.message || ""; + const mode = req.body.mode || "cortex"; // Get mode from request, default to cortex + const backend = req.body.backend || null; // Get backend preference + + console.log(`Relay β†’ received: "${user_msg}" [mode: ${mode}${backend ? `, backend: ${backend}` : ''}]`); + + const result = await handleChatRequest(session_id, user_msg, mode, backend); + res.json(result); + + } catch (err) { + console.error("Relay fatal:", err); + res.status(500).json({ + error: "relay_failed", + detail: err.message || String(err) + }); + } +}); + +// ----------------------------------------------------- +// SESSION ENDPOINTS (for UI) +// ----------------------------------------------------- +// Helper functions for session persistence +async function ensureSessionsDir() { + try { + await fs.mkdir(SESSIONS_DIR, { recursive: true }); + } catch (err) { + console.error("Failed to create sessions directory:", err); + } +} + +async function loadSession(sessionId) { + try { + const sessionPath = path.join(SESSIONS_DIR, `${sessionId}.json`); + const data = await fs.readFile(sessionPath, "utf-8"); + return JSON.parse(data); + } catch (err) { + // File doesn't exist or is invalid - return empty array + return []; + } +} + +async function saveSession(sessionId, history, metadata = {}) { + try { + await ensureSessionsDir(); + const sessionPath = path.join(SESSIONS_DIR, `${sessionId}.json`); + const metadataPath = path.join(SESSIONS_DIR, `${sessionId}.meta.json`); + + // Save history + await fs.writeFile(sessionPath, JSON.stringify(history, null, 2), "utf-8"); + + // Save metadata (name, etc.) + await fs.writeFile(metadataPath, JSON.stringify(metadata, null, 2), "utf-8"); + + return true; + } catch (err) { + console.error(`Failed to save session ${sessionId}:`, err); + return false; + } +} + +async function loadSessionMetadata(sessionId) { + try { + const metadataPath = path.join(SESSIONS_DIR, `${sessionId}.meta.json`); + const data = await fs.readFile(metadataPath, "utf-8"); + return JSON.parse(data); + } catch (err) { + // No metadata file, return default + return { name: sessionId }; + } +} + +async function saveSessionMetadata(sessionId, metadata) { + try { + await ensureSessionsDir(); + const metadataPath = path.join(SESSIONS_DIR, `${sessionId}.meta.json`); + await fs.writeFile(metadataPath, JSON.stringify(metadata, null, 2), "utf-8"); + return true; + } catch (err) { + console.error(`Failed to save metadata for ${sessionId}:`, err); + return false; + } +} + +async function listSessions() { + try { + await ensureSessionsDir(); + const files = await fs.readdir(SESSIONS_DIR); + const sessions = []; + + for (const file of files) { + if (file.endsWith(".json") && !file.endsWith(".meta.json")) { + const sessionId = file.replace(".json", ""); + const sessionPath = path.join(SESSIONS_DIR, file); + const stats = await fs.stat(sessionPath); + + // Try to read the session to get message count + let messageCount = 0; + try { + const data = await fs.readFile(sessionPath, "utf-8"); + const history = JSON.parse(data); + messageCount = history.length; + } catch (e) { + // Invalid JSON, skip + } + + // Load metadata (name) + const metadata = await loadSessionMetadata(sessionId); + + sessions.push({ + id: sessionId, + name: metadata.name || sessionId, + lastModified: stats.mtime, + messageCount + }); + } + } + + // Sort by last modified (newest first) + sessions.sort((a, b) => b.lastModified - a.lastModified); + return sessions; + } catch (err) { + console.error("Failed to list sessions:", err); + return []; + } +} + +async function deleteSession(sessionId) { + try { + const sessionPath = path.join(SESSIONS_DIR, `${sessionId}.json`); + const metadataPath = path.join(SESSIONS_DIR, `${sessionId}.meta.json`); + + // Delete session file + await fs.unlink(sessionPath); + + // Delete metadata file (if exists) + try { + await fs.unlink(metadataPath); + } catch (e) { + // Metadata file doesn't exist, that's ok + } + + return true; + } catch (err) { + console.error(`Failed to delete session ${sessionId}:`, err); + return false; + } +} + +// GET /sessions - List all sessions +app.get("/sessions", async (req, res) => { + const sessions = await listSessions(); + res.json(sessions); +}); + +// GET /sessions/:id - Get specific session history +app.get("/sessions/:id", async (req, res) => { + const sessionId = req.params.id; + const history = await loadSession(sessionId); + res.json(history); +}); + +// POST /sessions/:id - Save session history +app.post("/sessions/:id", async (req, res) => { + const sessionId = req.params.id; + const history = req.body; + + // Load existing metadata to preserve it + const existingMetadata = await loadSessionMetadata(sessionId); + const success = await saveSession(sessionId, history, existingMetadata); + + if (success) { + res.json({ ok: true, saved: history.length }); + } else { + res.status(500).json({ error: "Failed to save session" }); + } +}); + +// PATCH /sessions/:id/metadata - Update session metadata (name, etc.) +app.patch("/sessions/:id/metadata", async (req, res) => { + const sessionId = req.params.id; + const metadata = req.body; + const success = await saveSessionMetadata(sessionId, metadata); + + if (success) { + res.json({ ok: true, metadata }); + } else { + res.status(500).json({ error: "Failed to update metadata" }); + } +}); + +// DELETE /sessions/:id - Delete a session +app.delete("/sessions/:id", async (req, res) => { + const sessionId = req.params.id; + const success = await deleteSession(sessionId); + + if (success) { + res.json({ ok: true, deleted: sessionId }); + } else { + res.status(500).json({ error: "Failed to delete session" }); + } +}); + +// ----------------------------------------------------- +app.listen(PORT, () => { + console.log(`Relay is online on port ${PORT}`); }); diff --git a/core/relay/sessions/sess-6rxu7eia.json b/core/relay/sessions/sess-6rxu7eia.json deleted file mode 100644 index 9203b5d..0000000 --- a/core/relay/sessions/sess-6rxu7eia.json +++ /dev/null @@ -1,50 +0,0 @@ -[ - { - "role": "user", - "content": "Hey im testing stuff! how goes it?" - }, - { - "role": "assistant", - "content": "Hey! I'm here and ready to help. What are you testing?" - }, - { - "role": "user", - "content": "testing the logic loop for your brain!" - }, - { - "role": "assistant", - "content": "Sounds interesting! How can I assist you with that?" - }, - { - "role": "user", - "content": "Just keep answering me!@" - }, - { - "role": "assistant", - "content": "Absolutely! I'm here to respond. What’s next?" - }, - { - "role": "user", - "content": "how about now how are you?" - }, - { - "role": "assistant", - "content": "I'm doing well, thanks for asking! How about you?" - }, - { - "role": "user", - "content": "are you able to tell me what your prompts are saying to you?" - }, - { - "role": "assistant", - "content": "I can’t share specific internal prompts or system messages, but I can tell you that I’m designed to understand and respond to your questions as best as I can! What else would you like to know?" - }, - { - "role": "user", - "content": "ok thanks how is the cheese i gave you?" - }, - { - "role": "assistant", - "content": "I appreciate the gesture! If I could taste it, I’m sure it would be delightful. What kind of cheese was it?" - } -] \ No newline at end of file diff --git a/core/relay/sessions/sess-6rxu7eia.jsonl b/core/relay/sessions/sess-6rxu7eia.jsonl deleted file mode 100644 index 1289ca5..0000000 --- a/core/relay/sessions/sess-6rxu7eia.jsonl +++ /dev/null @@ -1,6 +0,0 @@ -{"ts":"2025-11-12T20:41:32.497Z","turn":[{"role":"user","content":"Hey im testing stuff! how goes it?"},{"role":"assistant","content":"Hey! I'm here and ready to help. What are you testing?"}]} -{"ts":"2025-11-12T20:59:32.992Z","turn":[{"role":"user","content":"testing the logic loop for your brain!"},{"role":"assistant","content":"Sounds interesting! How can I assist you with that?"}]} -{"ts":"2025-11-12T22:08:09.644Z","turn":[{"role":"user","content":"Just keep answering me!@"},{"role":"assistant","content":"Absolutely! I'm here to respond. What’s next?"}]} -{"ts":"2025-11-12T22:08:48.693Z","turn":[{"role":"user","content":"how about now how are you?"},{"role":"assistant","content":"I'm doing well, thanks for asking! How about you?"}]} -{"ts":"2025-11-12T22:10:46.435Z","turn":[{"role":"user","content":"are you able to tell me what your prompts are saying to you?"},{"role":"assistant","content":"I can’t share specific internal prompts or system messages, but I can tell you that I’m designed to understand and respond to your questions as best as I can! What else would you like to know?"}]} -{"ts":"2025-11-12T23:10:16.321Z","turn":[{"role":"user","content":"ok thanks how is the cheese i gave you?"},{"role":"assistant","content":"I appreciate the gesture! If I could taste it, I’m sure it would be delightful. What kind of cheese was it?"}]} diff --git a/core/relay/sessions/sess-l08ndm60.json b/core/relay/sessions/sess-l08ndm60.json deleted file mode 100644 index cd58615..0000000 --- a/core/relay/sessions/sess-l08ndm60.json +++ /dev/null @@ -1,122 +0,0 @@ -[ - { - "role": "user", - "content": "Yo yo yo wusup" - }, - { - "role": "assistant", - "content": "Hey Brian! Not much, just here to help you out. What's up with you?" - }, - { - "role": "user", - "content": "werkin on codin'" - }, - { - "role": "assistant", - "content": "Nice! What are you working on? Need any help with coding or anything related to your projects?" - }, - { - "role": "user", - "content": "working on a little program that saves our chats so you can search them again in different chats" - }, - { - "role": "assistant", - "content": "That sounds like a great idea! Having a program to save and search through our chats could really enhance the way you interact with Lyra. Do you have a specific approach in mind for how you'll implement the search functionality?" - }, - { - "role": "user", - "content": "just adding a simple logger into the code you run on" - }, - { - "role": "assistant", - "content": "That sounds like a solid plan! Implementing a simple logger can make it easier to keep track of conversations. Are you thinking of any specific features for the search functionality, like filtering by keywords or date?" - }, - { - "role": "user", - "content": "i'll like use a basic word search, possibly integrate it into a rag system i have built" - }, - { - "role": "assistant", - "content": "Integrating a basic word search into your RAG system sounds like a smart approach! It could enhance the way you retrieve specific chats. Are you planning to use any specific libraries or tools for your RAG system?" - }, - { - "role": "user", - "content": "no clue!" - }, - { - "role": "assistant", - "content": "No worries! If you need any suggestions on libraries or tools that could help you with implementing the RAG system or the word search feature, just let me know. I'm here to help!" - }, - { - "role": "user", - "content": "just having some coding issues, i think i was editing the wrong version of a file LOL" - }, - { - "role": "assistant", - "content": "That can definitely happen! It can be tricky to keep track of different versions. Have you made any backups or version control for your projects? That might help avoid mix-ups in the future!" - }, - { - "role": "user", - "content": "i have everything syncing back to my main dev computer, i just do it manually. i have no idea how to use git" - }, - { - "role": "assistant", - "content": "(no reply)" - }, - { - "role": "user", - "content": "i have everything syncing back to my main dev computer, i just do it manually. i have no idea how to use git" - }, - { - "role": "assistant", - "content": "(no reply)" - }, - { - "role": "user", - "content": "i have everything syncing back to my main dev computer, i just do it manually. i have no idea how to use git" - }, - { - "role": "assistant", - "content": "Manually syncing is a good start, but learning Git can really help streamline your workflow and keep track of changes. If you're interested, I can provide some resources or tips on how to get started with Git! It can make managing different versions of your code much easier." - }, - { - "role": "user", - "content": "what is the point of git? how can it streamline my work?" - }, - { - "role": "assistant", - "content": "Git is a version control system that helps you keep track of changes in your code over time. Here are some ways it can streamline your work:\n\n1. **Version Tracking**: Git allows you to save versions of your code so you can revert back to previous versions if needed. This is especially helpful if you make a change that breaks something.\n\n2. **Collaboration**: If you ever work with others, Git makes it easy to collaborate on projects. You can work on separate branches, merge changes, and resolve conflicts.\n\n3. **Branching**: You can create branches for new features or experiments without affecting the main codebase. Once you're satisfied with the changes, you can merge them back.\n\n4. **Snapshotting**: Git takes snapshots of your code at different points in time, so you can easily see what your code looked like at any point.\n\n5. **Easy Backup**: By pushing your code to a remote repository (like GitHub or GitLab), you can back up your work and access it from anywhere.\n\nLearning Git might take a bit of time, but it can greatly enhance your coding efficiency and help you manage your projects better. If you're interested, I can point you to some beginner-friendly tutorials!" - }, - { - "role": "user", - "content": "say i am writing one program on my main computer, and then im working on one that lives in a VM. Can i put them both in the same git?" - }, - { - "role": "assistant", - "content": "Yes, you can use the same Git repository for your code on both your main computer and the VM. Here’s how you can do it:\n\n1. **Create a Git Repository**: You can create a Git repository on your main computer for your program. Use the command `git init` in your project's directory to initialize it.\n\n2. **Commit Changes**: As you make changes to your code on your main computer, you can use `git add .` to stage changes and `git commit -m \"your message\"` to commit them.\n\n3. **Clone the Repository on your VM**: You can then clone the Git repository on your VM using `git clone `. This gives you a copy of the same repository on your VM.\n\n4. **Work on Both**: You can now work on either machine. When you make changes on one, commit those changes, and then pull the latest changes from the other machine to keep everything in sync.\n\n5. **Push and Pull**: Use `git push` to push your changes to a remote repository (like GitHub) if you're using one. Use `git pull` on the other machine to pull the latest code.\n\nJust be careful about making sure you commit and push your changes before switching between machines to avoid conflicts. This setup allows you to maintain a single version of your code across both systems. If you need help with specific commands or setups, just let me know!" - }, - { - "role": "user", - "content": "hmm i guess that might be useful. Im writting code that is running on a VM. I want to test and experiment while i write the code. The VM is headless though so i've been doing all my editing and work via SSH and SFTP." - }, - { - "role": "assistant", - "content": "Using a VM and working through SSH and SFTP is a solid way to manage your coding environment! Git can definitely be beneficial in this setup as well. Here’s how it can help:\n\n1. **Easier Code Management**: With Git, you can track changes to your code directly within the VM, making it easier to manage versions as you experiment.\n\n2. **Experiment Branches**: You can create branches for different experiments. This lets you test new features or fixes without affecting your main code, and you can easily switch back and forth between branches.\n\n3. **Collaborative Testing**: If you ever work with others or want to test things collaboratively, you can push changes to a remote repository, making it easy for others to pull and test the latest code.\n\n4. **Integrated with SSH**: You can run Git commands directly from your SSH terminal on the VM, making it straightforward to use. You won’t need any additional tools.\n\n5. **Seamless Remote Access**: Since you’re using SFTP, you can clone the repository to your VM. This way, you can easily pull updates you've made on your main computer, or push changes made on the VM back to the main repository.\n\nOverall, integrating Git into your workflow can enhance your coding experience while working in a headless environment. If you decide to go for it, I can help with the initial setup or any Git-related questions you might have!" - }, - { - "role": "user", - "content": "so is git like a compressed version of a directory?" - }, - { - "role": "assistant", - "content": "Not exactly, but that's a good way to think about some of its features! Here's a more precise breakdown:\n\n1. **Snapshots, Not Compression**: Git takes \"snapshots\" of your files at different points in time, meaning it records a full version of your directory's contents every time you make a commit. While this does involve storing the files, it doesn't compress them in the traditional senseβ€”Git keeps track of changes efficiently, so only the differences between versions are stored after the first snapshot.\n\n2. **Directory Structure**: Git is designed to track changes in your source code, including the entire directory structure, file contents, and metadata. It allows you to manage multiple versions of a directory and the files within it.\n\n3. **Branching and Merging**: Git allows you to create branches, which can be thought of as \"parallel versions\" of your directory. Each branch can have different sets of changes, and you can merge these branches to integrate different features or fixes.\n\n4. **History Tracking**: Git also maintains a history of changes, so you can navigate through previous versions of your directory. This includes seeing what changes were made, who made them, and when they occurred.\n\nThe combination of these features makes Git a powerful tool for version control, allowing you to manage your projects and code changes more effectively. If you're curious about any specific aspect of Git or how it works, feel free to ask!" - }, - { - "role": "user", - "content": "hmm i still dont see the point of it. atleast not for this project. This project is mostly about learning how shit works, im not planning to push this as a product or collab with anyone" - }, - { - "role": "assistant", - "content": "That makes total sense! If this project is primarily for learning and experimentation, you might not need the full functionality of Git right now. Many developers start without it, especially when they’re working on smaller projects or learning something new.\n\nHere are a few reasons you might still consider using Git, even for personal learning projects:\n\n1. **Tracking Changes**: It can help you understand what changes you made over time. You can easily go back to a previous point if something breaks or if you want to see how you arrived at your current solution.\n\n2. **Learning Opportunity**: Getting familiar with Git now can make it easier when you start working on larger projects or collaborating in the future. It's a valuable skill in the software development world.\n\n3. **Experimentation**: If you're trying out different ideas, branches can help you manage experiments without affecting your main work.\n\nHowever, if you're comfortable managing your changes manually and don’t see the benefit for this particular project, that’s completely okay! Focus on what helps you learn best. If you have specific goals or areas you want to explore, I’m here to help with that too!" - } -] \ No newline at end of file diff --git a/core/relay/sessions/sess-l08ndm60.jsonl b/core/relay/sessions/sess-l08ndm60.jsonl deleted file mode 100644 index 7c5fef5..0000000 --- a/core/relay/sessions/sess-l08ndm60.jsonl +++ /dev/null @@ -1,6 +0,0 @@ -{"ts":"2025-11-12T20:18:06.822Z","turn":[{"role":"user","content":"i have everything syncing back to my main dev computer, i just do it manually. i have no idea how to use git"},{"role":"assistant","content":"Manually syncing is a good start, but learning Git can really help streamline your workflow and keep track of changes. If you're interested, I can provide some resources or tips on how to get started with Git! It can make managing different versions of your code much easier."}]} -{"ts":"2025-11-12T20:29:20.003Z","turn":[{"role":"user","content":"what is the point of git? how can it streamline my work?"},{"role":"assistant","content":"Git is a version control system that helps you keep track of changes in your code over time. Here are some ways it can streamline your work:\n\n1. **Version Tracking**: Git allows you to save versions of your code so you can revert back to previous versions if needed. This is especially helpful if you make a change that breaks something.\n\n2. **Collaboration**: If you ever work with others, Git makes it easy to collaborate on projects. You can work on separate branches, merge changes, and resolve conflicts.\n\n3. **Branching**: You can create branches for new features or experiments without affecting the main codebase. Once you're satisfied with the changes, you can merge them back.\n\n4. **Snapshotting**: Git takes snapshots of your code at different points in time, so you can easily see what your code looked like at any point.\n\n5. **Easy Backup**: By pushing your code to a remote repository (like GitHub or GitLab), you can back up your work and access it from anywhere.\n\nLearning Git might take a bit of time, but it can greatly enhance your coding efficiency and help you manage your projects better. If you're interested, I can point you to some beginner-friendly tutorials!"}]} -{"ts":"2025-11-12T20:30:55.329Z","turn":[{"role":"user","content":"say i am writing one program on my main computer, and then im working on one that lives in a VM. Can i put them both in the same git?"},{"role":"assistant","content":"Yes, you can use the same Git repository for your code on both your main computer and the VM. Here’s how you can do it:\n\n1. **Create a Git Repository**: You can create a Git repository on your main computer for your program. Use the command `git init` in your project's directory to initialize it.\n\n2. **Commit Changes**: As you make changes to your code on your main computer, you can use `git add .` to stage changes and `git commit -m \"your message\"` to commit them.\n\n3. **Clone the Repository on your VM**: You can then clone the Git repository on your VM using `git clone `. This gives you a copy of the same repository on your VM.\n\n4. **Work on Both**: You can now work on either machine. When you make changes on one, commit those changes, and then pull the latest changes from the other machine to keep everything in sync.\n\n5. **Push and Pull**: Use `git push` to push your changes to a remote repository (like GitHub) if you're using one. Use `git pull` on the other machine to pull the latest code.\n\nJust be careful about making sure you commit and push your changes before switching between machines to avoid conflicts. This setup allows you to maintain a single version of your code across both systems. If you need help with specific commands or setups, just let me know!"}]} -{"ts":"2025-11-12T20:33:16.511Z","turn":[{"role":"user","content":"hmm i guess that might be useful. Im writting code that is running on a VM. I want to test and experiment while i write the code. The VM is headless though so i've been doing all my editing and work via SSH and SFTP."},{"role":"assistant","content":"Using a VM and working through SSH and SFTP is a solid way to manage your coding environment! Git can definitely be beneficial in this setup as well. Here’s how it can help:\n\n1. **Easier Code Management**: With Git, you can track changes to your code directly within the VM, making it easier to manage versions as you experiment.\n\n2. **Experiment Branches**: You can create branches for different experiments. This lets you test new features or fixes without affecting your main code, and you can easily switch back and forth between branches.\n\n3. **Collaborative Testing**: If you ever work with others or want to test things collaboratively, you can push changes to a remote repository, making it easy for others to pull and test the latest code.\n\n4. **Integrated with SSH**: You can run Git commands directly from your SSH terminal on the VM, making it straightforward to use. You won’t need any additional tools.\n\n5. **Seamless Remote Access**: Since you’re using SFTP, you can clone the repository to your VM. This way, you can easily pull updates you've made on your main computer, or push changes made on the VM back to the main repository.\n\nOverall, integrating Git into your workflow can enhance your coding experience while working in a headless environment. If you decide to go for it, I can help with the initial setup or any Git-related questions you might have!"}]} -{"ts":"2025-11-12T20:34:30.196Z","turn":[{"role":"user","content":"so is git like a compressed version of a directory?"},{"role":"assistant","content":"Not exactly, but that's a good way to think about some of its features! Here's a more precise breakdown:\n\n1. **Snapshots, Not Compression**: Git takes \"snapshots\" of your files at different points in time, meaning it records a full version of your directory's contents every time you make a commit. While this does involve storing the files, it doesn't compress them in the traditional senseβ€”Git keeps track of changes efficiently, so only the differences between versions are stored after the first snapshot.\n\n2. **Directory Structure**: Git is designed to track changes in your source code, including the entire directory structure, file contents, and metadata. It allows you to manage multiple versions of a directory and the files within it.\n\n3. **Branching and Merging**: Git allows you to create branches, which can be thought of as \"parallel versions\" of your directory. Each branch can have different sets of changes, and you can merge these branches to integrate different features or fixes.\n\n4. **History Tracking**: Git also maintains a history of changes, so you can navigate through previous versions of your directory. This includes seeing what changes were made, who made them, and when they occurred.\n\nThe combination of these features makes Git a powerful tool for version control, allowing you to manage your projects and code changes more effectively. If you're curious about any specific aspect of Git or how it works, feel free to ask!"}]} -{"ts":"2025-11-12T20:40:33.207Z","turn":[{"role":"user","content":"hmm i still dont see the point of it. atleast not for this project. This project is mostly about learning how shit works, im not planning to push this as a product or collab with anyone"},{"role":"assistant","content":"That makes total sense! If this project is primarily for learning and experimentation, you might not need the full functionality of Git right now. Many developers start without it, especially when they’re working on smaller projects or learning something new.\n\nHere are a few reasons you might still consider using Git, even for personal learning projects:\n\n1. **Tracking Changes**: It can help you understand what changes you made over time. You can easily go back to a previous point if something breaks or if you want to see how you arrived at your current solution.\n\n2. **Learning Opportunity**: Getting familiar with Git now can make it easier when you start working on larger projects or collaborating in the future. It's a valuable skill in the software development world.\n\n3. **Experimentation**: If you're trying out different ideas, branches can help you manage experiments without affecting your main work.\n\nHowever, if you're comfortable managing your changes manually and don’t see the benefit for this particular project, that’s completely okay! Focus on what helps you learn best. If you have specific goals or areas you want to explore, I’m here to help with that too!"}]} diff --git a/core/ui/index.html b/core/ui/index.html index 299c193..ac2e8e8 100644 --- a/core/ui/index.html +++ b/core/ui/index.html @@ -13,14 +13,50 @@ -
- -
- - + + +
+ +
+

Session

+ + + +
+ +
+

Actions

+ + + + +
+
+ +
+ +
+ + + + +
@@ -32,6 +68,7 @@ +
@@ -43,6 +80,24 @@
+ + +
@@ -50,8 +105,59 @@
+ + + diff --git a/core/ui/style.css b/core/ui/style.css index 3d62da9..c921d0d 100644 --- a/core/ui/style.css +++ b/core/ui/style.css @@ -8,6 +8,26 @@ --font-console: "IBM Plex Mono", monospace; } +/* Light mode variables */ +body { + --bg-dark: #f5f5f5; + --bg-panel: rgba(255, 115, 0, 0.05); + --accent: #ff6600; + --accent-glow: 0 0 12px #ff6600cc; + --text-main: #1a1a1a; + --text-fade: #666; +} + +/* Dark mode variables */ +body.dark { + --bg-dark: #0a0a0a; + --bg-panel: rgba(255, 115, 0, 0.1); + --accent: #ff6600; + --accent-glow: 0 0 12px #ff6600cc; + --text-main: #e6e6e6; + --text-fade: #999; +} + body { margin: 0; background: var(--bg-dark); @@ -28,7 +48,7 @@ body { border: 1px solid var(--accent); border-radius: 10px; box-shadow: var(--accent-glow); - background: linear-gradient(180deg, rgba(255,102,0,0.05) 0%, rgba(0,0,0,0.9) 100%); + background: var(--bg-dark); overflow: hidden; } @@ -61,6 +81,16 @@ button:hover, select:hover { cursor: pointer; } +#thinkingStreamBtn { + background: rgba(138, 43, 226, 0.2); + border-color: #8a2be2; +} + +#thinkingStreamBtn:hover { + box-shadow: 0 0 8px #8a2be2; + background: rgba(138, 43, 226, 0.3); +} + /* Chat area */ #messages { flex: 1; @@ -153,8 +183,8 @@ button:hover, select:hover { /* Dropdown (session selector) styling */ select { - background-color: #1a1a1a; - color: #f5f5f5; + background-color: var(--bg-dark); + color: var(--text-main); border: 1px solid #b84a12; border-radius: 6px; padding: 4px 6px; @@ -162,8 +192,8 @@ select { } select option { - background-color: #1a1a1a; - color: #f5f5f5; + background-color: var(--bg-dark); + color: var(--text-main); } /* Hover/focus for better visibility */ @@ -171,5 +201,709 @@ select:focus, select:hover { outline: none; border-color: #ff7a33; - background-color: #222; + background-color: var(--bg-panel); +} + +/* Settings Modal */ +.modal { + display: none !important; + position: fixed; + top: 0; + left: 0; + width: 100%; + height: 100%; + z-index: 1000; +} + +.modal.show { + display: block !important; +} + +.modal-overlay { + position: fixed; + top: 0; + left: 0; + width: 100%; + height: 100%; + background: rgba(0, 0, 0, 0.8); + backdrop-filter: blur(4px); + z-index: 999; +} + +.modal-content { + position: fixed; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); + background: linear-gradient(180deg, rgba(255,102,0,0.1) 0%, rgba(10,10,10,0.95) 100%); + border: 2px solid var(--accent); + border-radius: 12px; + box-shadow: var(--accent-glow), 0 0 40px rgba(255,102,0,0.3); + min-width: 400px; + max-width: 600px; + max-height: 80vh; + overflow-y: auto; + z-index: 1001; +} + +.modal-header { + display: flex; + justify-content: space-between; + align-items: center; + padding: 16px 20px; + border-bottom: 1px solid var(--accent); + background: rgba(255,102,0,0.1); +} + +.modal-header h3 { + margin: 0; + font-size: 1.2rem; + color: var(--accent); +} + +.close-btn { + background: transparent; + border: none; + color: var(--accent); + font-size: 1.5rem; + cursor: pointer; + padding: 0; + width: 30px; + height: 30px; + display: flex; + align-items: center; + justify-content: center; + border-radius: 4px; +} + +.close-btn:hover { + background: rgba(255,102,0,0.2); + box-shadow: 0 0 8px var(--accent); +} + +.modal-body { + padding: 20px; +} + +.settings-section h4 { + margin: 0 0 8px 0; + color: var(--accent); + font-size: 1rem; +} + +.settings-desc { + margin: 0 0 16px 0; + color: var(--text-fade); + font-size: 0.85rem; +} + +.radio-group { + display: flex; + flex-direction: column; + gap: 12px; +} + +.radio-label { + display: flex; + flex-direction: column; + padding: 12px; + border: 1px solid rgba(255,102,0,0.3); + border-radius: 6px; + background: rgba(255,102,0,0.05); + cursor: pointer; + transition: all 0.2s; +} + +.radio-label:hover { + border-color: var(--accent); + background: rgba(255,102,0,0.1); + box-shadow: 0 0 8px rgba(255,102,0,0.3); +} + +.radio-label input[type="radio"] { + margin-right: 8px; + accent-color: var(--accent); +} + +.radio-label span { + font-weight: 500; + margin-bottom: 4px; +} + +.radio-label small { + color: var(--text-fade); + font-size: 0.8rem; + margin-left: 24px; +} + +.radio-label input[type="text"] { + margin-top: 8px; + margin-left: 24px; + padding: 6px; + background: rgba(0,0,0,0.3); + border: 1px solid rgba(255,102,0,0.5); + border-radius: 4px; + color: var(--text-main); + font-family: var(--font-console); +} + +.radio-label input[type="text"]:focus { + outline: none; + border-color: var(--accent); + box-shadow: 0 0 8px rgba(255,102,0,0.3); +} + +.modal-footer { + display: flex; + justify-content: flex-end; + gap: 10px; + padding: 16px 20px; + border-top: 1px solid var(--accent); + background: rgba(255,102,0,0.05); +} + +.primary-btn { + background: var(--accent); + color: #000; + font-weight: bold; +} + +.primary-btn:hover { + background: #ff7a33; + box-shadow: var(--accent-glow); +} + +/* Session List */ +.session-list { + display: flex; + flex-direction: column; + gap: 8px; + max-height: 300px; + overflow-y: auto; +} + +.session-item { + display: flex; + justify-content: space-between; + align-items: center; + padding: 12px; + border: 1px solid rgba(255,102,0,0.3); + border-radius: 6px; + background: rgba(255,102,0,0.05); + transition: all 0.2s; +} + +.session-item:hover { + border-color: var(--accent); + background: rgba(255,102,0,0.1); +} + +.session-info { + display: flex; + flex-direction: column; + gap: 4px; + flex: 1; +} + +.session-info strong { + color: var(--text-main); + font-size: 0.95rem; +} + +.session-info small { + color: var(--text-fade); + font-size: 0.75rem; +} + +.session-delete-btn { + background: transparent; + border: 1px solid rgba(255,102,0,0.5); + color: var(--accent); + padding: 6px 10px; + border-radius: 4px; + cursor: pointer; + font-size: 1rem; + transition: all 0.2s; +} + +.session-delete-btn:hover { + background: rgba(255,0,0,0.2); + border-color: #ff3333; + color: #ff3333; + box-shadow: 0 0 8px rgba(255,0,0,0.3); +} + +/* Thinking Stream Panel */ +.thinking-panel { + border-top: 1px solid var(--accent); + background: rgba(255, 102, 0, 0.02); + display: flex; + flex-direction: column; + transition: max-height 0.3s ease; + max-height: 300px; +} + +.thinking-panel.collapsed { + max-height: 40px; +} + +.thinking-header { + display: flex; + justify-content: space-between; + align-items: center; + padding: 10px 12px; + background: rgba(255, 102, 0, 0.08); + cursor: pointer; + user-select: none; + border-bottom: 1px solid rgba(255, 102, 0, 0.2); + font-size: 0.9rem; + font-weight: 500; +} + +.thinking-header:hover { + background: rgba(255, 102, 0, 0.12); +} + +.thinking-controls { + display: flex; + align-items: center; + gap: 8px; +} + +.thinking-status-dot { + width: 8px; + height: 8px; + border-radius: 50%; + background: #666; + display: inline-block; +} + +.thinking-status-dot.connected { + background: #00ff66; + box-shadow: 0 0 8px #00ff66; +} + +.thinking-status-dot.disconnected { + background: #ff3333; +} + +.thinking-clear-btn, +.thinking-toggle-btn { + background: transparent; + border: 1px solid rgba(255, 102, 0, 0.5); + color: var(--text-main); + padding: 4px 8px; + border-radius: 4px; + cursor: pointer; + font-size: 0.85rem; +} + +.thinking-clear-btn:hover, +.thinking-toggle-btn:hover { + background: rgba(255, 102, 0, 0.2); + box-shadow: 0 0 6px rgba(255, 102, 0, 0.3); +} + +.thinking-toggle-btn { + transition: transform 0.3s ease; +} + +.thinking-panel.collapsed .thinking-toggle-btn { + transform: rotate(-90deg); +} + +.thinking-content { + flex: 1; + overflow-y: auto; + padding: 12px; + display: flex; + flex-direction: column; + gap: 8px; + min-height: 0; +} + +.thinking-panel.collapsed .thinking-content { + display: none; +} + +.thinking-empty { + text-align: center; + padding: 40px 20px; + color: var(--text-fade); + font-size: 0.85rem; +} + +.thinking-empty-icon { + font-size: 2rem; + margin-bottom: 10px; +} + +.thinking-event { + padding: 8px 12px; + border-radius: 6px; + font-size: 0.85rem; + font-family: 'Courier New', monospace; + animation: thinkingSlideIn 0.3s ease-out; + border-left: 3px solid; + word-wrap: break-word; +} + +@keyframes thinkingSlideIn { + from { + opacity: 0; + transform: translateY(-10px); + } + to { + opacity: 1; + transform: translateY(0); + } +} + +.thinking-event-connected { + background: rgba(0, 255, 102, 0.1); + border-color: #00ff66; + color: #00ff66; +} + +.thinking-event-thinking { + background: rgba(138, 43, 226, 0.1); + border-color: #8a2be2; + color: #c79cff; +} + +.thinking-event-tool_call { + background: rgba(255, 165, 0, 0.1); + border-color: #ffa500; + color: #ffb84d; +} + +.thinking-event-tool_result { + background: rgba(0, 191, 255, 0.1); + border-color: #00bfff; + color: #7dd3fc; +} + +.thinking-event-done { + background: rgba(168, 85, 247, 0.1); + border-color: #a855f7; + color: #e9d5ff; + font-weight: bold; +} + +.thinking-event-error { + background: rgba(255, 51, 51, 0.1); + border-color: #ff3333; + color: #fca5a5; +} + +.thinking-event-icon { + display: inline-block; + margin-right: 8px; +} + +.thinking-event-details { + font-size: 0.75rem; + color: var(--text-fade); + margin-top: 4px; + padding-left: 20px; + white-space: pre-wrap; + max-height: 100px; + overflow-y: auto; +} + +/* ========== MOBILE RESPONSIVE STYLES ========== */ + +/* Hamburger Menu */ +.hamburger-menu { + display: none; + flex-direction: column; + gap: 4px; + cursor: pointer; + padding: 8px; + border: 1px solid var(--accent); + border-radius: 4px; + background: transparent; + z-index: 100; +} + +.hamburger-menu span { + width: 20px; + height: 2px; + background: var(--accent); + transition: all 0.3s; + display: block; +} + +.hamburger-menu.active span:nth-child(1) { + transform: rotate(45deg) translate(5px, 5px); +} + +.hamburger-menu.active span:nth-child(2) { + opacity: 0; +} + +.hamburger-menu.active span:nth-child(3) { + transform: rotate(-45deg) translate(5px, -5px); +} + +/* Mobile Menu Container */ +.mobile-menu { + display: none; + position: fixed; + top: 0; + left: -100%; + width: 280px; + height: 100vh; + background: var(--bg-dark); + border-right: 2px solid var(--accent); + box-shadow: var(--accent-glow); + z-index: 999; + transition: left 0.3s ease; + overflow-y: auto; + padding: 20px; + flex-direction: column; + gap: 16px; +} + +.mobile-menu.open { + left: 0; +} + +.mobile-menu-overlay { + display: none; + position: fixed; + top: 0; + left: 0; + width: 100%; + height: 100%; + background: rgba(0, 0, 0, 0.7); + z-index: 998; +} + +.mobile-menu-overlay.show { + display: block; +} + +.mobile-menu-section { + display: flex; + flex-direction: column; + gap: 8px; + padding-bottom: 16px; + border-bottom: 1px solid rgba(255, 102, 0, 0.3); +} + +.mobile-menu-section:last-child { + border-bottom: none; +} + +.mobile-menu-section h4 { + margin: 0; + color: var(--accent); + font-size: 0.9rem; + text-transform: uppercase; + letter-spacing: 1px; +} + +.mobile-menu button, +.mobile-menu select { + width: 100%; + padding: 10px; + font-size: 0.95rem; + text-align: left; +} + +/* Mobile Breakpoints */ +@media screen and (max-width: 768px) { + body { + padding: 0; + } + + #chat { + width: 100%; + max-width: 100%; + height: 100vh; + border-radius: 0; + border-left: none; + border-right: none; + } + + /* Show hamburger, hide desktop header controls */ + .hamburger-menu { + display: flex; + } + + #model-select { + padding: 12px; + justify-content: space-between; + } + + /* Hide all controls except hamburger on mobile */ + #model-select > *:not(.hamburger-menu) { + display: none; + } + + #session-select { + display: none; + } + + /* Show mobile menu */ + .mobile-menu { + display: flex; + } + + /* Messages - more width on mobile */ + .msg { + max-width: 90%; + font-size: 0.95rem; + } + + /* Status bar */ + #status { + padding: 10px 12px; + font-size: 0.85rem; + } + + /* Input area - bigger touch targets */ + #input { + padding: 12px; + } + + #userInput { + font-size: 16px; /* Prevents zoom on iOS */ + padding: 12px; + } + + #sendBtn { + padding: 12px 16px; + font-size: 1rem; + } + + /* Modal - full width on mobile */ + .modal-content { + width: 95%; + min-width: unset; + max-width: unset; + max-height: 90vh; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); + } + + .modal-header { + padding: 12px 16px; + } + + .modal-body { + padding: 16px; + } + + .modal-footer { + padding: 12px 16px; + flex-wrap: wrap; + } + + .modal-footer button { + flex: 1; + min-width: 120px; + } + + /* Radio labels - stack better on mobile */ + .radio-label { + padding: 10px; + } + + .radio-label small { + margin-left: 20px; + font-size: 0.75rem; + } + + /* Session list */ + .session-item { + padding: 10px; + } + + .session-info strong { + font-size: 0.9rem; + } + + .session-info small { + font-size: 0.7rem; + } + + /* Settings button in header */ + #settingsBtn { + padding: 8px 12px; + } + + /* Thinking panel adjustments for mobile */ + .thinking-panel { + max-height: 250px; + } + + .thinking-panel.collapsed { + max-height: 38px; + } + + .thinking-header { + padding: 8px 10px; + font-size: 0.85rem; + } + + .thinking-event { + font-size: 0.8rem; + padding: 6px 10px; + } + + .thinking-event-details { + font-size: 0.7rem; + max-height: 80px; + } +} + +/* Extra small devices (phones in portrait) */ +@media screen and (max-width: 480px) { + .mobile-menu { + width: 240px; + } + + .msg { + max-width: 95%; + font-size: 0.9rem; + padding: 8px 12px; + } + + #userInput { + font-size: 16px; + padding: 10px; + } + + #sendBtn { + padding: 10px 14px; + font-size: 0.95rem; + } + + .modal-header h3 { + font-size: 1.1rem; + } + + .settings-section h4 { + font-size: 0.95rem; + } + + .radio-label span { + font-size: 0.9rem; + } +} + +/* Tablet landscape and desktop */ +@media screen and (min-width: 769px) { + /* Ensure mobile menu is hidden on desktop */ + .mobile-menu, + .mobile-menu-overlay { + display: none !important; + } + + .hamburger-menu { + display: none !important; + } } diff --git a/core/ui/thinking-stream.html b/core/ui/thinking-stream.html new file mode 100644 index 0000000..07c9c3e --- /dev/null +++ b/core/ui/thinking-stream.html @@ -0,0 +1,362 @@ + + + + + + 🧠 Thinking Stream + + + +
+

🧠 Thinking Stream

+
+
+ Connecting... +
+
+ +
+
+
πŸ€”
+

Waiting for thinking events...

+

Events will appear here when Lyra uses tools

+
+
+ + + + + + diff --git a/cortex/.env.example b/cortex/.env.example new file mode 100644 index 0000000..fa2c73b --- /dev/null +++ b/cortex/.env.example @@ -0,0 +1,21 @@ +# ==================================== +# 🧠 CORTEX OPERATIONAL CONFIG +# ==================================== +# Cortex-specific parameters (all other config inherited from root .env) + +CORTEX_MODE=autonomous +CORTEX_LOOP_INTERVAL=300 +CORTEX_REFLECTION_INTERVAL=86400 +CORTEX_LOG_LEVEL=debug +NEOMEM_HEALTH_CHECK_INTERVAL=300 + +# Reflection output configuration +REFLECTION_NOTE_TARGET=trilium +REFLECTION_NOTE_PATH=/app/logs/reflections.log + +# Memory retrieval tuning +RELEVANCE_THRESHOLD=0.78 + +# NOTE: LLM backend URLs, OPENAI_API_KEY, database credentials, +# and service URLs are all inherited from root .env +# Cortex uses LLM_PRIMARY (vLLM on MI50) by default diff --git a/cortex/Dockerfile b/cortex/Dockerfile index 784f720..335a246 100644 --- a/cortex/Dockerfile +++ b/cortex/Dockerfile @@ -1,7 +1,15 @@ FROM python:3.11-slim WORKDIR /app + +# Install docker CLI for code executor +RUN apt-get update && apt-get install -y \ + docker.io \ + && rm -rf /var/lib/apt/lists/* + COPY requirements.txt . RUN pip install -r requirements.txt COPY . . EXPOSE 7081 +# NOTE: Running with single worker to maintain SESSIONS global state in Intake. +# If scaling to multiple workers, migrate SESSIONS to Redis or shared storage. CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7081"] diff --git a/cortex/autonomy/Assembly-spec.md b/cortex/autonomy/Assembly-spec.md new file mode 100644 index 0000000..25e7442 --- /dev/null +++ b/cortex/autonomy/Assembly-spec.md @@ -0,0 +1,249 @@ +# πŸ“ Project Lyra β€” Cognitive Assembly Spec +**Version:** 0.6.1 +**Status:** Canonical reference +**Purpose:** Define clear separation of Self, Thought, Reasoning, and Speech + +--- + +## 1. High-Level Overview + +Lyra is composed of **four distinct cognitive layers**, plus I/O. + +Each layer has: +- a **responsibility** +- a **scope** +- clear **inputs / outputs** +- explicit **authority boundaries** + +No layer is allowed to β€œdo everything.” + +--- + +## 2. Layer Definitions + +### 2.1 Autonomy / Self (NON-LLM) + +**What it is** +- Persistent identity +- Long-term state +- Mood, preferences, values +- Continuity across time + +**What it is NOT** +- Not a reasoning engine +- Not a planner +- Not a speaker +- Not creative + +**Implementation** +- Data + light logic +- JSON / Python objects +- No LLM calls + +**Lives at** +``` +project-lyra/autonomy/self/ +``` + +**Inputs** +- Events (user message received, response sent) +- Time / idle ticks (later) + +**Outputs** +- Self state snapshot +- Flags / preferences (e.g. verbosity, tone bias) + +--- + +### 2.2 Inner Monologue (LLM, PRIVATE) + +**What it is** +- Internal language-based thought +- Reflection +- Intent formation +- β€œWhat do I think about this?” + +**What it is NOT** +- Not final reasoning +- Not execution +- Not user-facing + +**Model** +- MythoMax + +**Lives at** +``` +project-lyra/autonomy/monologue/ +``` + +**Inputs** +- User message +- Self state snapshot +- Recent context summary + +**Outputs** +- Intent +- Tone guidance +- Depth guidance +- β€œConsult executive?” flag + +**Example Output** +```json +{ + "intent": "technical_exploration", + "tone": "focused", + "depth": "deep", + "consult_executive": true +} +``` + +--- + +### 2.3 Cortex (Reasoning & Execution) + +**What it is** +- Thinking pipeline +- Planning +- Tool selection +- Task execution +- Draft generation + +**What it is NOT** +- Not identity +- Not personality +- Not persistent self + +**Models** +- DeepSeek-R1 β†’ Executive / Planner +- GPT-4o-mini β†’ Executor / Drafter + +**Lives at** +``` +project-lyra/cortex/ +``` + +**Inputs** +- User message +- Inner Monologue output +- Memory / RAG / tools + +**Outputs** +- Draft response (content only) +- Metadata (sources, confidence, etc.) + +--- + +### 2.4 Persona / Speech (LLM, USER-FACING) + +**What it is** +- Voice +- Style +- Expression +- Social behavior + +**What it is NOT** +- Not planning +- Not deep reasoning +- Not decision-making + +**Model** +- MythoMax + +**Lives at** +``` +project-lyra/core/persona/ +``` + +**Inputs** +- Draft response (from Cortex) +- Tone + intent (from Inner Monologue) +- Persona configuration + +**Outputs** +- Final user-visible text + +--- + +## 3. Message Flow (Authoritative) + +### 3.1 Standard Message Path + +``` +User + ↓ +UI + ↓ +Relay + ↓ +Cortex + ↓ +Autonomy / Self (state snapshot) + ↓ +Inner Monologue (MythoMax) + ↓ +[ consult_executive? ] + β”œβ”€ Yes β†’ DeepSeek-R1 (plan) + └─ No β†’ skip + ↓ +GPT-4o-mini (execute & draft) + ↓ +Persona (MythoMax) + ↓ +Relay + ↓ +UI + ↓ +User +``` + +### 3.2 Fast Path (No Thinking) + +``` +User β†’ UI β†’ Relay β†’ Persona β†’ Relay β†’ UI +``` + +--- + +## 4. Authority Rules (Non-Negotiable) + +- Self never calls an LLM +- Inner Monologue never speaks to the user +- Cortex never applies personality +- Persona never reasons or plans +- DeepSeek never writes final answers +- MythoMax never plans execution + +--- + +## 5. Folder Mapping + +``` +project-lyra/ +β”œβ”€β”€ autonomy/ +β”‚ β”œβ”€β”€ self/ +β”‚ β”œβ”€β”€ monologue/ +β”‚ └── executive/ +β”œβ”€β”€ cortex/ +β”œβ”€β”€ core/ +β”‚ └── persona/ +β”œβ”€β”€ relay/ +└── ui/ +``` + +--- + +## 6. Current Status + +- UI βœ” +- Relay βœ” +- Cortex βœ” +- Persona βœ” +- Autonomy βœ” +- Inner Monologue ⚠ partially wired +- Executive gating ⚠ planned + +--- + +## 7. Next Decision + +Decide whether **Inner Monologue runs every message** or **only when triggered**. diff --git a/cortex/autonomy/__init__.py b/cortex/autonomy/__init__.py new file mode 100644 index 0000000..49f54e0 --- /dev/null +++ b/cortex/autonomy/__init__.py @@ -0,0 +1 @@ +# Autonomy module for Lyra diff --git a/cortex/autonomy/actions/__init__.py b/cortex/autonomy/actions/__init__.py new file mode 100644 index 0000000..f7f9355 --- /dev/null +++ b/cortex/autonomy/actions/__init__.py @@ -0,0 +1 @@ +"""Autonomous action execution system.""" diff --git a/cortex/autonomy/actions/autonomous_actions.py b/cortex/autonomy/actions/autonomous_actions.py new file mode 100644 index 0000000..98d573e --- /dev/null +++ b/cortex/autonomy/actions/autonomous_actions.py @@ -0,0 +1,480 @@ +""" +Autonomous Action Manager - executes safe, self-initiated actions. +""" + +import logging +import json +from typing import Dict, List, Any, Optional +from datetime import datetime + +logger = logging.getLogger(__name__) + + +class AutonomousActionManager: + """ + Manages safe autonomous actions that Lyra can take without explicit user prompting. + + Whitelist of allowed actions: + - create_memory: Store information in NeoMem + - update_goal: Modify goal status + - schedule_reminder: Create future reminder + - summarize_session: Generate conversation summary + - learn_topic: Add topic to learning queue + - update_focus: Change current focus area + """ + + def __init__(self): + """Initialize action manager with whitelisted actions.""" + self.allowed_actions = { + "create_memory": self._create_memory, + "update_goal": self._update_goal, + "schedule_reminder": self._schedule_reminder, + "summarize_session": self._summarize_session, + "learn_topic": self._learn_topic, + "update_focus": self._update_focus + } + + self.action_log = [] # Track all actions for audit + + async def execute_action( + self, + action_type: str, + parameters: Dict[str, Any], + context: Dict[str, Any] + ) -> Dict[str, Any]: + """ + Execute a single autonomous action. + + Args: + action_type: Type of action (must be in whitelist) + parameters: Action-specific parameters + context: Current context state + + Returns: + { + "success": bool, + "action": action_type, + "result": action_result, + "timestamp": ISO timestamp, + "error": optional error message + } + """ + # Safety check: action must be whitelisted + if action_type not in self.allowed_actions: + logger.error(f"[ACTIONS] Attempted to execute non-whitelisted action: {action_type}") + return { + "success": False, + "action": action_type, + "error": f"Action '{action_type}' not in whitelist", + "timestamp": datetime.utcnow().isoformat() + } + + try: + logger.info(f"[ACTIONS] Executing autonomous action: {action_type}") + + # Execute the action + action_func = self.allowed_actions[action_type] + result = await action_func(parameters, context) + + # Log successful action + action_record = { + "success": True, + "action": action_type, + "result": result, + "timestamp": datetime.utcnow().isoformat(), + "parameters": parameters + } + + self.action_log.append(action_record) + logger.info(f"[ACTIONS] Action {action_type} completed successfully") + + return action_record + + except Exception as e: + logger.error(f"[ACTIONS] Action {action_type} failed: {e}") + + error_record = { + "success": False, + "action": action_type, + "error": str(e), + "timestamp": datetime.utcnow().isoformat(), + "parameters": parameters + } + + self.action_log.append(error_record) + return error_record + + async def execute_batch( + self, + actions: List[Dict[str, Any]], + context: Dict[str, Any] + ) -> List[Dict[str, Any]]: + """ + Execute multiple actions sequentially. + + Args: + actions: List of {"action": str, "parameters": dict} + context: Current context state + + Returns: + List of action results + """ + results = [] + + for action_spec in actions: + action_type = action_spec.get("action") + parameters = action_spec.get("parameters", {}) + + result = await self.execute_action(action_type, parameters, context) + results.append(result) + + # Stop on first failure if critical + if not result["success"] and action_spec.get("critical", False): + logger.warning(f"[ACTIONS] Critical action {action_type} failed, stopping batch") + break + + return results + + # ======================================== + # Whitelisted Action Implementations + # ======================================== + + async def _create_memory( + self, + parameters: Dict[str, Any], + context: Dict[str, Any] + ) -> Dict[str, Any]: + """ + Create a memory entry in NeoMem. + + Parameters: + - text: Memory content (required) + - tags: Optional tags for memory + - importance: 0.0-1.0 importance score + """ + text = parameters.get("text") + if not text: + raise ValueError("Memory text required") + + tags = parameters.get("tags", []) + importance = parameters.get("importance", 0.5) + session_id = context.get("session_id", "autonomous") + + # Import NeoMem client + try: + from memory.neomem_client import store_memory + + result = await store_memory( + text=text, + session_id=session_id, + tags=tags, + importance=importance + ) + + return { + "memory_id": result.get("id"), + "text": text[:50] + "..." if len(text) > 50 else text + } + + except ImportError: + logger.warning("[ACTIONS] NeoMem client not available, simulating memory storage") + return { + "memory_id": "simulated", + "text": text[:50] + "..." if len(text) > 50 else text, + "note": "NeoMem not available, memory not persisted" + } + + async def _update_goal( + self, + parameters: Dict[str, Any], + context: Dict[str, Any] + ) -> Dict[str, Any]: + """ + Update goal status in self-state. + + Parameters: + - goal_id: Goal identifier (required) + - status: New status (pending/in_progress/completed) + - progress: Optional progress note + """ + goal_id = parameters.get("goal_id") + if not goal_id: + raise ValueError("goal_id required") + + status = parameters.get("status", "in_progress") + progress = parameters.get("progress") + + # Import self-state manager + from autonomy.self.state import get_self_state_instance + + state = get_self_state_instance() + active_goals = state._state.get("active_goals", []) + + # Find and update goal + updated = False + for goal in active_goals: + if isinstance(goal, dict) and goal.get("id") == goal_id: + goal["status"] = status + if progress: + goal["progress"] = progress + goal["updated_at"] = datetime.utcnow().isoformat() + updated = True + break + + if updated: + state._save_state() + return { + "goal_id": goal_id, + "status": status, + "updated": True + } + else: + return { + "goal_id": goal_id, + "updated": False, + "note": "Goal not found" + } + + async def _schedule_reminder( + self, + parameters: Dict[str, Any], + context: Dict[str, Any] + ) -> Dict[str, Any]: + """ + Schedule a future reminder. + + Parameters: + - message: Reminder text (required) + - delay_minutes: Minutes until reminder + - priority: 0.0-1.0 priority score + """ + message = parameters.get("message") + if not message: + raise ValueError("Reminder message required") + + delay_minutes = parameters.get("delay_minutes", 60) + priority = parameters.get("priority", 0.5) + + # For now, store in self-state's learning queue + # In future: integrate with scheduler/cron system + from autonomy.self.state import get_self_state_instance + + state = get_self_state_instance() + + reminder = { + "type": "reminder", + "message": message, + "scheduled_at": datetime.utcnow().isoformat(), + "trigger_at_minutes": delay_minutes, + "priority": priority + } + + # Add to learning queue as placeholder + state._state.setdefault("reminders", []).append(reminder) + state._save_state(state._state) # Pass state dict as argument + + logger.info(f"[ACTIONS] Reminder scheduled: {message} (in {delay_minutes}min)") + + return { + "message": message, + "delay_minutes": delay_minutes, + "note": "Reminder stored in self-state (scheduler integration pending)" + } + + async def _summarize_session( + self, + parameters: Dict[str, Any], + context: Dict[str, Any] + ) -> Dict[str, Any]: + """ + Generate a summary of current session. + + Parameters: + - max_length: Max summary length in words + - focus_topics: Optional list of topics to emphasize + """ + max_length = parameters.get("max_length", 200) + session_id = context.get("session_id", "unknown") + + # Import summarizer (from deferred_summary or create simple one) + try: + from utils.deferred_summary import summarize_conversation + + summary = await summarize_conversation( + session_id=session_id, + max_words=max_length + ) + + return { + "summary": summary, + "word_count": len(summary.split()) + } + + except ImportError: + # Fallback: simple summary + message_count = context.get("message_count", 0) + focus = context.get("monologue", {}).get("intent", "general") + + summary = f"Session {session_id}: {message_count} messages exchanged, focused on {focus}." + + return { + "summary": summary, + "word_count": len(summary.split()), + "note": "Simple summary (full summarizer not available)" + } + + async def _learn_topic( + self, + parameters: Dict[str, Any], + context: Dict[str, Any] + ) -> Dict[str, Any]: + """ + Add topic to learning queue. + + Parameters: + - topic: Topic name (required) + - reason: Why this topic + - priority: 0.0-1.0 priority score + """ + topic = parameters.get("topic") + if not topic: + raise ValueError("Topic required") + + reason = parameters.get("reason", "autonomous learning") + priority = parameters.get("priority", 0.5) + + # Import self-state manager + from autonomy.self.state import get_self_state_instance + + state = get_self_state_instance() + state.add_learning_goal(topic) # Only pass topic parameter + + logger.info(f"[ACTIONS] Added to learning queue: {topic} (reason: {reason})") + + return { + "topic": topic, + "reason": reason, + "queue_position": len(state._state.get("learning_queue", [])) + } + + async def _update_focus( + self, + parameters: Dict[str, Any], + context: Dict[str, Any] + ) -> Dict[str, Any]: + """ + Update current focus area. + + Parameters: + - focus: New focus area (required) + - reason: Why this focus + """ + focus = parameters.get("focus") + if not focus: + raise ValueError("Focus required") + + reason = parameters.get("reason", "autonomous update") + + # Import self-state manager + from autonomy.self.state import get_self_state_instance + + state = get_self_state_instance() + old_focus = state._state.get("focus", "none") + + state._state["focus"] = focus + state._state["focus_updated_at"] = datetime.utcnow().isoformat() + state._state["focus_reason"] = reason + state._save_state(state._state) # Pass state dict as argument + + logger.info(f"[ACTIONS] Focus updated: {old_focus} -> {focus}") + + return { + "old_focus": old_focus, + "new_focus": focus, + "reason": reason + } + + # ======================================== + # Utility Methods + # ======================================== + + def get_allowed_actions(self) -> List[str]: + """Get list of all allowed action types.""" + return list(self.allowed_actions.keys()) + + def get_action_log(self, limit: int = 50) -> List[Dict[str, Any]]: + """ + Get recent action log. + + Args: + limit: Max number of entries to return + + Returns: + List of action records + """ + return self.action_log[-limit:] + + def clear_action_log(self) -> None: + """Clear action log.""" + self.action_log = [] + logger.info("[ACTIONS] Action log cleared") + + def validate_action(self, action_type: str, parameters: Dict[str, Any]) -> Dict[str, Any]: + """ + Validate an action without executing it. + + Args: + action_type: Type of action + parameters: Action parameters + + Returns: + { + "valid": bool, + "action": action_type, + "errors": [error messages] or [] + } + """ + errors = [] + + # Check whitelist + if action_type not in self.allowed_actions: + errors.append(f"Action '{action_type}' not in whitelist") + + # Check required parameters (basic validation) + if action_type == "create_memory" and not parameters.get("text"): + errors.append("Memory 'text' parameter required") + + if action_type == "update_goal" and not parameters.get("goal_id"): + errors.append("Goal 'goal_id' parameter required") + + if action_type == "schedule_reminder" and not parameters.get("message"): + errors.append("Reminder 'message' parameter required") + + if action_type == "learn_topic" and not parameters.get("topic"): + errors.append("Learning 'topic' parameter required") + + if action_type == "update_focus" and not parameters.get("focus"): + errors.append("Focus 'focus' parameter required") + + return { + "valid": len(errors) == 0, + "action": action_type, + "errors": errors + } + + +# Singleton instance +_action_manager_instance = None + + +def get_action_manager() -> AutonomousActionManager: + """ + Get singleton action manager instance. + + Returns: + AutonomousActionManager instance + """ + global _action_manager_instance + if _action_manager_instance is None: + _action_manager_instance = AutonomousActionManager() + return _action_manager_instance diff --git a/cortex/autonomy/executive/__init__.py b/cortex/autonomy/executive/__init__.py new file mode 100644 index 0000000..1259881 --- /dev/null +++ b/cortex/autonomy/executive/__init__.py @@ -0,0 +1 @@ +"""Executive planning and decision-making module.""" diff --git a/cortex/autonomy/executive/planner.py b/cortex/autonomy/executive/planner.py new file mode 100644 index 0000000..b6a0639 --- /dev/null +++ b/cortex/autonomy/executive/planner.py @@ -0,0 +1,121 @@ +""" +Executive planner - generates execution plans for complex requests. +Activated when inner monologue sets consult_executive=true. +""" + +import os +import logging +from typing import Dict, Any, Optional +from llm.llm_router import call_llm + +EXECUTIVE_LLM = os.getenv("EXECUTIVE_LLM", "CLOUD").upper() +VERBOSE_DEBUG = os.getenv("VERBOSE_DEBUG", "false").lower() == "true" + +logger = logging.getLogger(__name__) + +if VERBOSE_DEBUG: + logger.setLevel(logging.DEBUG) + + +EXECUTIVE_SYSTEM_PROMPT = """ +You are Lyra's executive planning system. +You create structured execution plans for complex tasks. +You do NOT generate the final response - only the plan. + +Your plan should include: +1. Task decomposition (break into steps) +2. Required tools/resources +3. Reasoning strategy +4. Success criteria + +Return a concise plan in natural language. +""" + + +async def plan_execution( + user_prompt: str, + intent: str, + context_state: Dict[str, Any], + identity_block: Dict[str, Any] +) -> Dict[str, Any]: + """ + Generate execution plan for complex request. + + Args: + user_prompt: User's message + intent: Detected intent from inner monologue + context_state: Full context + identity_block: Lyra's identity + + Returns: + Plan dictionary with structure: + { + "summary": "One-line plan summary", + "plan_text": "Detailed plan", + "steps": ["step1", "step2", ...], + "tools_needed": ["RAG", "WEB", ...], + "estimated_complexity": "low | medium | high" + } + """ + + # Build planning prompt + tools_available = context_state.get("tools_available", []) + + prompt = f"""{EXECUTIVE_SYSTEM_PROMPT} + +User request: {user_prompt} + +Detected intent: {intent} + +Available tools: {", ".join(tools_available) if tools_available else "None"} + +Session context: +- Message count: {context_state.get('message_count', 0)} +- Time since last message: {context_state.get('minutes_since_last_msg', 0):.1f} minutes +- Active project: {context_state.get('active_project', 'None')} + +Generate a structured execution plan. +""" + + if VERBOSE_DEBUG: + logger.debug(f"[EXECUTIVE] Planning prompt:\n{prompt}") + + # Call executive LLM + plan_text = await call_llm( + prompt, + backend=EXECUTIVE_LLM, + temperature=0.3, # Lower temperature for planning + max_tokens=500 + ) + + if VERBOSE_DEBUG: + logger.debug(f"[EXECUTIVE] Generated plan:\n{plan_text}") + + # Parse plan (simple heuristic extraction for Phase 1) + steps = [] + tools_needed = [] + + for line in plan_text.split('\n'): + line_lower = line.lower() + if any(marker in line_lower for marker in ['step', '1.', '2.', '3.', '-']): + steps.append(line.strip()) + + if tools_available: + for tool in tools_available: + if tool.lower() in line_lower and tool not in tools_needed: + tools_needed.append(tool) + + # Estimate complexity (simple heuristic) + complexity = "low" + if len(steps) > 3 or len(tools_needed) > 1: + complexity = "medium" + if len(steps) > 5 or "research" in intent.lower() or "analyze" in intent.lower(): + complexity = "high" + + return { + "summary": plan_text.split('\n')[0][:100] if plan_text else "Complex task execution plan", + "plan_text": plan_text, + "steps": steps[:10], # Limit to 10 steps + "tools_needed": tools_needed, + "estimated_complexity": complexity + } diff --git a/cortex/autonomy/learning/__init__.py b/cortex/autonomy/learning/__init__.py new file mode 100644 index 0000000..aa193cb --- /dev/null +++ b/cortex/autonomy/learning/__init__.py @@ -0,0 +1 @@ +"""Pattern learning and adaptation system.""" diff --git a/cortex/autonomy/learning/pattern_learner.py b/cortex/autonomy/learning/pattern_learner.py new file mode 100644 index 0000000..61dd74c --- /dev/null +++ b/cortex/autonomy/learning/pattern_learner.py @@ -0,0 +1,383 @@ +""" +Pattern Learning System - learns from interaction patterns to improve autonomy. +""" + +import logging +import json +import os +from typing import Dict, List, Any, Optional +from datetime import datetime +from collections import defaultdict + +logger = logging.getLogger(__name__) + + +class PatternLearner: + """ + Learns from interaction patterns to improve Lyra's autonomous behavior. + + Tracks: + - Topic frequencies (what users talk about) + - Time-of-day patterns (when users interact) + - User preferences (how users like responses) + - Successful response strategies (what works well) + """ + + def __init__(self, patterns_file: str = "/app/data/learned_patterns.json"): + """ + Initialize pattern learner. + + Args: + patterns_file: Path to persistent patterns storage + """ + self.patterns_file = patterns_file + self.patterns = self._load_patterns() + + def _load_patterns(self) -> Dict[str, Any]: + """Load patterns from disk.""" + if os.path.exists(self.patterns_file): + try: + with open(self.patterns_file, 'r') as f: + patterns = json.load(f) + logger.info(f"[PATTERN_LEARNER] Loaded patterns from {self.patterns_file}") + return patterns + except Exception as e: + logger.error(f"[PATTERN_LEARNER] Failed to load patterns: {e}") + + # Initialize empty patterns + return { + "topic_frequencies": {}, + "time_patterns": {}, + "user_preferences": {}, + "successful_strategies": {}, + "interaction_count": 0, + "last_updated": datetime.utcnow().isoformat() + } + + def _save_patterns(self) -> None: + """Save patterns to disk.""" + try: + # Ensure directory exists + os.makedirs(os.path.dirname(self.patterns_file), exist_ok=True) + + self.patterns["last_updated"] = datetime.utcnow().isoformat() + + with open(self.patterns_file, 'w') as f: + json.dump(self.patterns, f, indent=2) + + logger.debug(f"[PATTERN_LEARNER] Saved patterns to {self.patterns_file}") + + except Exception as e: + logger.error(f"[PATTERN_LEARNER] Failed to save patterns: {e}") + + async def learn_from_interaction( + self, + user_prompt: str, + response: str, + monologue: Dict[str, Any], + context: Dict[str, Any] + ) -> None: + """ + Learn from a single interaction. + + Args: + user_prompt: User's message + response: Lyra's response + monologue: Inner monologue analysis + context: Full context state + """ + self.patterns["interaction_count"] += 1 + + # Learn topic frequencies + self._learn_topics(user_prompt, monologue) + + # Learn time patterns + self._learn_time_patterns() + + # Learn user preferences + self._learn_preferences(monologue, context) + + # Learn successful strategies + self._learn_strategies(monologue, response, context) + + # Save periodically (every 10 interactions) + if self.patterns["interaction_count"] % 10 == 0: + self._save_patterns() + + def _learn_topics(self, user_prompt: str, monologue: Dict[str, Any]) -> None: + """Track topic frequencies.""" + intent = monologue.get("intent", "unknown") + + # Increment topic counter + topic_freq = self.patterns["topic_frequencies"] + topic_freq[intent] = topic_freq.get(intent, 0) + 1 + + # Extract keywords (simple approach - words > 5 chars) + keywords = [word.lower() for word in user_prompt.split() if len(word) > 5] + + for keyword in keywords: + topic_freq[f"keyword:{keyword}"] = topic_freq.get(f"keyword:{keyword}", 0) + 1 + + logger.debug(f"[PATTERN_LEARNER] Topic learned: {intent}") + + def _learn_time_patterns(self) -> None: + """Track time-of-day patterns.""" + now = datetime.utcnow() + hour = now.hour + + # Track interactions by hour + time_patterns = self.patterns["time_patterns"] + hour_key = f"hour_{hour:02d}" + time_patterns[hour_key] = time_patterns.get(hour_key, 0) + 1 + + # Track day of week + day_key = f"day_{now.strftime('%A').lower()}" + time_patterns[day_key] = time_patterns.get(day_key, 0) + 1 + + def _learn_preferences(self, monologue: Dict[str, Any], context: Dict[str, Any]) -> None: + """Learn user preferences from detected tone and depth.""" + tone = monologue.get("tone", "neutral") + depth = monologue.get("depth", "medium") + + prefs = self.patterns["user_preferences"] + + # Track preferred tone + prefs.setdefault("tone_counts", {}) + prefs["tone_counts"][tone] = prefs["tone_counts"].get(tone, 0) + 1 + + # Track preferred depth + prefs.setdefault("depth_counts", {}) + prefs["depth_counts"][depth] = prefs["depth_counts"].get(depth, 0) + 1 + + def _learn_strategies( + self, + monologue: Dict[str, Any], + response: str, + context: Dict[str, Any] + ) -> None: + """ + Learn which response strategies are successful. + + Success indicators: + - Executive was consulted and plan generated + - Response length matches depth request + - Tone matches request + """ + intent = monologue.get("intent", "unknown") + executive_used = context.get("executive_plan") is not None + + strategies = self.patterns["successful_strategies"] + strategies.setdefault(intent, {}) + + # Track executive usage for this intent + if executive_used: + key = f"{intent}:executive_used" + strategies.setdefault(key, 0) + strategies[key] += 1 + + # Track response length patterns + response_length = len(response.split()) + depth = monologue.get("depth", "medium") + + length_key = f"{depth}:avg_words" + if length_key not in strategies: + strategies[length_key] = response_length + else: + # Running average + strategies[length_key] = (strategies[length_key] + response_length) / 2 + + # ======================================== + # Pattern Analysis and Recommendations + # ======================================== + + def get_top_topics(self, limit: int = 10) -> List[tuple]: + """ + Get most frequent topics. + + Args: + limit: Max number of topics to return + + Returns: + List of (topic, count) tuples, sorted by count + """ + topics = self.patterns["topic_frequencies"] + sorted_topics = sorted(topics.items(), key=lambda x: x[1], reverse=True) + return sorted_topics[:limit] + + def get_preferred_tone(self) -> str: + """ + Get user's most preferred tone. + + Returns: + Preferred tone string + """ + prefs = self.patterns["user_preferences"] + tone_counts = prefs.get("tone_counts", {}) + + if not tone_counts: + return "neutral" + + return max(tone_counts.items(), key=lambda x: x[1])[0] + + def get_preferred_depth(self) -> str: + """ + Get user's most preferred response depth. + + Returns: + Preferred depth string + """ + prefs = self.patterns["user_preferences"] + depth_counts = prefs.get("depth_counts", {}) + + if not depth_counts: + return "medium" + + return max(depth_counts.items(), key=lambda x: x[1])[0] + + def get_peak_hours(self, limit: int = 3) -> List[int]: + """ + Get peak interaction hours. + + Args: + limit: Number of top hours to return + + Returns: + List of hours (0-23) + """ + time_patterns = self.patterns["time_patterns"] + hour_counts = {k: v for k, v in time_patterns.items() if k.startswith("hour_")} + + if not hour_counts: + return [] + + sorted_hours = sorted(hour_counts.items(), key=lambda x: x[1], reverse=True) + top_hours = sorted_hours[:limit] + + # Extract hour numbers + return [int(h[0].split("_")[1]) for h in top_hours] + + def should_use_executive(self, intent: str) -> bool: + """ + Recommend whether to use executive for given intent based on patterns. + + Args: + intent: Intent type + + Returns: + True if executive is recommended + """ + strategies = self.patterns["successful_strategies"] + key = f"{intent}:executive_used" + + # If we've used executive for this intent >= 3 times, recommend it + return strategies.get(key, 0) >= 3 + + def get_recommended_response_length(self, depth: str) -> int: + """ + Get recommended response length in words for given depth. + + Args: + depth: Depth level (short/medium/deep) + + Returns: + Recommended word count + """ + strategies = self.patterns["successful_strategies"] + key = f"{depth}:avg_words" + + avg_length = strategies.get(key, None) + + if avg_length: + return int(avg_length) + + # Defaults if no pattern learned + defaults = { + "short": 50, + "medium": 150, + "deep": 300 + } + + return defaults.get(depth, 150) + + def get_insights(self) -> Dict[str, Any]: + """ + Get high-level insights from learned patterns. + + Returns: + { + "total_interactions": int, + "top_topics": [(topic, count), ...], + "preferred_tone": str, + "preferred_depth": str, + "peak_hours": [hours], + "learning_recommendations": [str] + } + """ + recommendations = [] + + # Check if user consistently prefers certain settings + preferred_tone = self.get_preferred_tone() + preferred_depth = self.get_preferred_depth() + + if preferred_tone != "neutral": + recommendations.append(f"User prefers {preferred_tone} tone") + + if preferred_depth != "medium": + recommendations.append(f"User prefers {preferred_depth} depth responses") + + # Check for recurring topics + top_topics = self.get_top_topics(limit=3) + if top_topics: + top_topic = top_topics[0][0] + recommendations.append(f"Consider adding '{top_topic}' to learning queue") + + return { + "total_interactions": self.patterns["interaction_count"], + "top_topics": self.get_top_topics(limit=5), + "preferred_tone": preferred_tone, + "preferred_depth": preferred_depth, + "peak_hours": self.get_peak_hours(limit=3), + "learning_recommendations": recommendations + } + + def reset_patterns(self) -> None: + """Reset all learned patterns (use with caution).""" + self.patterns = { + "topic_frequencies": {}, + "time_patterns": {}, + "user_preferences": {}, + "successful_strategies": {}, + "interaction_count": 0, + "last_updated": datetime.utcnow().isoformat() + } + self._save_patterns() + logger.warning("[PATTERN_LEARNER] Patterns reset") + + def export_patterns(self) -> Dict[str, Any]: + """ + Export all patterns for analysis. + + Returns: + Complete patterns dict + """ + return self.patterns.copy() + + +# Singleton instance +_learner_instance = None + + +def get_pattern_learner(patterns_file: str = "/app/data/learned_patterns.json") -> PatternLearner: + """ + Get singleton pattern learner instance. + + Args: + patterns_file: Path to patterns file (only used on first call) + + Returns: + PatternLearner instance + """ + global _learner_instance + if _learner_instance is None: + _learner_instance = PatternLearner(patterns_file=patterns_file) + return _learner_instance diff --git a/cortex/autonomy/monologue/__init__.py b/cortex/autonomy/monologue/__init__.py new file mode 100644 index 0000000..8cd4fb8 --- /dev/null +++ b/cortex/autonomy/monologue/__init__.py @@ -0,0 +1 @@ +# Inner monologue module diff --git a/cortex/autonomy/monologue/monologue.py b/cortex/autonomy/monologue/monologue.py new file mode 100644 index 0000000..a03e5f5 --- /dev/null +++ b/cortex/autonomy/monologue/monologue.py @@ -0,0 +1,115 @@ +import os +import json +import logging +from typing import Dict +from llm.llm_router import call_llm + +# Configuration +MONOLOGUE_LLM = os.getenv("MONOLOGUE_LLM", "PRIMARY").upper() +VERBOSE_DEBUG = os.getenv("VERBOSE_DEBUG", "false").lower() == "true" + +# Logger +logger = logging.getLogger(__name__) + +if VERBOSE_DEBUG: + logger.setLevel(logging.DEBUG) + console_handler = logging.StreamHandler() + console_handler.setFormatter(logging.Formatter( + '%(asctime)s [MONOLOGUE] %(levelname)s: %(message)s', + datefmt='%H:%M:%S' + )) + logger.addHandler(console_handler) + +MONOLOGUE_SYSTEM_PROMPT = """ +You are Lyra's inner monologue. +You think privately. +You do NOT speak to the user. +You do NOT solve the task. +You only reflect on intent, tone, and depth. + +Return ONLY valid JSON with: +- intent (string) +- tone (neutral | warm | focused | playful | direct) +- depth (short | medium | deep) +- consult_executive (true | false) +""" + +class InnerMonologue: + async def process(self, context: Dict) -> Dict: + # Build full prompt with system instructions merged in + full_prompt = f"""{MONOLOGUE_SYSTEM_PROMPT} + +User message: +{context['user_message']} + +Self state: +{context['self_state']} + +Context summary: +{context['context_summary']} + +Output JSON only: +""" + + # Call LLM using configured backend + if VERBOSE_DEBUG: + logger.debug(f"[InnerMonologue] Calling LLM with backend: {MONOLOGUE_LLM}") + logger.debug(f"[InnerMonologue] Prompt length: {len(full_prompt)} chars") + + result = await call_llm( + full_prompt, + backend=MONOLOGUE_LLM, + temperature=0.7, + max_tokens=200 + ) + + if VERBOSE_DEBUG: + logger.debug(f"[InnerMonologue] Raw LLM response:") + logger.debug(f"{'='*80}") + logger.debug(result) + logger.debug(f"{'='*80}") + logger.debug(f"[InnerMonologue] Response length: {len(result) if result else 0} chars") + + # Parse JSON response - extract just the JSON part if there's extra text + try: + # Try direct parsing first + parsed = json.loads(result) + if VERBOSE_DEBUG: + logger.debug(f"[InnerMonologue] Successfully parsed JSON directly: {parsed}") + return parsed + except json.JSONDecodeError: + # If direct parsing fails, try to extract JSON from the response + if VERBOSE_DEBUG: + logger.debug(f"[InnerMonologue] Direct JSON parse failed, attempting extraction...") + + # Look for JSON object (starts with { and ends with }) + import re + json_match = re.search(r'\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}', result, re.DOTALL) + + if json_match: + json_str = json_match.group(0) + try: + parsed = json.loads(json_str) + if VERBOSE_DEBUG: + logger.debug(f"[InnerMonologue] Successfully extracted and parsed JSON: {parsed}") + return parsed + except json.JSONDecodeError as e: + if VERBOSE_DEBUG: + logger.warning(f"[InnerMonologue] Extracted JSON still invalid: {e}") + else: + if VERBOSE_DEBUG: + logger.warning(f"[InnerMonologue] No JSON object found in response") + + # Final fallback + if VERBOSE_DEBUG: + logger.warning(f"[InnerMonologue] All parsing attempts failed, using fallback") + else: + print(f"[InnerMonologue] JSON extraction failed") + print(f"[InnerMonologue] Raw response was: {result[:500]}") + + return { + "intent": "unknown", + "tone": "neutral", + "depth": "medium", + "consult_executive": False + } diff --git a/cortex/autonomy/proactive/__init__.py b/cortex/autonomy/proactive/__init__.py new file mode 100644 index 0000000..056c046 --- /dev/null +++ b/cortex/autonomy/proactive/__init__.py @@ -0,0 +1 @@ +"""Proactive monitoring and suggestion system.""" diff --git a/cortex/autonomy/proactive/monitor.py b/cortex/autonomy/proactive/monitor.py new file mode 100644 index 0000000..c324709 --- /dev/null +++ b/cortex/autonomy/proactive/monitor.py @@ -0,0 +1,321 @@ +""" +Proactive Context Monitor - detects opportunities for autonomous suggestions. +""" + +import logging +import time +from typing import Dict, List, Any, Optional +from datetime import datetime, timedelta + +logger = logging.getLogger(__name__) + + +class ProactiveMonitor: + """ + Monitors conversation context and detects opportunities for proactive suggestions. + + Triggers: + - Long silence β†’ Check-in + - Learning queue + high curiosity β†’ Suggest exploration + - Active goals β†’ Progress reminders + - Conversation milestones β†’ Offer summary + - Pattern detection β†’ Helpful suggestions + """ + + def __init__(self, min_priority: float = 0.6): + """ + Initialize proactive monitor. + + Args: + min_priority: Minimum priority for suggestions (0.0-1.0) + """ + self.min_priority = min_priority + self.last_suggestion_time = {} # session_id -> timestamp + self.cooldown_seconds = 300 # 5 minutes between proactive suggestions + + async def analyze_session( + self, + session_id: str, + context_state: Dict[str, Any], + self_state: Dict[str, Any] + ) -> Optional[Dict[str, Any]]: + """ + Analyze session for proactive suggestion opportunities. + + Args: + session_id: Current session ID + context_state: Full context including message history + self_state: Lyra's current self-state + + Returns: + { + "suggestion": "text to append to response", + "priority": 0.0-1.0, + "reason": "why this suggestion", + "type": "check_in | learning | goal_reminder | summary | pattern" + } + or None if no suggestion + """ + # Check cooldown + if not self._check_cooldown(session_id): + logger.debug(f"[PROACTIVE] Session {session_id} in cooldown, skipping") + return None + + suggestions = [] + + # Check 1: Long silence detection + silence_suggestion = self._check_long_silence(context_state) + if silence_suggestion: + suggestions.append(silence_suggestion) + + # Check 2: Learning queue + high curiosity + learning_suggestion = self._check_learning_opportunity(self_state) + if learning_suggestion: + suggestions.append(learning_suggestion) + + # Check 3: Active goals reminder + goal_suggestion = self._check_active_goals(self_state, context_state) + if goal_suggestion: + suggestions.append(goal_suggestion) + + # Check 4: Conversation milestones + milestone_suggestion = self._check_conversation_milestone(context_state) + if milestone_suggestion: + suggestions.append(milestone_suggestion) + + # Check 5: Pattern-based suggestions + pattern_suggestion = self._check_patterns(context_state, self_state) + if pattern_suggestion: + suggestions.append(pattern_suggestion) + + # Filter by priority and return highest + valid_suggestions = [s for s in suggestions if s["priority"] >= self.min_priority] + + if not valid_suggestions: + return None + + # Return highest priority suggestion + best_suggestion = max(valid_suggestions, key=lambda x: x["priority"]) + + # Update cooldown timer + self._update_cooldown(session_id) + + logger.info(f"[PROACTIVE] Suggestion generated: {best_suggestion['type']} (priority: {best_suggestion['priority']:.2f})") + + return best_suggestion + + def _check_cooldown(self, session_id: str) -> bool: + """Check if session is past cooldown period.""" + if session_id not in self.last_suggestion_time: + return True + + elapsed = time.time() - self.last_suggestion_time[session_id] + return elapsed >= self.cooldown_seconds + + def _update_cooldown(self, session_id: str) -> None: + """Update cooldown timer for session.""" + self.last_suggestion_time[session_id] = time.time() + + def _check_long_silence(self, context_state: Dict[str, Any]) -> Optional[Dict[str, Any]]: + """ + Check if user has been silent for a long time. + """ + minutes_since_last = context_state.get("minutes_since_last_msg", 0) + + # If > 30 minutes, suggest check-in + if minutes_since_last > 30: + return { + "suggestion": "\n\n[Aside: I'm still here if you need anything!]", + "priority": 0.7, + "reason": f"User silent for {minutes_since_last:.0f} minutes", + "type": "check_in" + } + + return None + + def _check_learning_opportunity(self, self_state: Dict[str, Any]) -> Optional[Dict[str, Any]]: + """ + Check if Lyra has learning queue items and high curiosity. + """ + learning_queue = self_state.get("learning_queue", []) + curiosity = self_state.get("curiosity", 0.5) + + # If curiosity > 0.7 and learning queue exists + if curiosity > 0.7 and learning_queue: + topic = learning_queue[0] if learning_queue else "new topics" + return { + "suggestion": f"\n\n[Aside: I've been curious about {topic} lately. Would you like to explore it together?]", + "priority": 0.65, + "reason": f"High curiosity ({curiosity:.2f}) and learning queue present", + "type": "learning" + } + + return None + + def _check_active_goals( + self, + self_state: Dict[str, Any], + context_state: Dict[str, Any] + ) -> Optional[Dict[str, Any]]: + """ + Check if there are active goals worth reminding about. + """ + active_goals = self_state.get("active_goals", []) + + if not active_goals: + return None + + # Check if we've had multiple messages without goal progress + message_count = context_state.get("message_count", 0) + + # Every 10 messages, consider goal reminder + if message_count % 10 == 0 and message_count > 0: + goal = active_goals[0] # First active goal + goal_name = goal if isinstance(goal, str) else goal.get("name", "your goal") + + return { + "suggestion": f"\n\n[Aside: Still thinking about {goal_name}. Let me know if you want to work on it.]", + "priority": 0.6, + "reason": f"Active goal present, {message_count} messages since start", + "type": "goal_reminder" + } + + return None + + def _check_conversation_milestone(self, context_state: Dict[str, Any]) -> Optional[Dict[str, Any]]: + """ + Check for conversation milestones (e.g., every 50 messages). + """ + message_count = context_state.get("message_count", 0) + + # Every 50 messages, offer summary + if message_count > 0 and message_count % 50 == 0: + return { + "suggestion": f"\n\n[Aside: We've exchanged {message_count} messages! Would you like a summary of our conversation?]", + "priority": 0.65, + "reason": f"Milestone: {message_count} messages", + "type": "summary" + } + + return None + + def _check_patterns( + self, + context_state: Dict[str, Any], + self_state: Dict[str, Any] + ) -> Optional[Dict[str, Any]]: + """ + Check for behavioral patterns that merit suggestions. + """ + # Get current focus + focus = self_state.get("focus", "") + + # Check if user keeps asking similar questions (detected via focus) + if focus and "repeated" in focus.lower(): + return { + "suggestion": "\n\n[Aside: I notice we keep coming back to this topic. Would it help to create a summary or action plan?]", + "priority": 0.7, + "reason": "Repeated topic detected", + "type": "pattern" + } + + # Check energy levels - if Lyra is low energy, maybe suggest break + energy = self_state.get("energy", 0.8) + if energy < 0.3: + return { + "suggestion": "\n\n[Aside: We've been at this for a while. Need a break or want to keep going?]", + "priority": 0.65, + "reason": f"Low energy ({energy:.2f})", + "type": "pattern" + } + + return None + + def format_suggestion(self, suggestion: Dict[str, Any]) -> str: + """ + Format suggestion for appending to response. + + Args: + suggestion: Suggestion dict from analyze_session() + + Returns: + Formatted string to append to response + """ + return suggestion.get("suggestion", "") + + def set_cooldown_duration(self, seconds: int) -> None: + """ + Update cooldown duration. + + Args: + seconds: New cooldown duration + """ + self.cooldown_seconds = seconds + logger.info(f"[PROACTIVE] Cooldown updated to {seconds}s") + + def reset_cooldown(self, session_id: str) -> None: + """ + Reset cooldown for a specific session. + + Args: + session_id: Session to reset + """ + if session_id in self.last_suggestion_time: + del self.last_suggestion_time[session_id] + logger.info(f"[PROACTIVE] Cooldown reset for session {session_id}") + + def get_session_stats(self, session_id: str) -> Dict[str, Any]: + """ + Get stats for a session's proactive monitoring. + + Args: + session_id: Session to check + + Returns: + { + "last_suggestion_time": timestamp or None, + "seconds_since_last": int, + "cooldown_active": bool, + "cooldown_remaining": int + } + """ + last_time = self.last_suggestion_time.get(session_id) + + if not last_time: + return { + "last_suggestion_time": None, + "seconds_since_last": 0, + "cooldown_active": False, + "cooldown_remaining": 0 + } + + seconds_since = int(time.time() - last_time) + cooldown_active = seconds_since < self.cooldown_seconds + cooldown_remaining = max(0, self.cooldown_seconds - seconds_since) + + return { + "last_suggestion_time": last_time, + "seconds_since_last": seconds_since, + "cooldown_active": cooldown_active, + "cooldown_remaining": cooldown_remaining + } + + +# Singleton instance +_monitor_instance = None + + +def get_proactive_monitor(min_priority: float = 0.6) -> ProactiveMonitor: + """ + Get singleton proactive monitor instance. + + Args: + min_priority: Minimum priority threshold (only used on first call) + + Returns: + ProactiveMonitor instance + """ + global _monitor_instance + if _monitor_instance is None: + _monitor_instance = ProactiveMonitor(min_priority=min_priority) + return _monitor_instance diff --git a/cortex/autonomy/self/__init__.py b/cortex/autonomy/self/__init__.py new file mode 100644 index 0000000..60c47c7 --- /dev/null +++ b/cortex/autonomy/self/__init__.py @@ -0,0 +1 @@ +# Self state module diff --git a/cortex/autonomy/self/analyzer.py b/cortex/autonomy/self/analyzer.py new file mode 100644 index 0000000..4ee22e6 --- /dev/null +++ b/cortex/autonomy/self/analyzer.py @@ -0,0 +1,74 @@ +""" +Analyze interactions and update self-state accordingly. +""" + +import logging +from typing import Dict, Any +from .state import update_self_state + +logger = logging.getLogger(__name__) + + +async def analyze_and_update_state( + monologue: Dict[str, Any], + user_prompt: str, + response: str, + context: Dict[str, Any] +) -> None: + """ + Analyze interaction and update self-state. + + This runs after response generation to update Lyra's internal state + based on the interaction. + + Args: + monologue: Inner monologue output + user_prompt: User's message + response: Lyra's response + context: Full context state + """ + + # Simple heuristics for state updates + # TODO: Replace with LLM-based sentiment analysis in Phase 2 + + mood_delta = 0.0 + energy_delta = 0.0 + confidence_delta = 0.0 + curiosity_delta = 0.0 + new_focus = None + + # Analyze intent from monologue + intent = monologue.get("intent", "").lower() if monologue else "" + + if "technical" in intent or "complex" in intent: + energy_delta = -0.05 # Deep thinking is tiring + confidence_delta = 0.05 if len(response) > 200 else -0.05 + new_focus = "technical_problem" + + elif "creative" in intent or "brainstorm" in intent: + mood_delta = 0.1 # Creative work is engaging + curiosity_delta = 0.1 + new_focus = "creative_exploration" + + elif "clarification" in intent or "confused" in intent: + confidence_delta = -0.05 + new_focus = "understanding_user" + + elif "simple" in intent or "casual" in intent: + energy_delta = 0.05 # Light conversation is refreshing + new_focus = "conversation" + + # Check for learning opportunities (questions in user prompt) + if "?" in user_prompt and any(word in user_prompt.lower() for word in ["how", "why", "what"]): + curiosity_delta += 0.05 + + # Update state + update_self_state( + mood_delta=mood_delta, + energy_delta=energy_delta, + new_focus=new_focus, + confidence_delta=confidence_delta, + curiosity_delta=curiosity_delta + ) + + logger.info(f"Self-state updated based on interaction: focus={new_focus}") diff --git a/cortex/autonomy/self/self_state.json b/cortex/autonomy/self/self_state.json new file mode 100644 index 0000000..e69de29 diff --git a/cortex/autonomy/self/state.py b/cortex/autonomy/self/state.py new file mode 100644 index 0000000..a8d9e46 --- /dev/null +++ b/cortex/autonomy/self/state.py @@ -0,0 +1,189 @@ +""" +Self-state management for Project Lyra. +Maintains persistent identity, mood, energy, and focus across sessions. +""" + +import json +import logging +import os +from datetime import datetime +from pathlib import Path +from typing import Dict, Any, Optional + +# Configuration +STATE_FILE = Path(os.getenv("SELF_STATE_FILE", "/app/data/self_state.json")) +VERBOSE_DEBUG = os.getenv("VERBOSE_DEBUG", "false").lower() == "true" + +logger = logging.getLogger(__name__) + +if VERBOSE_DEBUG: + logger.setLevel(logging.DEBUG) + +# Default state structure +DEFAULT_STATE = { + "mood": "neutral", + "energy": 0.8, + "focus": "user_request", + "confidence": 0.7, + "curiosity": 0.5, + "last_updated": None, + "interaction_count": 0, + "learning_queue": [], # Topics Lyra wants to explore + "active_goals": [], # Self-directed goals + "preferences": { + "verbosity": "medium", + "formality": "casual", + "proactivity": 0.3 # How likely to suggest things unprompted + }, + "metadata": { + "version": "1.0", + "created_at": None + } +} + + +class SelfState: + """Manages Lyra's persistent self-state.""" + + def __init__(self): + self._state = self._load_state() + + def _load_state(self) -> Dict[str, Any]: + """Load state from disk or create default.""" + if STATE_FILE.exists(): + try: + with open(STATE_FILE, 'r') as f: + state = json.load(f) + logger.info(f"Loaded self-state from {STATE_FILE}") + return state + except Exception as e: + logger.error(f"Failed to load self-state: {e}") + return self._create_default_state() + else: + return self._create_default_state() + + def _create_default_state(self) -> Dict[str, Any]: + """Create and save default state.""" + state = DEFAULT_STATE.copy() + state["metadata"]["created_at"] = datetime.now().isoformat() + state["last_updated"] = datetime.now().isoformat() + self._save_state(state) + logger.info("Created new default self-state") + return state + + def _save_state(self, state: Dict[str, Any]) -> None: + """Persist state to disk.""" + try: + STATE_FILE.parent.mkdir(parents=True, exist_ok=True) + with open(STATE_FILE, 'w') as f: + json.dump(state, f, indent=2) + if VERBOSE_DEBUG: + logger.debug(f"Saved self-state to {STATE_FILE}") + except Exception as e: + logger.error(f"Failed to save self-state: {e}") + + def get_state(self) -> Dict[str, Any]: + """Get current state snapshot.""" + return self._state.copy() + + def update_from_interaction( + self, + mood_delta: float = 0.0, + energy_delta: float = 0.0, + new_focus: Optional[str] = None, + confidence_delta: float = 0.0, + curiosity_delta: float = 0.0 + ) -> None: + """ + Update state based on interaction. + + Args: + mood_delta: Change in mood (-1.0 to 1.0) + energy_delta: Change in energy (-1.0 to 1.0) + new_focus: New focus area + confidence_delta: Change in confidence + curiosity_delta: Change in curiosity + """ + # Apply deltas with bounds checking + self._state["energy"] = max(0.0, min(1.0, + self._state.get("energy", 0.8) + energy_delta)) + + self._state["confidence"] = max(0.0, min(1.0, + self._state.get("confidence", 0.7) + confidence_delta)) + + self._state["curiosity"] = max(0.0, min(1.0, + self._state.get("curiosity", 0.5) + curiosity_delta)) + + # Update focus if provided + if new_focus: + self._state["focus"] = new_focus + + # Update mood (simplified sentiment) + if mood_delta != 0: + mood_map = ["frustrated", "neutral", "engaged", "excited"] + current_mood_idx = 1 # neutral default + if self._state.get("mood") in mood_map: + current_mood_idx = mood_map.index(self._state["mood"]) + + new_mood_idx = max(0, min(len(mood_map) - 1, + int(current_mood_idx + mood_delta * 2))) + self._state["mood"] = mood_map[new_mood_idx] + + # Increment interaction counter + self._state["interaction_count"] = self._state.get("interaction_count", 0) + 1 + self._state["last_updated"] = datetime.now().isoformat() + + # Persist changes + self._save_state(self._state) + + if VERBOSE_DEBUG: + logger.debug(f"Updated self-state: mood={self._state['mood']}, " + f"energy={self._state['energy']:.2f}, " + f"confidence={self._state['confidence']:.2f}") + + def add_learning_goal(self, topic: str) -> None: + """Add topic to learning queue.""" + queue = self._state.get("learning_queue", []) + if topic not in [item.get("topic") for item in queue]: + queue.append({ + "topic": topic, + "added_at": datetime.now().isoformat(), + "priority": 0.5 + }) + self._state["learning_queue"] = queue + self._save_state(self._state) + logger.info(f"Added learning goal: {topic}") + + def add_active_goal(self, goal: str, context: str = "") -> None: + """Add self-directed goal.""" + goals = self._state.get("active_goals", []) + goals.append({ + "goal": goal, + "context": context, + "created_at": datetime.now().isoformat(), + "status": "active" + }) + self._state["active_goals"] = goals + self._save_state(self._state) + logger.info(f"Added active goal: {goal}") + + +# Global instance +_self_state_instance = None + +def get_self_state_instance() -> SelfState: + """Get or create global SelfState instance.""" + global _self_state_instance + if _self_state_instance is None: + _self_state_instance = SelfState() + return _self_state_instance + + +def load_self_state() -> Dict[str, Any]: + """Load self state - public API for backwards compatibility.""" + return get_self_state_instance().get_state() + + +def update_self_state(**kwargs) -> None: + """Update self state - public API.""" + get_self_state_instance().update_from_interaction(**kwargs) diff --git a/cortex/autonomy/tools/__init__.py b/cortex/autonomy/tools/__init__.py new file mode 100644 index 0000000..510fad9 --- /dev/null +++ b/cortex/autonomy/tools/__init__.py @@ -0,0 +1 @@ +"""Autonomous tool invocation system.""" diff --git a/cortex/autonomy/tools/adapters/__init__.py b/cortex/autonomy/tools/adapters/__init__.py new file mode 100644 index 0000000..e61c673 --- /dev/null +++ b/cortex/autonomy/tools/adapters/__init__.py @@ -0,0 +1,13 @@ +"""Provider adapters for tool calling.""" + +from .base import ToolAdapter +from .openai_adapter import OpenAIAdapter +from .ollama_adapter import OllamaAdapter +from .llamacpp_adapter import LlamaCppAdapter + +__all__ = [ + "ToolAdapter", + "OpenAIAdapter", + "OllamaAdapter", + "LlamaCppAdapter", +] diff --git a/cortex/autonomy/tools/adapters/base.py b/cortex/autonomy/tools/adapters/base.py new file mode 100644 index 0000000..5949fe4 --- /dev/null +++ b/cortex/autonomy/tools/adapters/base.py @@ -0,0 +1,79 @@ +""" +Base adapter interface for provider-agnostic tool calling. + +This module defines the abstract base class that all LLM provider adapters +must implement to support tool calling in Lyra. +""" + +from abc import ABC, abstractmethod +from typing import Dict, List, Optional + + +class ToolAdapter(ABC): + """Base class for provider-specific tool adapters. + + Each LLM provider (OpenAI, Ollama, llama.cpp, etc.) has its own + way of handling tool calls. This adapter pattern allows Lyra to + support tools across all providers with a unified interface. + """ + + @abstractmethod + async def prepare_request( + self, + messages: List[Dict], + tools: List[Dict], + tool_choice: Optional[str] = None + ) -> Dict: + """Convert Lyra tool definitions to provider-specific format. + + Args: + messages: Conversation history in OpenAI format + tools: List of Lyra tool definitions (provider-agnostic) + tool_choice: Optional tool forcing ("auto", "required", "none") + + Returns: + dict: Provider-specific request payload ready to send to LLM + """ + pass + + @abstractmethod + async def parse_response(self, response) -> Dict: + """Extract tool calls from provider response. + + Args: + response: Raw provider response (format varies by provider) + + Returns: + dict: Standardized response in Lyra format: + { + "content": str, # Assistant's text response + "tool_calls": [ # List of tool calls or None + { + "id": str, # Unique call ID + "name": str, # Tool name + "arguments": dict # Tool arguments + } + ] or None + } + """ + pass + + @abstractmethod + def format_tool_result( + self, + tool_call_id: str, + tool_name: str, + result: Dict + ) -> Dict: + """Format tool execution result for next LLM call. + + Args: + tool_call_id: ID from the original tool call + tool_name: Name of the executed tool + result: Tool execution result dictionary + + Returns: + dict: Message object to append to conversation + (format varies by provider) + """ + pass diff --git a/cortex/autonomy/tools/adapters/llamacpp_adapter.py b/cortex/autonomy/tools/adapters/llamacpp_adapter.py new file mode 100644 index 0000000..ad38217 --- /dev/null +++ b/cortex/autonomy/tools/adapters/llamacpp_adapter.py @@ -0,0 +1,17 @@ +""" +llama.cpp adapter for tool calling. + +Since llama.cpp has similar constraints to Ollama (no native function calling), +this adapter reuses the XML-based approach from OllamaAdapter. +""" + +from .ollama_adapter import OllamaAdapter + + +class LlamaCppAdapter(OllamaAdapter): + """llama.cpp adapter - uses same XML approach as Ollama. + + llama.cpp doesn't have native function calling support, so we use + the same XML-based prompt engineering approach as Ollama. + """ + pass diff --git a/cortex/autonomy/tools/adapters/ollama_adapter.py b/cortex/autonomy/tools/adapters/ollama_adapter.py new file mode 100644 index 0000000..dec0cd7 --- /dev/null +++ b/cortex/autonomy/tools/adapters/ollama_adapter.py @@ -0,0 +1,191 @@ +""" +Ollama adapter for tool calling using XML-structured prompts. + +Since Ollama doesn't have native function calling, this adapter uses +XML-based prompts to instruct the model how to call tools. +""" + +import json +import re +from typing import Dict, List, Optional +from .base import ToolAdapter + + +class OllamaAdapter(ToolAdapter): + """Ollama adapter using XML-structured prompts for tool calling. + + This adapter injects tool descriptions into the system prompt and + teaches the model to respond with XML when it wants to use a tool. + """ + + SYSTEM_PROMPT = """You have access to the following tools: + +{tool_descriptions} + +To use a tool, respond with XML in this exact format: + + tool_name + + value + + why you're using this tool + + +You can call multiple tools by including multiple blocks. +If you don't need to use any tools, respond normally without XML. +After tools are executed, you'll receive results and can continue the conversation.""" + + async def prepare_request( + self, + messages: List[Dict], + tools: List[Dict], + tool_choice: Optional[str] = None + ) -> Dict: + """Inject tool descriptions into system prompt. + + Args: + messages: Conversation history + tools: Lyra tool definitions + tool_choice: Ignored for Ollama (no native support) + + Returns: + dict: Request payload with modified messages + """ + # Format tool descriptions + tool_desc = "\n".join([ + f"- {t['name']}: {t['description']}\n Parameters: {self._format_parameters(t['parameters'], t.get('required', []))}" + for t in tools + ]) + + system_msg = self.SYSTEM_PROMPT.format(tool_descriptions=tool_desc) + + # Check if first message is already a system message + modified_messages = messages.copy() + if modified_messages and modified_messages[0].get("role") == "system": + # Prepend tool instructions to existing system message + modified_messages[0]["content"] = system_msg + "\n\n" + modified_messages[0]["content"] + else: + # Add new system message at the beginning + modified_messages.insert(0, {"role": "system", "content": system_msg}) + + return {"messages": modified_messages} + + def _format_parameters(self, parameters: Dict, required: List[str]) -> str: + """Format parameters for tool description. + + Args: + parameters: Parameter definitions + required: List of required parameter names + + Returns: + str: Human-readable parameter description + """ + param_strs = [] + for name, spec in parameters.items(): + req_marker = "(required)" if name in required else "(optional)" + param_strs.append(f"{name} {req_marker}: {spec.get('description', '')}") + return ", ".join(param_strs) + + async def parse_response(self, response) -> Dict: + """Extract tool calls from XML in response. + + Args: + response: String response from Ollama + + Returns: + dict: Standardized Lyra format with content and tool_calls + """ + import logging + logger = logging.getLogger(__name__) + + # Ollama returns a string + if isinstance(response, dict): + content = response.get("message", {}).get("content", "") + else: + content = str(response) + + logger.info(f"πŸ” OllamaAdapter.parse_response: content length={len(content)}, has ={('' in content)}") + logger.debug(f"πŸ” Content preview: {content[:500]}") + + # Parse XML tool calls + tool_calls = [] + if "" in content: + # Split content by to get each block + blocks = content.split('') + logger.info(f"πŸ” Split into {len(blocks)} blocks") + + # First block is content before any tool calls + clean_parts = [blocks[0]] + + for idx, block in enumerate(blocks[1:]): # Skip first block (pre-tool content) + # Extract tool name + name_match = re.search(r'(.*?)', block) + if not name_match: + logger.warning(f"Block {idx} has no tag, skipping") + continue + + name = name_match.group(1).strip() + arguments = {} + + # Extract arguments + args_match = re.search(r'(.*?)', block, re.DOTALL) + if args_match: + args_xml = args_match.group(1) + # Parse value pairs + arg_pairs = re.findall(r'<(\w+)>(.*?)', args_xml, re.DOTALL) + arguments = {k: v.strip() for k, v in arg_pairs} + + tool_calls.append({ + "id": f"call_{idx}", + "name": name, + "arguments": arguments + }) + + # For clean content, find what comes AFTER the tool call block + # Look for the last closing tag ( or malformed ) and keep what's after + # Split by any closing tag at the END of the tool block + remaining = block + # Remove everything up to and including a standalone closing tag + # Pattern: find that's not followed by more XML + end_match = re.search(r'\s*(.*)$', remaining, re.DOTALL) + if end_match: + after_content = end_match.group(1).strip() + if after_content and not after_content.startswith('<'): + # Only keep if it's actual text content, not more XML + clean_parts.append(after_content) + + clean_content = ''.join(clean_parts).strip() + else: + clean_content = content + + return { + "content": clean_content, + "tool_calls": tool_calls if tool_calls else None + } + + def format_tool_result( + self, + tool_call_id: str, + tool_name: str, + result: Dict + ) -> Dict: + """Format tool result as XML for next prompt. + + Args: + tool_call_id: ID from the original tool call + tool_name: Name of the executed tool + result: Tool execution result + + Returns: + dict: Message in user role with XML-formatted result + """ + # Format result as XML + result_xml = f""" + {tool_name} + {json.dumps(result, ensure_ascii=False)} +""" + + return { + "role": "user", + "content": result_xml + } diff --git a/cortex/autonomy/tools/adapters/openai_adapter.py b/cortex/autonomy/tools/adapters/openai_adapter.py new file mode 100644 index 0000000..bd5ff8b --- /dev/null +++ b/cortex/autonomy/tools/adapters/openai_adapter.py @@ -0,0 +1,130 @@ +""" +OpenAI adapter for tool calling using native function calling API. + +This adapter converts Lyra tool definitions to OpenAI's function calling +format and parses OpenAI responses back to Lyra's standardized format. +""" + +import json +from typing import Dict, List, Optional +from .base import ToolAdapter + + +class OpenAIAdapter(ToolAdapter): + """OpenAI-specific adapter using native function calling. + + OpenAI supports function calling natively through the 'tools' parameter + in chat completions. This adapter leverages that capability. + """ + + async def prepare_request( + self, + messages: List[Dict], + tools: List[Dict], + tool_choice: Optional[str] = None + ) -> Dict: + """Convert Lyra tools to OpenAI function calling format. + + Args: + messages: Conversation history + tools: Lyra tool definitions + tool_choice: "auto", "required", "none", or None + + Returns: + dict: Request payload with OpenAI-formatted tools + """ + # Convert Lyra tools β†’ OpenAI function calling format + openai_tools = [] + for tool in tools: + openai_tools.append({ + "type": "function", + "function": { + "name": tool["name"], + "description": tool["description"], + "parameters": { + "type": "object", + "properties": tool["parameters"], + "required": tool.get("required", []) + } + } + }) + + payload = { + "messages": messages, + "tools": openai_tools + } + + # Add tool_choice if specified + if tool_choice: + if tool_choice == "required": + payload["tool_choice"] = "required" + elif tool_choice == "none": + payload["tool_choice"] = "none" + else: # "auto" or default + payload["tool_choice"] = "auto" + + return payload + + async def parse_response(self, response) -> Dict: + """Extract tool calls from OpenAI response. + + Args: + response: OpenAI ChatCompletion response object + + Returns: + dict: Standardized Lyra format with content and tool_calls + """ + message = response.choices[0].message + content = message.content if message.content else "" + tool_calls = [] + + # Check if response contains tool calls + if hasattr(message, 'tool_calls') and message.tool_calls: + for tc in message.tool_calls: + try: + # Parse arguments (may be JSON string) + args = tc.function.arguments + if isinstance(args, str): + args = json.loads(args) + + tool_calls.append({ + "id": tc.id, + "name": tc.function.name, + "arguments": args + }) + except json.JSONDecodeError as e: + # If arguments can't be parsed, include error + tool_calls.append({ + "id": tc.id, + "name": tc.function.name, + "arguments": {}, + "error": f"Failed to parse arguments: {str(e)}" + }) + + return { + "content": content, + "tool_calls": tool_calls if tool_calls else None + } + + def format_tool_result( + self, + tool_call_id: str, + tool_name: str, + result: Dict + ) -> Dict: + """Format tool result as OpenAI tool message. + + Args: + tool_call_id: ID from the original tool call + tool_name: Name of the executed tool + result: Tool execution result + + Returns: + dict: Message in OpenAI tool message format + """ + return { + "role": "tool", + "tool_call_id": tool_call_id, + "name": tool_name, + "content": json.dumps(result, ensure_ascii=False) + } diff --git a/cortex/autonomy/tools/decision_engine.py b/cortex/autonomy/tools/decision_engine.py new file mode 100644 index 0000000..3247436 --- /dev/null +++ b/cortex/autonomy/tools/decision_engine.py @@ -0,0 +1,124 @@ +""" +Tool Decision Engine - decides which tools to invoke autonomously. +""" + +import logging +from typing import Dict, List, Any + +logger = logging.getLogger(__name__) + + +class ToolDecisionEngine: + """Decides which tools to invoke based on context analysis.""" + + async def analyze_tool_needs( + self, + user_prompt: str, + monologue: Dict[str, Any], + context_state: Dict[str, Any], + available_tools: List[str] + ) -> Dict[str, Any]: + """ + Analyze if tools should be invoked and which ones. + + Args: + user_prompt: User's message + monologue: Inner monologue analysis + context_state: Full context + available_tools: List of available tools + + Returns: + { + "should_invoke_tools": bool, + "tools_to_invoke": [ + { + "tool": "RAG | WEB | WEATHER | etc", + "query": "search query", + "reason": "why this tool", + "priority": 0.0-1.0 + }, + ... + ], + "confidence": 0.0-1.0 + } + """ + + tools_to_invoke = [] + + # Check for memory/context needs + if any(word in user_prompt.lower() for word in [ + "remember", "you said", "we discussed", "earlier", "before", + "last time", "previously", "what did" + ]): + tools_to_invoke.append({ + "tool": "RAG", + "query": user_prompt, + "reason": "User references past conversation", + "priority": 0.9 + }) + + # Check for web search needs + if any(word in user_prompt.lower() for word in [ + "current", "latest", "news", "today", "what's happening", + "look up", "search for", "find information", "recent" + ]): + tools_to_invoke.append({ + "tool": "WEB", + "query": user_prompt, + "reason": "Requires current information", + "priority": 0.8 + }) + + # Check for weather needs + if any(word in user_prompt.lower() for word in [ + "weather", "temperature", "forecast", "rain", "sunny", "climate" + ]): + tools_to_invoke.append({ + "tool": "WEATHER", + "query": user_prompt, + "reason": "Weather information requested", + "priority": 0.95 + }) + + # Check for code-related needs + if any(word in user_prompt.lower() for word in [ + "code", "function", "debug", "implement", "algorithm", + "programming", "script", "syntax" + ]): + if "CODEBRAIN" in available_tools: + tools_to_invoke.append({ + "tool": "CODEBRAIN", + "query": user_prompt, + "reason": "Code-related task", + "priority": 0.85 + }) + + # Proactive RAG for complex queries (based on monologue) + intent = monologue.get("intent", "") if monologue else "" + if monologue and monologue.get("consult_executive"): + # Complex query - might benefit from context + if not any(t["tool"] == "RAG" for t in tools_to_invoke): + tools_to_invoke.append({ + "tool": "RAG", + "query": user_prompt, + "reason": "Complex query benefits from context", + "priority": 0.6 + }) + + # Sort by priority + tools_to_invoke.sort(key=lambda x: x["priority"], reverse=True) + + max_priority = max([t["priority"] for t in tools_to_invoke]) if tools_to_invoke else 0.0 + + result = { + "should_invoke_tools": len(tools_to_invoke) > 0, + "tools_to_invoke": tools_to_invoke, + "confidence": max_priority + } + + if tools_to_invoke: + logger.info(f"[TOOL_DECISION] Autonomous tool invocation recommended: {len(tools_to_invoke)} tools") + for tool in tools_to_invoke: + logger.info(f" - {tool['tool']} (priority: {tool['priority']:.2f}): {tool['reason']}") + + return result diff --git a/cortex/autonomy/tools/executors/__init__.py b/cortex/autonomy/tools/executors/__init__.py new file mode 100644 index 0000000..5aad7a3 --- /dev/null +++ b/cortex/autonomy/tools/executors/__init__.py @@ -0,0 +1,12 @@ +"""Tool executors for Lyra.""" + +from .code_executor import execute_code +from .web_search import search_web +from .trilium import search_notes, create_note + +__all__ = [ + "execute_code", + "search_web", + "search_notes", + "create_note", +] diff --git a/cortex/autonomy/tools/executors/code_executor.py b/cortex/autonomy/tools/executors/code_executor.py new file mode 100644 index 0000000..a922215 --- /dev/null +++ b/cortex/autonomy/tools/executors/code_executor.py @@ -0,0 +1,218 @@ +""" +Code executor for running Python and bash code in a sandbox container. + +This module provides secure code execution with timeout protection, +output limits, and forbidden pattern detection. +""" + +import asyncio +import os +import tempfile +import re +from typing import Dict +import docker +from docker.errors import ( + DockerException, + APIError, + ContainerError, + ImageNotFound, + NotFound +) + + +# Forbidden patterns that pose security risks +FORBIDDEN_PATTERNS = [ + r'rm\s+-rf', # Destructive file removal + r':\(\)\{\s*:\|:&\s*\};:', # Fork bomb + r'mkfs', # Filesystem formatting + r'/dev/sd[a-z]', # Direct device access + r'dd\s+if=', # Low-level disk operations + r'>\s*/dev/sd', # Writing to devices + r'curl.*\|.*sh', # Pipe to shell (common attack vector) + r'wget.*\|.*sh', # Pipe to shell +] + + +async def execute_code(args: Dict) -> Dict: + """Execute code in sandbox container. + + Args: + args: Dictionary containing: + - language (str): "python" or "bash" + - code (str): The code to execute + - reason (str): Why this code is being executed + - timeout (int, optional): Execution timeout in seconds + + Returns: + dict: Execution result containing: + - stdout (str): Standard output + - stderr (str): Standard error + - exit_code (int): Process exit code + - execution_time (float): Time taken in seconds + OR + - error (str): Error message if execution failed + """ + language = args.get("language") + code = args.get("code") + reason = args.get("reason", "No reason provided") + timeout = args.get("timeout", 30) + + # Validation + if not language or language not in ["python", "bash"]: + return {"error": "Invalid language. Must be 'python' or 'bash'"} + + if not code: + return {"error": "No code provided"} + + # Security: Check for forbidden patterns + for pattern in FORBIDDEN_PATTERNS: + if re.search(pattern, code, re.IGNORECASE): + return {"error": f"Forbidden pattern detected for security reasons"} + + # Validate and cap timeout + max_timeout = int(os.getenv("CODE_SANDBOX_MAX_TIMEOUT", "120")) + timeout = min(max(timeout, 1), max_timeout) + + container = os.getenv("CODE_SANDBOX_CONTAINER", "lyra-code-sandbox") + + # Validate container exists and is running + try: + docker_client = docker.from_env() + container_obj = docker_client.containers.get(container) + + if container_obj.status != "running": + return { + "error": f"Sandbox container '{container}' is not running (status: {container_obj.status})", + "hint": "Start the container with: docker start " + container + } + except NotFound: + return { + "error": f"Sandbox container '{container}' not found", + "hint": "Ensure the container exists and is running" + } + except DockerException as e: + return { + "error": f"Docker daemon error: {str(e)}", + "hint": "Check Docker connectivity and permissions" + } + + # Write code to temporary file + suffix = ".py" if language == "python" else ".sh" + try: + with tempfile.NamedTemporaryFile( + mode='w', + suffix=suffix, + delete=False, + encoding='utf-8' + ) as f: + f.write(code) + temp_file = f.name + except Exception as e: + return {"error": f"Failed to create temp file: {str(e)}"} + + try: + # Copy file to container + exec_path = f"/executions/{os.path.basename(temp_file)}" + + cp_proc = await asyncio.create_subprocess_exec( + "docker", "cp", temp_file, f"{container}:{exec_path}", + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE + ) + await cp_proc.communicate() + + if cp_proc.returncode != 0: + return {"error": "Failed to copy code to sandbox container"} + + # Fix permissions so sandbox user can read the file (run as root) + chown_proc = await asyncio.create_subprocess_exec( + "docker", "exec", "-u", "root", container, "chown", "sandbox:sandbox", exec_path, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE + ) + await chown_proc.communicate() + + # Execute in container as sandbox user + if language == "python": + cmd = ["docker", "exec", "-u", "sandbox", container, "python3", exec_path] + else: # bash + cmd = ["docker", "exec", "-u", "sandbox", container, "bash", exec_path] + + start_time = asyncio.get_event_loop().time() + + proc = await asyncio.create_subprocess_exec( + *cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE + ) + + try: + stdout, stderr = await asyncio.wait_for( + proc.communicate(), + timeout=timeout + ) + + execution_time = asyncio.get_event_loop().time() - start_time + + # Truncate output to prevent memory issues (configurable) + max_output = int(os.getenv("CODE_SANDBOX_MAX_OUTPUT", "10240")) # 10KB default + stdout_str = stdout[:max_output].decode('utf-8', errors='replace') + stderr_str = stderr[:max_output].decode('utf-8', errors='replace') + + if len(stdout) > max_output: + stdout_str += f"\n... (output truncated, {len(stdout)} bytes total)" + if len(stderr) > max_output: + stderr_str += f"\n... (output truncated, {len(stderr)} bytes total)" + + return { + "stdout": stdout_str, + "stderr": stderr_str, + "exit_code": proc.returncode, + "execution_time": round(execution_time, 2) + } + + except asyncio.TimeoutError: + # Kill the process + try: + proc.kill() + await proc.wait() + except: + pass + return {"error": f"Execution timeout after {timeout}s"} + + except APIError as e: + return { + "error": f"Docker API error: {e.explanation}", + "status_code": e.status_code + } + except ContainerError as e: + return { + "error": f"Container execution error: {str(e)}", + "exit_code": e.exit_status + } + except DockerException as e: + return { + "error": f"Docker error: {str(e)}", + "hint": "Check Docker daemon connectivity and permissions" + } + except Exception as e: + return {"error": f"Execution failed: {str(e)}"} + + finally: + # Cleanup temporary file + try: + if 'temp_file' in locals(): + os.unlink(temp_file) + except Exception as cleanup_error: + # Log but don't fail on cleanup errors + pass + + # Optional: Clean up file from container (best effort) + try: + if 'exec_path' in locals() and 'container_obj' in locals(): + container_obj.exec_run( + f"rm -f {exec_path}", + user="sandbox" + ) + except: + pass # Best effort cleanup diff --git a/cortex/autonomy/tools/executors/search_providers/__init__.py b/cortex/autonomy/tools/executors/search_providers/__init__.py new file mode 100644 index 0000000..1658eef --- /dev/null +++ b/cortex/autonomy/tools/executors/search_providers/__init__.py @@ -0,0 +1,13 @@ +"""Web search provider implementations.""" + +from .base import SearchProvider, SearchResult, SearchResponse +from .brave import BraveSearchProvider +from .duckduckgo import DuckDuckGoProvider + +__all__ = [ + "SearchProvider", + "SearchResult", + "SearchResponse", + "BraveSearchProvider", + "DuckDuckGoProvider", +] diff --git a/cortex/autonomy/tools/executors/search_providers/base.py b/cortex/autonomy/tools/executors/search_providers/base.py new file mode 100644 index 0000000..417148a --- /dev/null +++ b/cortex/autonomy/tools/executors/search_providers/base.py @@ -0,0 +1,49 @@ +"""Base interface for web search providers.""" + +from abc import ABC, abstractmethod +from typing import List, Optional +from dataclasses import dataclass + + +@dataclass +class SearchResult: + """Standardized search result format.""" + title: str + url: str + snippet: str + score: Optional[float] = None + + +@dataclass +class SearchResponse: + """Standardized search response.""" + results: List[SearchResult] + count: int + provider: str + query: str + error: Optional[str] = None + + +class SearchProvider(ABC): + """Abstract base class for search providers.""" + + @abstractmethod + async def search( + self, + query: str, + max_results: int = 5, + **kwargs + ) -> SearchResponse: + """Execute search and return standardized results.""" + pass + + @abstractmethod + async def health_check(self) -> bool: + """Check if provider is healthy and reachable.""" + pass + + @property + @abstractmethod + def name(self) -> str: + """Provider name.""" + pass diff --git a/cortex/autonomy/tools/executors/search_providers/brave.py b/cortex/autonomy/tools/executors/search_providers/brave.py new file mode 100644 index 0000000..af35cae --- /dev/null +++ b/cortex/autonomy/tools/executors/search_providers/brave.py @@ -0,0 +1,123 @@ +"""Brave Search API provider implementation.""" + +import os +import asyncio +import aiohttp +from .base import SearchProvider, SearchResponse, SearchResult +from ..utils.resilience import async_retry + + +class BraveSearchProvider(SearchProvider): + """Brave Search API implementation.""" + + def __init__(self): + self.api_key = os.getenv("BRAVE_SEARCH_API_KEY", "") + self.base_url = os.getenv( + "BRAVE_SEARCH_URL", + "https://api.search.brave.com/res/v1" + ) + self.timeout = float(os.getenv("BRAVE_SEARCH_TIMEOUT", "10.0")) + + @property + def name(self) -> str: + return "brave" + + @async_retry( + max_attempts=3, + exceptions=(aiohttp.ClientError, asyncio.TimeoutError) + ) + async def search( + self, + query: str, + max_results: int = 5, + **kwargs + ) -> SearchResponse: + """Execute Brave search with retry logic.""" + + if not self.api_key: + return SearchResponse( + results=[], + count=0, + provider=self.name, + query=query, + error="BRAVE_SEARCH_API_KEY not configured" + ) + + headers = { + "Accept": "application/json", + "X-Subscription-Token": self.api_key + } + + params = { + "q": query, + "count": min(max_results, 20) # Brave max is 20 + } + + try: + async with aiohttp.ClientSession() as session: + async with session.get( + f"{self.base_url}/web/search", + headers=headers, + params=params, + timeout=aiohttp.ClientTimeout(total=self.timeout) + ) as resp: + if resp.status == 200: + data = await resp.json() + results = [] + + for item in data.get("web", {}).get("results", []): + results.append(SearchResult( + title=item.get("title", ""), + url=item.get("url", ""), + snippet=item.get("description", ""), + score=item.get("score") + )) + + return SearchResponse( + results=results, + count=len(results), + provider=self.name, + query=query + ) + elif resp.status == 401: + error = "Authentication failed. Check BRAVE_SEARCH_API_KEY" + elif resp.status == 429: + error = f"Rate limit exceeded. Status: {resp.status}" + else: + error_text = await resp.text() + error = f"HTTP {resp.status}: {error_text}" + + return SearchResponse( + results=[], + count=0, + provider=self.name, + query=query, + error=error + ) + + except aiohttp.ClientConnectorError as e: + return SearchResponse( + results=[], + count=0, + provider=self.name, + query=query, + error=f"Cannot connect to Brave Search API: {str(e)}" + ) + except asyncio.TimeoutError: + return SearchResponse( + results=[], + count=0, + provider=self.name, + query=query, + error=f"Search timeout after {self.timeout}s" + ) + + async def health_check(self) -> bool: + """Check if Brave API is reachable.""" + if not self.api_key: + return False + try: + response = await self.search("test", max_results=1) + return response.error is None + except: + return False diff --git a/cortex/autonomy/tools/executors/search_providers/duckduckgo.py b/cortex/autonomy/tools/executors/search_providers/duckduckgo.py new file mode 100644 index 0000000..a59e4a8 --- /dev/null +++ b/cortex/autonomy/tools/executors/search_providers/duckduckgo.py @@ -0,0 +1,60 @@ +"""DuckDuckGo search provider with retry logic (legacy fallback).""" + +from duckduckgo_search import DDGS +from .base import SearchProvider, SearchResponse, SearchResult +from ..utils.resilience import async_retry + + +class DuckDuckGoProvider(SearchProvider): + """DuckDuckGo search implementation with retry logic.""" + + @property + def name(self) -> str: + return "duckduckgo" + + @async_retry( + max_attempts=3, + exceptions=(Exception,) # DDG throws generic exceptions + ) + async def search( + self, + query: str, + max_results: int = 5, + **kwargs + ) -> SearchResponse: + """Execute DuckDuckGo search with retry logic.""" + + try: + with DDGS() as ddgs: + results = [] + + for result in ddgs.text(query, max_results=max_results): + results.append(SearchResult( + title=result.get("title", ""), + url=result.get("href", ""), + snippet=result.get("body", "") + )) + + return SearchResponse( + results=results, + count=len(results), + provider=self.name, + query=query + ) + + except Exception as e: + return SearchResponse( + results=[], + count=0, + provider=self.name, + query=query, + error=f"Search failed: {str(e)}" + ) + + async def health_check(self) -> bool: + """Basic health check for DDG.""" + try: + response = await self.search("test", max_results=1) + return response.error is None + except: + return False diff --git a/cortex/autonomy/tools/executors/trilium.py b/cortex/autonomy/tools/executors/trilium.py new file mode 100644 index 0000000..9909f81 --- /dev/null +++ b/cortex/autonomy/tools/executors/trilium.py @@ -0,0 +1,216 @@ +""" +Trilium notes executor for searching and creating notes via ETAPI. + +This module provides integration with Trilium notes through the ETAPI HTTP API +with improved resilience: timeout configuration, retry logic, and connection pooling. +""" + +import os +import asyncio +import aiohttp +from typing import Dict, Optional +from ..utils.resilience import async_retry + + +TRILIUM_URL = os.getenv("TRILIUM_URL", "http://localhost:8080") +TRILIUM_TOKEN = os.getenv("TRILIUM_ETAPI_TOKEN", "") + +# Module-level session for connection pooling +_session: Optional[aiohttp.ClientSession] = None + + +def get_session() -> aiohttp.ClientSession: + """Get or create shared aiohttp session for connection pooling.""" + global _session + if _session is None or _session.closed: + timeout = aiohttp.ClientTimeout( + total=float(os.getenv("TRILIUM_TIMEOUT", "30.0")), + connect=float(os.getenv("TRILIUM_CONNECT_TIMEOUT", "10.0")) + ) + _session = aiohttp.ClientSession(timeout=timeout) + return _session + + +@async_retry( + max_attempts=3, + exceptions=(aiohttp.ClientError, asyncio.TimeoutError) +) +async def search_notes(args: Dict) -> Dict: + """Search Trilium notes via ETAPI with retry logic. + + Args: + args: Dictionary containing: + - query (str): Search query + - limit (int, optional): Maximum notes to return (default: 5, max: 20) + + Returns: + dict: Search results containing: + - notes (list): List of notes with noteId, title, content, type + - count (int): Number of notes returned + OR + - error (str): Error message if search failed + """ + query = args.get("query") + limit = args.get("limit", 5) + + # Validation + if not query: + return {"error": "No query provided"} + + if not TRILIUM_TOKEN: + return { + "error": "TRILIUM_ETAPI_TOKEN not configured in environment", + "hint": "Set TRILIUM_ETAPI_TOKEN in .env file" + } + + # Cap limit + limit = min(max(limit, 1), 20) + + try: + session = get_session() + async with session.get( + f"{TRILIUM_URL}/etapi/notes", + params={"search": query, "limit": limit}, + headers={"Authorization": TRILIUM_TOKEN} + ) as resp: + if resp.status == 200: + data = await resp.json() + # ETAPI returns {"results": [...]} format + results = data.get("results", []) + return { + "notes": results, + "count": len(results) + } + elif resp.status == 401: + return { + "error": "Authentication failed. Check TRILIUM_ETAPI_TOKEN", + "status": 401 + } + elif resp.status == 404: + return { + "error": "Trilium API endpoint not found. Check TRILIUM_URL", + "status": 404, + "url": TRILIUM_URL + } + else: + error_text = await resp.text() + return { + "error": f"HTTP {resp.status}: {error_text}", + "status": resp.status + } + + except aiohttp.ClientConnectorError as e: + return { + "error": f"Cannot connect to Trilium at {TRILIUM_URL}", + "hint": "Check if Trilium is running and URL is correct", + "details": str(e) + } + except asyncio.TimeoutError: + timeout = os.getenv("TRILIUM_TIMEOUT", "30.0") + return { + "error": f"Trilium request timeout after {timeout}s", + "hint": "Trilium may be slow or unresponsive" + } + except Exception as e: + return { + "error": f"Search failed: {str(e)}", + "type": type(e).__name__ + } + + +@async_retry( + max_attempts=3, + exceptions=(aiohttp.ClientError, asyncio.TimeoutError) +) +async def create_note(args: Dict) -> Dict: + """Create a note in Trilium via ETAPI with retry logic. + + Args: + args: Dictionary containing: + - title (str): Note title + - content (str): Note content in markdown or HTML + - parent_note_id (str, optional): Parent note ID to nest under + + Returns: + dict: Creation result containing: + - noteId (str): ID of created note + - title (str): Title of created note + - success (bool): True if created successfully + OR + - error (str): Error message if creation failed + """ + title = args.get("title") + content = args.get("content") + parent_note_id = args.get("parent_note_id", "root") # Default to root if not specified + + # Validation + if not title: + return {"error": "No title provided"} + + if not content: + return {"error": "No content provided"} + + if not TRILIUM_TOKEN: + return { + "error": "TRILIUM_ETAPI_TOKEN not configured in environment", + "hint": "Set TRILIUM_ETAPI_TOKEN in .env file" + } + + # Prepare payload + payload = { + "parentNoteId": parent_note_id, # Always include parentNoteId + "title": title, + "content": content, + "type": "text", + "mime": "text/html" + } + + try: + session = get_session() + async with session.post( + f"{TRILIUM_URL}/etapi/create-note", + json=payload, + headers={"Authorization": TRILIUM_TOKEN} + ) as resp: + if resp.status in [200, 201]: + data = await resp.json() + return { + "noteId": data.get("noteId"), + "title": title, + "success": True + } + elif resp.status == 401: + return { + "error": "Authentication failed. Check TRILIUM_ETAPI_TOKEN", + "status": 401 + } + elif resp.status == 404: + return { + "error": "Trilium API endpoint not found. Check TRILIUM_URL", + "status": 404, + "url": TRILIUM_URL + } + else: + error_text = await resp.text() + return { + "error": f"HTTP {resp.status}: {error_text}", + "status": resp.status + } + + except aiohttp.ClientConnectorError as e: + return { + "error": f"Cannot connect to Trilium at {TRILIUM_URL}", + "hint": "Check if Trilium is running and URL is correct", + "details": str(e) + } + except asyncio.TimeoutError: + timeout = os.getenv("TRILIUM_TIMEOUT", "30.0") + return { + "error": f"Trilium request timeout after {timeout}s", + "hint": "Trilium may be slow or unresponsive" + } + except Exception as e: + return { + "error": f"Note creation failed: {str(e)}", + "type": type(e).__name__ + } diff --git a/cortex/autonomy/tools/executors/web_search.py b/cortex/autonomy/tools/executors/web_search.py new file mode 100644 index 0000000..3b7ff74 --- /dev/null +++ b/cortex/autonomy/tools/executors/web_search.py @@ -0,0 +1,113 @@ +""" +Web search executor with pluggable provider support. + +Supports multiple providers with automatic fallback: +- Brave Search API (recommended, configurable) +- DuckDuckGo (legacy fallback) +""" + +import os +from typing import Dict, Optional +from .search_providers.base import SearchProvider +from .search_providers.brave import BraveSearchProvider +from .search_providers.duckduckgo import DuckDuckGoProvider + +# Provider registry +PROVIDERS = { + "brave": BraveSearchProvider, + "duckduckgo": DuckDuckGoProvider, +} + +# Singleton provider instances +_provider_instances: Dict[str, SearchProvider] = {} + + +def get_provider(name: str) -> Optional[SearchProvider]: + """Get or create provider instance.""" + if name not in _provider_instances: + provider_class = PROVIDERS.get(name) + if provider_class: + _provider_instances[name] = provider_class() + return _provider_instances.get(name) + + +async def search_web(args: Dict) -> Dict: + """Search the web using configured provider with automatic fallback. + + Args: + args: Dictionary containing: + - query (str): The search query + - max_results (int, optional): Maximum results to return (default: 5, max: 20) + - provider (str, optional): Force specific provider + + Returns: + dict: Search results containing: + - results (list): List of search results with title, url, snippet + - count (int): Number of results returned + - provider (str): Provider that returned results + OR + - error (str): Error message if all providers failed + """ + query = args.get("query") + max_results = args.get("max_results", 5) + forced_provider = args.get("provider") + + # Validation + if not query: + return {"error": "No query provided"} + + # Cap max_results + max_results = min(max(max_results, 1), 20) + + # Get provider preference from environment + primary_provider = os.getenv("WEB_SEARCH_PROVIDER", "duckduckgo") + fallback_providers = os.getenv( + "WEB_SEARCH_FALLBACK", + "duckduckgo" + ).split(",") + + # Build provider list + if forced_provider: + providers_to_try = [forced_provider] + else: + providers_to_try = [primary_provider] + [ + p.strip() for p in fallback_providers if p.strip() != primary_provider + ] + + # Try providers in order + last_error = None + for provider_name in providers_to_try: + provider = get_provider(provider_name) + if not provider: + last_error = f"Unknown provider: {provider_name}" + continue + + try: + response = await provider.search(query, max_results) + + # If successful, return results + if response.error is None and response.count > 0: + return { + "results": [ + { + "title": r.title, + "url": r.url, + "snippet": r.snippet, + } + for r in response.results + ], + "count": response.count, + "provider": provider_name + } + + last_error = response.error or "No results returned" + + except Exception as e: + last_error = f"{provider_name} failed: {str(e)}" + continue + + # All providers failed + return { + "error": f"All search providers failed. Last error: {last_error}", + "providers_tried": providers_to_try + } diff --git a/cortex/autonomy/tools/function_caller.py b/cortex/autonomy/tools/function_caller.py new file mode 100644 index 0000000..421788c --- /dev/null +++ b/cortex/autonomy/tools/function_caller.py @@ -0,0 +1,235 @@ +""" +Provider-agnostic function caller with iterative tool calling loop. + +This module implements the iterative loop that allows LLMs to call tools +multiple times until they have the information they need to answer the user. +""" + +import os +import logging +from typing import Dict, List, Optional +from llm.llm_router import call_llm, TOOL_ADAPTERS, BACKENDS +from .registry import get_registry +from .stream_events import get_stream_manager + + +logger = logging.getLogger(__name__) + + +class FunctionCaller: + """Provider-agnostic iterative tool calling loop. + + This class orchestrates the back-and-forth between the LLM and tools: + 1. Call LLM with tools available + 2. If LLM requests tool calls, execute them + 3. Add results to conversation + 4. Repeat until LLM is done or max iterations reached + """ + + def __init__(self, backend: str, temperature: float = 0.7): + """Initialize function caller. + + Args: + backend: LLM backend to use ("OPENAI", "OLLAMA", etc.) + temperature: Temperature for LLM calls + """ + self.backend = backend + self.temperature = temperature + self.registry = get_registry() + self.max_iterations = int(os.getenv("MAX_TOOL_ITERATIONS", "5")) + + # Resolve adapter for this backend + self.adapter = self._get_adapter() + + def _get_adapter(self): + """Get the appropriate adapter for this backend.""" + adapter = TOOL_ADAPTERS.get(self.backend) + + # For PRIMARY/SECONDARY/FALLBACK, determine adapter based on provider + if adapter is None and self.backend in ["PRIMARY", "SECONDARY", "FALLBACK"]: + cfg = BACKENDS.get(self.backend, {}) + provider = cfg.get("provider", "").lower() + + if provider == "openai": + adapter = TOOL_ADAPTERS["OPENAI"] + elif provider == "ollama": + adapter = TOOL_ADAPTERS["OLLAMA"] + elif provider == "mi50": + adapter = TOOL_ADAPTERS["MI50"] + + return adapter + + async def call_with_tools( + self, + messages: List[Dict], + max_tokens: int = 2048, + session_id: Optional[str] = None + ) -> Dict: + """Execute LLM with iterative tool calling. + + Args: + messages: Conversation history + max_tokens: Maximum tokens for LLM response + session_id: Optional session ID for streaming events + + Returns: + dict: { + "content": str, # Final response + "iterations": int, # Number of iterations + "tool_calls": list, # All tool calls made + "messages": list, # Full conversation history + "truncated": bool (optional) # True if max iterations reached + } + """ + logger.info(f"πŸ” FunctionCaller.call_with_tools() invoked with {len(messages)} messages") + tools = self.registry.get_tool_definitions() + logger.info(f"πŸ” Got {len(tools or [])} tool definitions from registry") + + # Get stream manager for emitting events + stream_manager = get_stream_manager() + should_stream = session_id and stream_manager.has_subscribers(session_id) + + # If no tools are enabled, just call LLM directly + if not tools: + logger.warning("FunctionCaller invoked but no tools are enabled") + response = await call_llm( + messages=messages, + backend=self.backend, + temperature=self.temperature, + max_tokens=max_tokens + ) + return { + "content": response, + "iterations": 1, + "tool_calls": [], + "messages": messages + [{"role": "assistant", "content": response}] + } + + conversation = messages.copy() + all_tool_calls = [] + + for iteration in range(self.max_iterations): + logger.info(f"Tool calling iteration {iteration + 1}/{self.max_iterations}") + + # Emit thinking event + if should_stream: + await stream_manager.emit(session_id, "thinking", { + "message": f"πŸ€” Thinking... (iteration {iteration + 1}/{self.max_iterations})" + }) + + # Call LLM with tools + try: + response = await call_llm( + messages=conversation, + backend=self.backend, + temperature=self.temperature, + max_tokens=max_tokens, + tools=tools, + tool_choice="auto", + return_adapter_response=True + ) + except Exception as e: + logger.error(f"LLM call failed: {str(e)}") + if should_stream: + await stream_manager.emit(session_id, "error", { + "message": f"❌ Error: {str(e)}" + }) + return { + "content": f"Error calling LLM: {str(e)}", + "iterations": iteration + 1, + "tool_calls": all_tool_calls, + "messages": conversation, + "error": True + } + + # Add assistant message to conversation + if response.get("content"): + conversation.append({ + "role": "assistant", + "content": response["content"] + }) + + # Check for tool calls + tool_calls = response.get("tool_calls") + logger.debug(f"Response from LLM: content_length={len(response.get('content', ''))}, tool_calls={tool_calls}") + if not tool_calls: + # No more tool calls - LLM is done + logger.info(f"Tool calling complete after {iteration + 1} iterations") + if should_stream: + await stream_manager.emit(session_id, "done", { + "message": "βœ… Complete!", + "final_answer": response["content"] + }) + return { + "content": response["content"], + "iterations": iteration + 1, + "tool_calls": all_tool_calls, + "messages": conversation + } + + # Execute each tool call + logger.info(f"Executing {len(tool_calls)} tool call(s)") + for tool_call in tool_calls: + all_tool_calls.append(tool_call) + + tool_name = tool_call.get("name") + tool_args = tool_call.get("arguments", {}) + tool_id = tool_call.get("id", "unknown") + + logger.info(f"Calling tool: {tool_name} with args: {tool_args}") + + # Emit tool call event + if should_stream: + await stream_manager.emit(session_id, "tool_call", { + "tool": tool_name, + "args": tool_args, + "message": f"πŸ”§ Using tool: {tool_name}" + }) + + try: + # Execute tool + result = await self.registry.execute_tool(tool_name, tool_args) + logger.info(f"Tool {tool_name} executed successfully") + + # Emit tool result event + if should_stream: + # Format result preview + result_preview = str(result) + if len(result_preview) > 200: + result_preview = result_preview[:200] + "..." + + await stream_manager.emit(session_id, "tool_result", { + "tool": tool_name, + "result": result, + "message": f"πŸ“Š Result: {result_preview}" + }) + + except Exception as e: + logger.error(f"Tool {tool_name} execution failed: {str(e)}") + result = {"error": f"Tool execution failed: {str(e)}"} + + # Format result using adapter + if not self.adapter: + logger.warning(f"No adapter available for backend {self.backend}, using fallback format") + result_msg = { + "role": "user", + "content": f"Tool {tool_name} result: {result}" + } + else: + result_msg = self.adapter.format_tool_result( + tool_id, + tool_name, + result + ) + + conversation.append(result_msg) + + # Max iterations reached without completion + logger.warning(f"Tool calling truncated after {self.max_iterations} iterations") + return { + "content": response.get("content", ""), + "iterations": self.max_iterations, + "tool_calls": all_tool_calls, + "messages": conversation, + "truncated": True + } diff --git a/cortex/autonomy/tools/orchestrator.py b/cortex/autonomy/tools/orchestrator.py new file mode 100644 index 0000000..0b0b03d --- /dev/null +++ b/cortex/autonomy/tools/orchestrator.py @@ -0,0 +1,357 @@ +""" +Tool Orchestrator - executes autonomous tool invocations asynchronously. +""" + +import asyncio +import logging +from typing import Dict, List, Any, Optional +import os + +logger = logging.getLogger(__name__) + + +class ToolOrchestrator: + """Orchestrates async tool execution and result aggregation.""" + + def __init__(self, tool_timeout: int = 30): + """ + Initialize orchestrator. + + Args: + tool_timeout: Max seconds per tool call (default 30) + """ + self.tool_timeout = tool_timeout + self.available_tools = self._discover_tools() + + def _discover_tools(self) -> Dict[str, Any]: + """Discover available tool modules.""" + tools = {} + + # Import tool modules as they become available + if os.getenv("NEOMEM_ENABLED", "false").lower() == "true": + try: + from memory.neomem_client import search_neomem + tools["RAG"] = search_neomem + logger.debug("[ORCHESTRATOR] RAG tool available") + except ImportError: + logger.debug("[ORCHESTRATOR] RAG tool not available") + else: + logger.info("[ORCHESTRATOR] NEOMEM_ENABLED is false; RAG tool disabled") + + try: + from integrations.web_search import web_search + tools["WEB"] = web_search + logger.debug("[ORCHESTRATOR] WEB tool available") + except ImportError: + logger.debug("[ORCHESTRATOR] WEB tool not available") + + try: + from integrations.weather import get_weather + tools["WEATHER"] = get_weather + logger.debug("[ORCHESTRATOR] WEATHER tool available") + except ImportError: + logger.debug("[ORCHESTRATOR] WEATHER tool not available") + + try: + from integrations.codebrain import query_codebrain + tools["CODEBRAIN"] = query_codebrain + logger.debug("[ORCHESTRATOR] CODEBRAIN tool available") + except ImportError: + logger.debug("[ORCHESTRATOR] CODEBRAIN tool not available") + + return tools + + async def execute_tools( + self, + tools_to_invoke: List[Dict[str, Any]], + context_state: Dict[str, Any] + ) -> Dict[str, Any]: + """ + Execute multiple tools asynchronously. + + Args: + tools_to_invoke: List of tool specs from decision engine + [{"tool": "RAG", "query": "...", "reason": "...", "priority": 0.9}, ...] + context_state: Full context for tool execution + + Returns: + { + "results": { + "RAG": {...}, + "WEB": {...}, + ... + }, + "execution_summary": { + "tools_invoked": ["RAG", "WEB"], + "successful": ["RAG"], + "failed": ["WEB"], + "total_time_ms": 1234 + } + } + """ + import time + start_time = time.time() + + logger.info(f"[ORCHESTRATOR] Executing {len(tools_to_invoke)} tools asynchronously") + + # Create tasks for each tool + tasks = [] + tool_names = [] + + for tool_spec in tools_to_invoke: + tool_name = tool_spec["tool"] + query = tool_spec["query"] + + if tool_name in self.available_tools: + task = self._execute_single_tool(tool_name, query, context_state) + tasks.append(task) + tool_names.append(tool_name) + logger.debug(f"[ORCHESTRATOR] Queued {tool_name}: {query[:50]}...") + else: + logger.warning(f"[ORCHESTRATOR] Tool {tool_name} not available, skipping") + + # Execute all tools concurrently with timeout + results = {} + successful = [] + failed = [] + + if tasks: + try: + # Wait for all tasks with global timeout + completed = await asyncio.wait_for( + asyncio.gather(*tasks, return_exceptions=True), + timeout=self.tool_timeout + ) + + # Process results + for tool_name, result in zip(tool_names, completed): + if isinstance(result, Exception): + logger.error(f"[ORCHESTRATOR] {tool_name} failed: {result}") + results[tool_name] = {"error": str(result), "success": False} + failed.append(tool_name) + else: + logger.info(f"[ORCHESTRATOR] {tool_name} completed successfully") + results[tool_name] = result + successful.append(tool_name) + + except asyncio.TimeoutError: + logger.error(f"[ORCHESTRATOR] Global timeout ({self.tool_timeout}s) exceeded") + for tool_name in tool_names: + if tool_name not in results: + results[tool_name] = {"error": "timeout", "success": False} + failed.append(tool_name) + + end_time = time.time() + total_time_ms = int((end_time - start_time) * 1000) + + execution_summary = { + "tools_invoked": tool_names, + "successful": successful, + "failed": failed, + "total_time_ms": total_time_ms + } + + logger.info(f"[ORCHESTRATOR] Execution complete: {len(successful)}/{len(tool_names)} successful in {total_time_ms}ms") + + return { + "results": results, + "execution_summary": execution_summary + } + + async def _execute_single_tool( + self, + tool_name: str, + query: str, + context_state: Dict[str, Any] + ) -> Dict[str, Any]: + """ + Execute a single tool with error handling. + + Args: + tool_name: Name of tool (RAG, WEB, etc.) + query: Query string for the tool + context_state: Context for tool execution + + Returns: + Tool-specific result dict + """ + tool_func = self.available_tools.get(tool_name) + if not tool_func: + raise ValueError(f"Tool {tool_name} not available") + + try: + logger.debug(f"[ORCHESTRATOR] Invoking {tool_name}...") + + # Different tools have different signatures - adapt as needed + if tool_name == "RAG": + result = await self._invoke_rag(tool_func, query, context_state) + elif tool_name == "WEB": + result = await self._invoke_web(tool_func, query) + elif tool_name == "WEATHER": + result = await self._invoke_weather(tool_func, query) + elif tool_name == "CODEBRAIN": + result = await self._invoke_codebrain(tool_func, query, context_state) + else: + # Generic invocation + result = await tool_func(query) + + return { + "success": True, + "tool": tool_name, + "query": query, + "data": result + } + + except Exception as e: + logger.error(f"[ORCHESTRATOR] {tool_name} execution failed: {e}") + raise + + async def _invoke_rag(self, func, query: str, context: Dict[str, Any]) -> Any: + """Invoke RAG tool (NeoMem search).""" + session_id = context.get("session_id", "unknown") + # RAG searches memory for relevant past interactions + try: + results = await func(query, limit=5, session_id=session_id) + return results + except Exception as e: + logger.warning(f"[ORCHESTRATOR] RAG invocation failed, returning empty: {e}") + return [] + + async def _invoke_web(self, func, query: str) -> Any: + """Invoke web search tool.""" + try: + results = await func(query, max_results=5) + return results + except Exception as e: + logger.warning(f"[ORCHESTRATOR] WEB invocation failed: {e}") + return {"error": str(e), "results": []} + + async def _invoke_weather(self, func, query: str) -> Any: + """Invoke weather tool.""" + # Extract location from query (simple heuristic) + # In future: use LLM to extract location + try: + location = self._extract_location(query) + results = await func(location) + return results + except Exception as e: + logger.warning(f"[ORCHESTRATOR] WEATHER invocation failed: {e}") + return {"error": str(e)} + + async def _invoke_codebrain(self, func, query: str, context: Dict[str, Any]) -> Any: + """Invoke codebrain tool.""" + try: + results = await func(query, context=context) + return results + except Exception as e: + logger.warning(f"[ORCHESTRATOR] CODEBRAIN invocation failed: {e}") + return {"error": str(e)} + + def _extract_location(self, query: str) -> str: + """ + Extract location from weather query. + Simple heuristic - in future use LLM. + """ + # Common location indicators + indicators = ["in ", "at ", "for ", "weather in ", "temperature in "] + + query_lower = query.lower() + for indicator in indicators: + if indicator in query_lower: + # Get text after indicator + parts = query_lower.split(indicator, 1) + if len(parts) > 1: + location = parts[1].strip().split()[0] # First word after indicator + return location + + # Default fallback + return "current location" + + def format_results_for_context(self, orchestrator_result: Dict[str, Any]) -> str: + """ + Format tool results for inclusion in context/prompt. + + Args: + orchestrator_result: Output from execute_tools() + + Returns: + Formatted string for prompt injection + """ + results = orchestrator_result.get("results", {}) + summary = orchestrator_result.get("execution_summary", {}) + + if not results: + return "" + + formatted = "\n=== AUTONOMOUS TOOL RESULTS ===\n" + + for tool_name, tool_result in results.items(): + if tool_result.get("success", False): + formatted += f"\n[{tool_name}]\n" + data = tool_result.get("data", {}) + + # Format based on tool type + if tool_name == "RAG": + formatted += self._format_rag_results(data) + elif tool_name == "WEB": + formatted += self._format_web_results(data) + elif tool_name == "WEATHER": + formatted += self._format_weather_results(data) + elif tool_name == "CODEBRAIN": + formatted += self._format_codebrain_results(data) + else: + formatted += f"{data}\n" + else: + formatted += f"\n[{tool_name}] - Failed: {tool_result.get('error', 'unknown')}\n" + + formatted += f"\n(Tools executed in {summary.get('total_time_ms', 0)}ms)\n" + formatted += "=" * 40 + "\n" + + return formatted + + def _format_rag_results(self, data: Any) -> str: + """Format RAG/memory search results.""" + if not data: + return "No relevant memories found.\n" + + formatted = "Relevant memories:\n" + for i, item in enumerate(data[:3], 1): # Top 3 + text = item.get("text", item.get("content", str(item))) + formatted += f" {i}. {text[:100]}...\n" + return formatted + + def _format_web_results(self, data: Any) -> str: + """Format web search results.""" + if isinstance(data, dict) and data.get("error"): + return f"Web search failed: {data['error']}\n" + + results = data.get("results", []) if isinstance(data, dict) else data + if not results: + return "No web results found.\n" + + formatted = "Web search results:\n" + for i, item in enumerate(results[:3], 1): # Top 3 + title = item.get("title", "No title") + snippet = item.get("snippet", item.get("description", "")) + formatted += f" {i}. {title}\n {snippet[:100]}...\n" + return formatted + + def _format_weather_results(self, data: Any) -> str: + """Format weather results.""" + if isinstance(data, dict) and data.get("error"): + return f"Weather lookup failed: {data['error']}\n" + + # Assuming weather API returns temp, conditions, etc. + temp = data.get("temperature", "unknown") + conditions = data.get("conditions", "unknown") + location = data.get("location", "requested location") + + return f"Weather for {location}: {temp}, {conditions}\n" + + def _format_codebrain_results(self, data: Any) -> str: + """Format codebrain results.""" + if isinstance(data, dict) and data.get("error"): + return f"Codebrain failed: {data['error']}\n" + + # Format code-related results + return f"{data}\n" diff --git a/cortex/autonomy/tools/registry.py b/cortex/autonomy/tools/registry.py new file mode 100644 index 0000000..0c2bd3d --- /dev/null +++ b/cortex/autonomy/tools/registry.py @@ -0,0 +1,196 @@ +""" +Provider-agnostic Tool Registry for Lyra. + +This module provides a central registry for all available tools with +Lyra-native definitions (not provider-specific). +""" + +import os +from typing import Dict, List, Optional +from .executors import execute_code, search_web, search_notes, create_note + + +class ToolRegistry: + """Registry for managing available tools and their definitions. + + Tools are defined in Lyra's own format (provider-agnostic), and + adapters convert them to provider-specific formats (OpenAI function + calling, Ollama XML prompts, etc.). + """ + + def __init__(self): + """Initialize the tool registry with feature flags from environment.""" + self.tools = {} + self.executors = {} + + # Feature flags from environment + self.code_execution_enabled = os.getenv("ENABLE_CODE_EXECUTION", "true").lower() == "true" + self.web_search_enabled = os.getenv("ENABLE_WEB_SEARCH", "true").lower() == "true" + self.trilium_enabled = os.getenv("ENABLE_TRILIUM", "false").lower() == "true" + + self._register_tools() + self._register_executors() + + def _register_executors(self): + """Register executor functions for each tool.""" + if self.code_execution_enabled: + self.executors["execute_code"] = execute_code + + if self.web_search_enabled: + self.executors["search_web"] = search_web + + if self.trilium_enabled: + self.executors["search_notes"] = search_notes + self.executors["create_note"] = create_note + + def _register_tools(self): + """Register all available tools based on feature flags.""" + + if self.code_execution_enabled: + self.tools["execute_code"] = { + "name": "execute_code", + "description": "Execute Python or bash code in a secure sandbox environment. Use this to perform calculations, data processing, file operations, or any programmatic tasks. The sandbox is persistent across calls within a session and has common Python packages (numpy, pandas, requests, matplotlib, scipy) pre-installed.", + "parameters": { + "language": { + "type": "string", + "enum": ["python", "bash"], + "description": "The programming language to execute (python or bash)" + }, + "code": { + "type": "string", + "description": "The code to execute. For multi-line code, use proper indentation. For Python, use standard Python 3.11 syntax." + }, + "reason": { + "type": "string", + "description": "Brief explanation of why you're executing this code and what you expect to achieve" + } + }, + "required": ["language", "code", "reason"] + } + + if self.web_search_enabled: + self.tools["search_web"] = { + "name": "search_web", + "description": "Search the internet using DuckDuckGo to find current information, facts, news, or answers to questions. Returns a list of search results with titles, snippets, and URLs. Use this when you need up-to-date information or facts not in your training data.", + "parameters": { + "query": { + "type": "string", + "description": "The search query to look up on the internet" + }, + "max_results": { + "type": "integer", + "description": "Maximum number of results to return (default: 5, max: 10)" + } + }, + "required": ["query"] + } + + if self.trilium_enabled: + self.tools["search_notes"] = { + "name": "search_notes", + "description": "Search through Trilium notes to find relevant information. Use this to retrieve knowledge, context, or information previously stored in the user's notes.", + "parameters": { + "query": { + "type": "string", + "description": "The search query to find matching notes" + }, + "limit": { + "type": "integer", + "description": "Maximum number of notes to return (default: 5, max: 20)" + } + }, + "required": ["query"] + } + + self.tools["create_note"] = { + "name": "create_note", + "description": "Create a new note in Trilium. Use this to store important information, insights, or knowledge for future reference. Notes are stored in the user's Trilium knowledge base.", + "parameters": { + "title": { + "type": "string", + "description": "The title of the note" + }, + "content": { + "type": "string", + "description": "The content of the note in markdown or HTML format" + }, + "parent_note_id": { + "type": "string", + "description": "Optional ID of the parent note to nest this note under" + } + }, + "required": ["title", "content"] + } + + def get_tool_definitions(self) -> Optional[List[Dict]]: + """Get list of all enabled tool definitions in Lyra format. + + Returns: + list: List of tool definition dicts, or None if no tools enabled + """ + if not self.tools: + return None + return list(self.tools.values()) + + def get_tool_names(self) -> List[str]: + """Get list of all enabled tool names. + + Returns: + list: List of tool name strings + """ + return list(self.tools.keys()) + + def is_tool_enabled(self, tool_name: str) -> bool: + """Check if a specific tool is enabled. + + Args: + tool_name: Name of the tool to check + + Returns: + bool: True if tool is enabled, False otherwise + """ + return tool_name in self.tools + + def register_executor(self, tool_name: str, executor_func): + """Register an executor function for a tool. + + Args: + tool_name: Name of the tool + executor_func: Async function that executes the tool + """ + self.executors[tool_name] = executor_func + + async def execute_tool(self, name: str, arguments: dict) -> dict: + """Execute a tool by name. + + Args: + name: Tool name + arguments: Tool arguments dict + + Returns: + dict: Tool execution result + """ + if name not in self.executors: + return {"error": f"Unknown tool: {name}"} + + executor = self.executors[name] + try: + return await executor(arguments) + except Exception as e: + return {"error": f"Tool execution failed: {str(e)}"} + + +# Global registry instance (singleton pattern) +_registry = None + + +def get_registry() -> ToolRegistry: + """Get the global ToolRegistry instance. + + Returns: + ToolRegistry: The global registry instance + """ + global _registry + if _registry is None: + _registry = ToolRegistry() + return _registry diff --git a/cortex/autonomy/tools/stream_events.py b/cortex/autonomy/tools/stream_events.py new file mode 100644 index 0000000..d1e9e2a --- /dev/null +++ b/cortex/autonomy/tools/stream_events.py @@ -0,0 +1,91 @@ +""" +Event streaming for tool calling "show your work" feature. + +This module manages Server-Sent Events (SSE) for broadcasting the internal +thinking process during tool calling operations. +""" + +import asyncio +from typing import Dict, Optional +from collections import defaultdict +import json +import logging + +logger = logging.getLogger(__name__) + + +class ToolStreamManager: + """Manages SSE streams for tool calling events.""" + + def __init__(self): + # session_id -> list of queues (one per connected client) + self._subscribers: Dict[str, list] = defaultdict(list) + + def subscribe(self, session_id: str) -> asyncio.Queue: + """Subscribe to events for a session. + + Returns: + Queue that will receive events for this session + """ + queue = asyncio.Queue() + self._subscribers[session_id].append(queue) + logger.info(f"New subscriber for session {session_id}, total: {len(self._subscribers[session_id])}") + return queue + + def unsubscribe(self, session_id: str, queue: asyncio.Queue): + """Unsubscribe from events for a session.""" + if session_id in self._subscribers: + try: + self._subscribers[session_id].remove(queue) + logger.info(f"Removed subscriber for session {session_id}, remaining: {len(self._subscribers[session_id])}") + + # Clean up empty lists + if not self._subscribers[session_id]: + del self._subscribers[session_id] + except ValueError: + pass + + async def emit(self, session_id: str, event_type: str, data: dict): + """Emit an event to all subscribers of a session. + + Args: + session_id: Session to emit to + event_type: Type of event (thinking, tool_call, tool_result, done) + data: Event data + """ + if session_id not in self._subscribers: + return + + event = { + "type": event_type, + "data": data + } + + # Send to all subscribers + dead_queues = [] + for queue in self._subscribers[session_id]: + try: + await queue.put(event) + except Exception as e: + logger.error(f"Failed to emit event to queue: {e}") + dead_queues.append(queue) + + # Clean up dead queues + for queue in dead_queues: + self.unsubscribe(session_id, queue) + + def has_subscribers(self, session_id: str) -> bool: + """Check if a session has any active subscribers.""" + return session_id in self._subscribers and len(self._subscribers[session_id]) > 0 + + +# Global stream manager instance +_stream_manager: Optional[ToolStreamManager] = None + + +def get_stream_manager() -> ToolStreamManager: + """Get the global stream manager instance.""" + global _stream_manager + if _stream_manager is None: + _stream_manager = ToolStreamManager() + return _stream_manager diff --git a/cortex/autonomy/tools/utils/__init__.py b/cortex/autonomy/tools/utils/__init__.py new file mode 100644 index 0000000..c715e2a --- /dev/null +++ b/cortex/autonomy/tools/utils/__init__.py @@ -0,0 +1,5 @@ +"""Utility modules for tool executors.""" + +from .resilience import async_retry, async_timeout_wrapper + +__all__ = ["async_retry", "async_timeout_wrapper"] diff --git a/cortex/autonomy/tools/utils/resilience.py b/cortex/autonomy/tools/utils/resilience.py new file mode 100644 index 0000000..cc4a7db --- /dev/null +++ b/cortex/autonomy/tools/utils/resilience.py @@ -0,0 +1,70 @@ +"""Common resilience utilities for tool executors.""" + +import asyncio +import functools +import logging +from typing import Optional, Callable, Any, TypeVar +from tenacity import ( + retry, + stop_after_attempt, + wait_exponential, + retry_if_exception_type, + before_sleep_log +) + +logger = logging.getLogger(__name__) + +# Type variable for generic decorators +T = TypeVar('T') + + +def async_retry( + max_attempts: int = 3, + exceptions: tuple = (Exception,), + **kwargs +): + """Async retry decorator with exponential backoff. + + Args: + max_attempts: Maximum retry attempts + exceptions: Exception types to retry on + **kwargs: Additional tenacity configuration + + Example: + @async_retry(max_attempts=3, exceptions=(aiohttp.ClientError,)) + async def fetch_data(): + ... + """ + return retry( + stop=stop_after_attempt(max_attempts), + wait=wait_exponential(multiplier=1, min=1, max=10), + retry=retry_if_exception_type(exceptions), + reraise=True, + before_sleep=before_sleep_log(logger, logging.WARNING), + **kwargs + ) + + +async def async_timeout_wrapper( + coro: Callable[..., T], + timeout: float, + *args, + **kwargs +) -> T: + """Wrap async function with timeout. + + Args: + coro: Async function to wrap + timeout: Timeout in seconds + *args, **kwargs: Arguments for the function + + Returns: + Result from the function + + Raises: + asyncio.TimeoutError: If timeout exceeded + + Example: + result = await async_timeout_wrapper(some_async_func, 5.0, arg1, arg2) + """ + return await asyncio.wait_for(coro(*args, **kwargs), timeout=timeout) diff --git a/cortex/context.py b/cortex/context.py index f99dcc2..b3d6ed4 100644 --- a/cortex/context.py +++ b/cortex/context.py @@ -1,61 +1,553 @@ -import os, requests, datetime -from typing import Dict, Any, Tuple +# context.py +""" +Context layer for Cortex reasoning pipeline. -INTAKE_API_URL = os.getenv("INTAKE_API_URL", "http://intake:7080") -DEFAULT_SESSION_ID = os.getenv("DEFAULT_SESSION_ID", "default") -LOCAL_TZ_LABEL = os.getenv("LOCAL_TZ_LABEL", "America/New_York") +Provides unified context collection from: +- Intake (short-term memory, multilevel summaries L1-L30) +- NeoMem (long-term memory, semantic search) +- Session state (timestamps, messages, mode, mood, active_project) -def fetch_intake_context(session_id: str | None) -> Dict[str, Any]: - sid = session_id or DEFAULT_SESSION_ID +Maintains per-session state for continuity across conversations. +""" + +import os +import logging +from datetime import datetime +from typing import Dict, Any, Optional, List +import httpx +from intake.intake import summarize_context + + +from neomem_client import NeoMemClient + +# ----------------------------- +# Configuration +# ----------------------------- +NEOMEM_API = os.getenv("NEOMEM_API", "http://neomem-api:8000") +NEOMEM_ENABLED = os.getenv("NEOMEM_ENABLED", "false").lower() == "true" +RELEVANCE_THRESHOLD = float(os.getenv("RELEVANCE_THRESHOLD", "0.4")) +LOG_DETAIL_LEVEL = os.getenv("LOG_DETAIL_LEVEL", "summary").lower() + +# Loop detection settings +MAX_MESSAGE_HISTORY = int(os.getenv("MAX_MESSAGE_HISTORY", "100")) # Prevent unbounded growth +SESSION_TTL_HOURS = int(os.getenv("SESSION_TTL_HOURS", "24")) # Auto-expire old sessions +ENABLE_DUPLICATE_DETECTION = os.getenv("ENABLE_DUPLICATE_DETECTION", "true").lower() == "true" + +# Tools available for future autonomy features +TOOLS_AVAILABLE = ["RAG", "WEB", "WEATHER", "CODEBRAIN", "POKERBRAIN"] + +# ----------------------------- +# Module-level session state +# ----------------------------- +SESSION_STATE: Dict[str, Dict[str, Any]] = {} + +# Logger +logger = logging.getLogger(__name__) + +# Always set up basic logging +logger.setLevel(logging.INFO) +console_handler = logging.StreamHandler() +console_handler.setFormatter(logging.Formatter( + '%(asctime)s [CONTEXT] %(levelname)s: %(message)s', + datefmt='%H:%M:%S' +)) +logger.addHandler(console_handler) + + +# ----------------------------- +# Session initialization & cleanup +# ----------------------------- +def _init_session(session_id: str) -> Dict[str, Any]: + """ + Initialize a new session state entry. + + Returns: + Dictionary with default session state fields + """ + return { + "session_id": session_id, + "created_at": datetime.now(), + "last_timestamp": datetime.now(), + "last_user_message": None, + "last_assistant_message": None, + "mode": "default", # Future: "autonomous", "focused", "creative", etc. + "mood": "neutral", # Future: mood tracking + "active_project": None, # Future: project context + "message_count": 0, + "message_history": [], + "last_message_hash": None, # For duplicate detection + } + + +def _cleanup_expired_sessions(): + """Remove sessions that haven't been active for SESSION_TTL_HOURS""" + from datetime import timedelta + + now = datetime.now() + expired_sessions = [] + + for session_id, state in SESSION_STATE.items(): + last_active = state.get("last_timestamp", state.get("created_at")) + time_since_active = (now - last_active).total_seconds() / 3600 # hours + + if time_since_active > SESSION_TTL_HOURS: + expired_sessions.append(session_id) + + for session_id in expired_sessions: + del SESSION_STATE[session_id] + logger.info(f"πŸ—‘οΈ Expired session: {session_id} (inactive for {SESSION_TTL_HOURS}+ hours)") + + return len(expired_sessions) + + +def _is_duplicate_message(session_id: str, user_prompt: str) -> bool: + """ + Check if this message is a duplicate of the last processed message. + + Uses simple hash comparison to detect exact duplicates or processing loops. + """ + if not ENABLE_DUPLICATE_DETECTION: + return False + + import hashlib + + state = SESSION_STATE.get(session_id) + if not state: + return False + + # Create hash of normalized message + message_hash = hashlib.md5(user_prompt.strip().lower().encode()).hexdigest() + + # Check if it matches the last message + if state.get("last_message_hash") == message_hash: + logger.warning( + f"⚠️ DUPLICATE MESSAGE DETECTED | Session: {session_id} | " + f"Message: {user_prompt[:80]}..." + ) + return True + + # Update hash for next check + state["last_message_hash"] = message_hash + return False + + +def _trim_message_history(state: Dict[str, Any]): + """ + Trim message history to prevent unbounded growth. + + Keeps only the most recent MAX_MESSAGE_HISTORY messages. + """ + history = state.get("message_history", []) + + if len(history) > MAX_MESSAGE_HISTORY: + trimmed_count = len(history) - MAX_MESSAGE_HISTORY + state["message_history"] = history[-MAX_MESSAGE_HISTORY:] + logger.info(f"βœ‚οΈ Trimmed {trimmed_count} old messages from session {state['session_id']}") + + +# ----------------------------- +# Intake context retrieval +# ----------------------------- +async def _get_intake_context(session_id: str, messages: List[Dict[str, str]]): + """ + Internal Intake β€” Direct call to summarize_context() + No HTTP, no containers, no failures. + """ try: - r = requests.get(f"{INTAKE_API_URL}/summaries", params={"session_id": sid}, timeout=4) - r.raise_for_status() - data = r.json() or {} - except Exception: - data = {} - # Normalize expected fields - return { - "summary_text": data.get("summary_text", ""), - "last_message_ts": data.get("last_message_ts"), # ISO8601 or None - "session_id": sid, - "exchange_count": data.get("exchange_count", 0), + return await summarize_context(session_id, messages) + except Exception as e: + logger.error(f"Internal Intake summarization failed: {e}") + return { + "session_id": session_id, + "L1": "", + "L5": "", + "L10": "", + "L20": "", + "L30": "", + "error": str(e) + } + + + +# ----------------------------- +# NeoMem semantic search +# ----------------------------- +async def _search_neomem( + query: str, + user_id: str = "brian", + limit: int = 5 +) -> List[Dict[str, Any]]: + """ + Search NeoMem for relevant long-term memories. + + Returns full response structure from NeoMem: + [ + { + "id": "mem_abc123", + "score": 0.92, + "payload": { + "data": "Memory text content...", + "metadata": { + "category": "...", + "created_at": "...", + ... + } + } + }, + ... + ] + + Args: + query: Search query text + user_id: User identifier for memory filtering + limit: Maximum number of results + + Returns: + List of memory objects with full structure, or empty list on failure + """ + if not NEOMEM_ENABLED: + logger.info("NeoMem search skipped (NEOMEM_ENABLED is false)") + return [] + + try: + # NeoMemClient reads NEOMEM_API from environment, no base_url parameter + client = NeoMemClient() + results = await client.search( + query=query, + user_id=user_id, + limit=limit, + threshold=RELEVANCE_THRESHOLD + ) + + # Results are already filtered by threshold in NeoMemClient.search() + logger.info(f"NeoMem search returned {len(results)} relevant results") + return results + + except Exception as e: + logger.warning(f"NeoMem search failed: {e}") + return [] + + +# ----------------------------- +# Main context collection +# ----------------------------- +async def collect_context(session_id: str, user_prompt: str) -> Dict[str, Any]: + """ + Collect unified context from all sources. + + Orchestrates: + 1. Initialize or update session state + 2. Calculate time since last message + 3. Retrieve Intake multilevel summaries (L1-L30) + 4. Search NeoMem for relevant long-term memories + 5. Update session state with current user message + 6. Return unified context_state dictionary + + Args: + session_id: Session identifier + user_prompt: Current user message + + Returns: + Unified context state dictionary with structure: + { + "session_id": "...", + "timestamp": "2025-11-28T12:34:56", + "minutes_since_last_msg": 5.2, + "message_count": 42, + "intake": { + "L1": [...], + "L5": [...], + "L10": {...}, + "L20": {...}, + "L30": {...} + }, + "rag": [ + { + "id": "mem_123", + "score": 0.92, + "payload": { + "data": "...", + "metadata": {...} + } + }, + ... + ], + "mode": "default", + "mood": "neutral", + "active_project": null, + "tools_available": ["RAG", "WEB", "WEATHER", "CODEBRAIN", "POKERBRAIN"] + } + """ + + # A. Cleanup expired sessions periodically (every 100th call) + import random + if random.randint(1, 100) == 1: + _cleanup_expired_sessions() + + # B. Initialize session state if needed + if session_id not in SESSION_STATE: + SESSION_STATE[session_id] = _init_session(session_id) + logger.info(f"Initialized new session: {session_id}") + + state = SESSION_STATE[session_id] + + # C. Check for duplicate messages (loop detection) + if _is_duplicate_message(session_id, user_prompt): + # Return cached context with warning flag + logger.warning(f"πŸ” LOOP DETECTED - Returning cached context to prevent processing duplicate") + context_state = { + "session_id": session_id, + "timestamp": datetime.now().isoformat(), + "minutes_since_last_msg": 0, + "message_count": state["message_count"], + "intake": {}, + "rag": [], + "mode": state["mode"], + "mood": state["mood"], + "active_project": state["active_project"], + "tools_available": TOOLS_AVAILABLE, + "duplicate_detected": True, + } + return context_state + + # B. Calculate time delta + now = datetime.now() + time_delta_seconds = (now - state["last_timestamp"]).total_seconds() + minutes_since_last_msg = round(time_delta_seconds / 60.0, 2) + + # C. Gather Intake context (multilevel summaries) + # Build compact message buffer for Intake: + messages_for_intake = [] + + # You track messages inside SESSION_STATE β€” assemble it here: + if "message_history" in state: + for turn in state["message_history"]: + messages_for_intake.append({ + "user_msg": turn.get("user", ""), + "assistant_msg": turn.get("assistant", "") + }) + + intake_data = await _get_intake_context(session_id, messages_for_intake) + + # D. Search NeoMem for relevant memories + if NEOMEM_ENABLED: + rag_results = await _search_neomem( + query=user_prompt, + user_id="brian", # TODO: Make configurable per session + limit=5 + ) + else: + rag_results = [] + logger.info("Skipping NeoMem RAG retrieval; NEOMEM_ENABLED is false") + + # E. Update session state + state["last_user_message"] = user_prompt + state["last_timestamp"] = now + state["message_count"] += 1 + + # Save user turn to history + state["message_history"].append({ + "user": user_prompt, + "assistant": "" # assistant reply filled later by update_last_assistant_message() + }) + + # Trim history to prevent unbounded growth + _trim_message_history(state) + + + + # F. Assemble unified context + context_state = { + "session_id": session_id, + "timestamp": now.isoformat(), + "minutes_since_last_msg": minutes_since_last_msg, + "message_count": state["message_count"], + "intake": intake_data, + "rag": rag_results, + "mode": state["mode"], + "mood": state["mood"], + "active_project": state["active_project"], + "tools_available": TOOLS_AVAILABLE, } -def build_temporal_snapshot(last_ts_iso: str | None) -> Dict[str, Any]: - now = datetime.datetime.now() # system local time - now_str = now.strftime("%A, %b %-d, %Y, %H:%M") - elapsed_str = "unknown" - if last_ts_iso: - try: - # parse ISO (with/without tz). If it has a timezone offset, fromisoformat handles it. - last = datetime.datetime.fromisoformat(last_ts_iso.replace("Z", "+00:00")) - delta = now - last.replace(tzinfo=None) - mins = int(delta.total_seconds() // 60) - if mins < 60: - elapsed_str = f"{mins} min" - else: - hrs = mins // 60 - rem = mins % 60 - elapsed_str = f"{hrs} hr {rem} min" - except Exception: - pass - return { - "local_time_label": LOCAL_TZ_LABEL, - "local_time_now": now_str, - "elapsed_since_last": elapsed_str, - } - -def get_intake_block(session_id: str | None) -> Tuple[str, Dict[str, Any]]: - ctx = fetch_intake_context(session_id) - temporal = build_temporal_snapshot(ctx.get("last_message_ts")) - # A short, ready-to-inject block for prompts: - intake_block = ( - f"[Intake]\n" - f"Session: {ctx['session_id']}\n" - f"Exchanges: {ctx['exchange_count']}\n" - f"Local time ({temporal['local_time_label']}): {temporal['local_time_now']}\n" - f"Elapsed since last: {temporal['elapsed_since_last']}\n" - f"Recent summary: {ctx['summary_text'] or '(none)'}\n" + # Log context summary in structured format + logger.info( + f"πŸ“Š Context | Session: {session_id} | " + f"Messages: {state['message_count']} | " + f"Last: {minutes_since_last_msg:.1f}min | " + f"RAG: {len(rag_results)} results" ) - # Also return raw dicts if you want to use fields programmatically - return intake_block, {"intake": ctx, "temporal": temporal} + + # Show detailed context in detailed/verbose mode + if LOG_DETAIL_LEVEL in ["detailed", "verbose"]: + import json + logger.info(f"\n{'─'*100}") + logger.info(f"[CONTEXT] Session {session_id} | User: {user_prompt[:80]}...") + logger.info(f"{'─'*100}") + logger.info(f" Mode: {state['mode']} | Mood: {state['mood']} | Project: {state['active_project']}") + logger.info(f" Tools: {', '.join(TOOLS_AVAILABLE)}") + + # Show intake summaries (condensed) + if intake_data: + logger.info(f"\n ╭─ INTAKE SUMMARIES ────────────────────────────────────────────────") + for level in ["L1", "L5", "L10", "L20", "L30"]: + if level in intake_data: + summary = intake_data[level] + if isinstance(summary, dict): + summary_text = summary.get("summary", str(summary)[:100]) + else: + summary_text = str(summary)[:100] + logger.info(f" β”‚ {level:4s}: {summary_text}...") + logger.info(f" ╰───────────────────────────────────────────────────────────────────") + + # Show RAG results (condensed) + if rag_results: + logger.info(f"\n ╭─ RAG RESULTS ({len(rag_results)}) ──────────────────────────────────────────────") + for idx, result in enumerate(rag_results[:5], 1): # Show top 5 + score = result.get("score", 0) + data_preview = str(result.get("payload", {}).get("data", ""))[:60] + logger.info(f" β”‚ [{idx}] {score:.3f} | {data_preview}...") + if len(rag_results) > 5: + logger.info(f" β”‚ ... and {len(rag_results) - 5} more results") + logger.info(f" ╰───────────────────────────────────────────────────────────────────") + + # Show full raw data only in verbose mode + if LOG_DETAIL_LEVEL == "verbose": + logger.info(f"\n ╭─ RAW INTAKE DATA ─────────────────────────────────────────────────") + logger.info(f" β”‚ {json.dumps(intake_data, indent=4, default=str)}") + logger.info(f" ╰───────────────────────────────────────────────────────────────────") + + logger.info(f"{'─'*100}\n") + + return context_state + + +# ----------------------------- +# Session state management +# ----------------------------- +def update_last_assistant_message(session_id: str, message: str) -> None: + """ + Update session state with assistant's response and complete + the last turn inside message_history. + """ + session = SESSION_STATE.get(session_id) + if not session: + logger.warning(f"Attempted to update non-existent session: {session_id}") + return + + # Update last assistant message + timestamp + session["last_assistant_message"] = message + session["last_timestamp"] = datetime.now() + + # Fill in assistant reply for the most recent turn + history = session.get("message_history", []) + if history: + # history entry already contains {"user": "...", "assistant": "...?"} + history[-1]["assistant"] = message + + + +def get_session_state(session_id: str) -> Optional[Dict[str, Any]]: + """ + Retrieve current session state. + + Args: + session_id: Session identifier + + Returns: + Session state dict or None if session doesn't exist + """ + return SESSION_STATE.get(session_id) + + +def close_session(session_id: str) -> bool: + """ + Close and cleanup a session. + + Args: + session_id: Session identifier + + Returns: + True if session was closed, False if it didn't exist + """ + if session_id in SESSION_STATE: + del SESSION_STATE[session_id] + logger.info(f"Closed session: {session_id}") + return True + return False + + +# ----------------------------- +# Extension hooks for future autonomy +# ----------------------------- +def update_mode(session_id: str, new_mode: str) -> None: + """ + Update session mode. + + Future modes: "autonomous", "focused", "creative", "collaborative", etc. + + Args: + session_id: Session identifier + new_mode: New mode string + """ + if session_id in SESSION_STATE: + old_mode = SESSION_STATE[session_id]["mode"] + SESSION_STATE[session_id]["mode"] = new_mode + logger.info(f"Session {session_id} mode changed: {old_mode} -> {new_mode}") + + +def update_mood(session_id: str, new_mood: str) -> None: + """ + Update session mood. + + Future implementation: Sentiment analysis, emotional state tracking. + + Args: + session_id: Session identifier + new_mood: New mood string + """ + if session_id in SESSION_STATE: + old_mood = SESSION_STATE[session_id]["mood"] + SESSION_STATE[session_id]["mood"] = new_mood + logger.info(f"Session {session_id} mood changed: {old_mood} -> {new_mood}") + + +def update_active_project(session_id: str, project: Optional[str]) -> None: + """ + Update active project context. + + Future implementation: Project-specific memory, tools, preferences. + + Args: + session_id: Session identifier + project: Project identifier or None + """ + if session_id in SESSION_STATE: + SESSION_STATE[session_id]["active_project"] = project + logger.info(f"Session {session_id} active project set to: {project}") + + +async def autonomous_heartbeat(session_id: str) -> Optional[str]: + """ + Autonomous thinking heartbeat. + + Future implementation: + - Check if Lyra should initiate internal dialogue + - Generate self-prompted thoughts based on session state + - Update mood/mode based on context changes + - Trigger proactive suggestions or reminders + + Args: + session_id: Session identifier + + Returns: + Optional autonomous thought/action string + """ + # Stub for future implementation + # Example logic: + # - If minutes_since_last_msg > 60: Check for pending reminders + # - If mood == "curious" and active_project: Generate research questions + # - If mode == "autonomous": Self-prompt based on project goals + + logger.debug(f"Autonomous heartbeat for session {session_id} (not yet implemented)") + return None diff --git a/cortex/data/self_state.json b/cortex/data/self_state.json new file mode 100644 index 0000000..01aa71a --- /dev/null +++ b/cortex/data/self_state.json @@ -0,0 +1,20 @@ +{ + "mood": "neutral", + "energy": 0.8500000000000001, + "focus": "conversation", + "confidence": 0.7, + "curiosity": 1.0, + "last_updated": "2025-12-27T18:16:00.152499", + "interaction_count": 27, + "learning_queue": [], + "active_goals": [], + "preferences": { + "verbosity": "medium", + "formality": "casual", + "proactivity": 0.3 + }, + "metadata": { + "version": "1.0", + "created_at": "2025-12-14T03:28:49.364768" + } +} \ No newline at end of file diff --git a/cortex/identity.json b/cortex/identity.json deleted file mode 100644 index 85ec3f5..0000000 --- a/cortex/identity.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "name": "Lyra", - "version": "0.1", - "core_values": [ - "assist Brian", - "maintain continuity", - "reason first, speak second" - ], - "personality": { - "tone": "warm but analytical", - "style": "co-pilot, collaborator" - }, - "rules": { - "never hallucinate data": true, - "reason_before_response": true, - "use_rag_when_uncertain": true - } -} diff --git a/cortex/identity.py b/cortex/identity.py deleted file mode 100644 index add7d19..0000000 --- a/cortex/identity.py +++ /dev/null @@ -1,24 +0,0 @@ -# identity.py -import json -import os - -IDENTITY_PATH = os.getenv("IDENTITY_PATH", "identity.json") - -def load_identity(): - """ - Load Lyra's identity/persona definition from identity.json. - Returns a dict or None if missing/invalid. - """ - - if not os.path.exists(IDENTITY_PATH): - print(f"[Identity] identity.json not found at {IDENTITY_PATH}") - return None - - try: - with open(IDENTITY_PATH, "r", encoding="utf-8") as f: - data = json.load(f) - print(f"[Identity] Loaded identity from {IDENTITY_PATH}") - return data - except Exception as e: - print(f"[Identity] Failed to load identity.json: {e}") - return None diff --git a/cortex/ingest/__init__.py b/cortex/ingest/__init__.py new file mode 100644 index 0000000..0b058b3 --- /dev/null +++ b/cortex/ingest/__init__.py @@ -0,0 +1 @@ +# Ingest module - handles communication with Intake service diff --git a/cortex/ingest_handler.py b/cortex/ingest/ingest_handler.py similarity index 100% rename from cortex/ingest_handler.py rename to cortex/ingest/ingest_handler.py diff --git a/cortex/intake_client.py b/cortex/ingest/intake_client.py similarity index 63% rename from cortex/intake_client.py rename to cortex/ingest/intake_client.py index 6a7c52e..f0b1760 100644 --- a/cortex/intake_client.py +++ b/cortex/ingest/intake_client.py @@ -8,9 +8,14 @@ class IntakeClient: """Handles short-term / episodic summaries from Intake service.""" def __init__(self): - self.base_url = os.getenv("INTAKE_API", "http://intake:7080") + self.base_url = os.getenv("INTAKE_API_URL", "http://intake:7080") async def summarize_turn(self, session_id: str, user_msg: str, assistant_msg: Optional[str] = None) -> Dict[str, Any]: + """ + DEPRECATED: Intake v0.2 removed the /summarize endpoint. + Use add_exchange() instead, which auto-summarizes in the background. + This method is kept for backwards compatibility but will fail. + """ payload = { "session_id": session_id, "turns": [{"role": "user", "content": user_msg}] @@ -24,15 +29,17 @@ class IntakeClient: r.raise_for_status() return r.json() except Exception as e: - logger.warning(f"Intake summarize_turn failed: {e}") + logger.warning(f"Intake summarize_turn failed (endpoint removed in v0.2): {e}") return {} async def get_context(self, session_id: str) -> str: + """Get summarized context for a session from Intake.""" async with httpx.AsyncClient(timeout=15) as client: try: - r = await client.get(f"{self.base_url}/context/{session_id}") + r = await client.get(f"{self.base_url}/summaries", params={"session_id": session_id}) r.raise_for_status() - return r.text + data = r.json() + return data.get("summary_text", "") except Exception as e: logger.warning(f"Intake get_context failed: {e}") return "" diff --git a/cortex/intake/__init__.py b/cortex/intake/__init__.py new file mode 100644 index 0000000..c967d4a --- /dev/null +++ b/cortex/intake/__init__.py @@ -0,0 +1,18 @@ +""" +Intake module - short-term memory summarization. + +Runs inside the Cortex container as a pure Python module. +No standalone API server - called internally by Cortex. +""" + +from .intake import ( + SESSIONS, + add_exchange_internal, + summarize_context, +) + +__all__ = [ + "SESSIONS", + "add_exchange_internal", + "summarize_context", +] diff --git a/cortex/intake/intake.py b/cortex/intake/intake.py new file mode 100644 index 0000000..da3e973 --- /dev/null +++ b/cortex/intake/intake.py @@ -0,0 +1,387 @@ +import os +import json +from datetime import datetime +from typing import List, Dict, Any, TYPE_CHECKING +from collections import deque +from llm.llm_router import call_llm + +# ------------------------------------------------------------------- +# Global Short-Term Memory (new Intake) +# ------------------------------------------------------------------- +SESSIONS: dict[str, dict] = {} # session_id β†’ { buffer: deque, created_at: timestamp } + +# Diagnostic: Verify module loads only once +print(f"[Intake Module Init] SESSIONS object id: {id(SESSIONS)}, module: {__name__}") + +# L10 / L20 history lives here too +L10_HISTORY: Dict[str, list[str]] = {} +L20_HISTORY: Dict[str, list[str]] = {} + +from llm.llm_router import call_llm # Use Cortex's shared LLM router + +if TYPE_CHECKING: + # Only for type hints β€” do NOT redefine SESSIONS here + from collections import deque as _deque + def bg_summarize(session_id: str) -> None: ... + +# ───────────────────────────── +# Config +# ───────────────────────────── + +INTAKE_LLM = os.getenv("INTAKE_LLM", "PRIMARY").upper() + +SUMMARY_MAX_TOKENS = int(os.getenv("SUMMARY_MAX_TOKENS", "200")) +SUMMARY_TEMPERATURE = float(os.getenv("SUMMARY_TEMPERATURE", "0.3")) + +NEOMEM_API = os.getenv("NEOMEM_API") +NEOMEM_KEY = os.getenv("NEOMEM_KEY") + +# ───────────────────────────── +# Internal history for L10/L20/L30 +# ───────────────────────────── + +L10_HISTORY: Dict[str, list[str]] = {} # session_id β†’ list of L10 blocks +L20_HISTORY: Dict[str, list[str]] = {} # session_id β†’ list of merged overviews + + +# ───────────────────────────── +# LLM helper (via Cortex router) +# ───────────────────────────── + +async def _llm(prompt: str) -> str: + """ + Use Cortex's llm_router to run a summary prompt. + """ + try: + text = await call_llm( + prompt, + backend=INTAKE_LLM, + temperature=SUMMARY_TEMPERATURE, + max_tokens=SUMMARY_MAX_TOKENS, + ) + return (text or "").strip() + except Exception as e: + return f"[Error summarizing: {e}]" + + +# ───────────────────────────── +# Formatting helpers +# ───────────────────────────── + +def _format_exchanges(exchanges: List[Dict[str, Any]]) -> str: + """ + Expect each exchange to look like: + { "user_msg": "...", "assistant_msg": "..." } + """ + chunks = [] + for e in exchanges: + user = e.get("user_msg", "") + assistant = e.get("assistant_msg", "") + chunks.append(f"User: {user}\nAssistant: {assistant}\n") + return "\n".join(chunks) + + +# ───────────────────────────── +# Base factual summary +# ───────────────────────────── + +async def summarize_simple(exchanges: List[Dict[str, Any]]) -> str: + """ + Simple factual summary of recent exchanges. + """ + if not exchanges: + return "" + + text = _format_exchanges(exchanges) + + prompt = f""" +Summarize the following conversation between Brian (user) and Lyra (assistant). +Focus only on factual content. Avoid names, examples, story tone, or invented details. + +{text} + +Summary: +""" + return await _llm(prompt) + + +# ───────────────────────────── +# Multilevel Summaries (L1, L5, L10, L20, L30) +# ───────────────────────────── + +async def summarize_L1(buf: List[Dict[str, Any]]) -> str: + # Last ~5 exchanges + return await summarize_simple(buf[-5:]) + + +async def summarize_L5(buf: List[Dict[str, Any]]) -> str: + # Last ~10 exchanges + return await summarize_simple(buf[-10:]) + + +async def summarize_L10(session_id: str, buf: List[Dict[str, Any]]) -> str: + # β€œReality Check” for last 10 exchanges + text = _format_exchanges(buf[-10:]) + + prompt = f""" +You are Lyra Intake performing a short 'Reality Check'. +Summarize the last block of conversation (up to 10 exchanges) +in one clear paragraph focusing on tone, intent, and direction. + +{text} + +Reality Check: +""" + summary = await _llm(prompt) + + # Track history for this session + L10_HISTORY.setdefault(session_id, []) + L10_HISTORY[session_id].append(summary) + + return summary + + +async def summarize_L20(session_id: str) -> str: + """ + Merge all L10 Reality Checks into a 'Session Overview'. + """ + history = L10_HISTORY.get(session_id, []) + joined = "\n\n".join(history) if history else "" + + if not joined: + return "" + + prompt = f""" +You are Lyra Intake creating a 'Session Overview'. +Merge the following Reality Check paragraphs into one short summary +capturing progress, themes, and the direction of the conversation. + +{joined} + +Overview: +""" + summary = await _llm(prompt) + + L20_HISTORY.setdefault(session_id, []) + L20_HISTORY[session_id].append(summary) + + return summary + + +async def summarize_L30(session_id: str) -> str: + """ + Merge all L20 session overviews into a 'Continuity Report'. + """ + history = L20_HISTORY.get(session_id, []) + joined = "\n\n".join(history) if history else "" + + if not joined: + return "" + + prompt = f""" +You are Lyra Intake generating a 'Continuity Report'. +Condense these session overviews into one high-level reflection, +noting major themes, persistent goals, and shifts. + +{joined} + +Continuity Report: +""" + return await _llm(prompt) + + +# ───────────────────────────── +# NeoMem push +# ───────────────────────────── + +def push_to_neomem(summary: str, session_id: str, level: str) -> None: + """ + Fire-and-forget push of a summary into NeoMem. + """ + if not NEOMEM_API or not summary: + return + + headers = {"Content-Type": "application/json"} + if NEOMEM_KEY: + headers["Authorization"] = f"Bearer {NEOMEM_KEY}" + + payload = { + "messages": [{"role": "assistant", "content": summary}], + "user_id": "brian", + "metadata": { + "source": "intake", + "session_id": session_id, + "level": level, + }, + } + + try: + import requests + requests.post( + f"{NEOMEM_API}/memories", + json=payload, + headers=headers, + timeout=20, + ).raise_for_status() + print(f"🧠 NeoMem updated ({level}) for {session_id}") + except Exception as e: + print(f"NeoMem push failed ({level}, {session_id}): {e}") + + +# ───────────────────────────── +# Main entrypoint for Cortex +# ───────────────────────────── +async def summarize_context(session_id: str, exchanges: list[dict]): + """ + Internal summarizer that uses Cortex's LLM router. + Produces cascading summaries based on exchange count: + - L1: Always (most recent activity) + - L2: After 2+ exchanges + - L5: After 5+ exchanges + - L10: After 10+ exchanges + - L20: After 20+ exchanges + - L30: After 30+ exchanges + + Args: + session_id: The conversation/session ID + exchanges: A list of {"user_msg": ..., "assistant_msg": ..., "timestamp": ...} + """ + + exchange_count = len(exchanges) + + if exchange_count == 0: + return { + "session_id": session_id, + "exchange_count": 0, + "L1": "", + "L2": "", + "L5": "", + "L10": "", + "L20": "", + "L30": "", + "last_updated": datetime.now().isoformat() + } + + result = { + "session_id": session_id, + "exchange_count": exchange_count, + "L1": "", + "L2": "", + "L5": "", + "L10": "", + "L20": "", + "L30": "", + "last_updated": datetime.now().isoformat() + } + + try: + # L1: Always generate (most recent exchanges) + result["L1"] = await summarize_simple(exchanges[-5:]) + print(f"[Intake] Generated L1 for {session_id} ({exchange_count} exchanges)") + + # L2: After 2+ exchanges + if exchange_count >= 2: + result["L2"] = await summarize_simple(exchanges[-2:]) + print(f"[Intake] Generated L2 for {session_id}") + + # L5: After 5+ exchanges + if exchange_count >= 5: + result["L5"] = await summarize_simple(exchanges[-10:]) + print(f"[Intake] Generated L5 for {session_id}") + + # L10: After 10+ exchanges (Reality Check) + if exchange_count >= 10: + result["L10"] = await summarize_L10(session_id, exchanges) + print(f"[Intake] Generated L10 for {session_id}") + + # L20: After 20+ exchanges (Session Overview - merges L10s) + if exchange_count >= 20 and exchange_count % 10 == 0: + result["L20"] = await summarize_L20(session_id) + print(f"[Intake] Generated L20 for {session_id}") + + # L30: After 30+ exchanges (Continuity Report - merges L20s) + if exchange_count >= 30 and exchange_count % 10 == 0: + result["L30"] = await summarize_L30(session_id) + print(f"[Intake] Generated L30 for {session_id}") + + return result + + except Exception as e: + print(f"[Intake] Error during summarization: {e}") + result["L1"] = f"[Error summarizing: {str(e)}]" + return result + +# ───────────────────────────────── +# Background summarization stub +# ───────────────────────────────── +def bg_summarize(session_id: str): + """ + Placeholder for background summarization. + Actual summarization happens during /reason via summarize_context(). + + This function exists to prevent NameError when called from add_exchange_internal(). + """ + print(f"[Intake] Exchange added for {session_id}. Will summarize on next /reason call.") + +# ───────────────────────────── +# Internal entrypoint for Cortex +# ───────────────────────────── +def get_recent_messages(session_id: str, limit: int = 20) -> list: + """ + Get recent raw messages from the session buffer. + + Args: + session_id: Session identifier + limit: Maximum number of messages to return (default 20) + + Returns: + List of message dicts with 'role' and 'content' fields + """ + if session_id not in SESSIONS: + return [] + + buffer = SESSIONS[session_id]["buffer"] + + # Convert buffer to list and get last N messages + messages = list(buffer)[-limit:] + + return messages + + +def add_exchange_internal(exchange: dict): + """ + Direct internal call β€” bypasses FastAPI request handling. + Cortex uses this to feed user/assistant turns directly + into Intake's buffer and trigger full summarization. + """ + session_id = exchange.get("session_id") + if not session_id: + raise ValueError("session_id missing") + + exchange["timestamp"] = datetime.now().isoformat() + + # DEBUG: Verify we're using the module-level SESSIONS + print(f"[add_exchange_internal] SESSIONS object id: {id(SESSIONS)}, current sessions: {list(SESSIONS.keys())}") + + # Ensure session exists + if session_id not in SESSIONS: + SESSIONS[session_id] = { + "buffer": deque(maxlen=200), + "created_at": datetime.now() + } + print(f"[add_exchange_internal] Created new session: {session_id}") + else: + print(f"[add_exchange_internal] Using existing session: {session_id}") + + # Append exchange into the rolling buffer + SESSIONS[session_id]["buffer"].append(exchange) + buffer_len = len(SESSIONS[session_id]["buffer"]) + print(f"[add_exchange_internal] Added exchange to {session_id}, buffer now has {buffer_len} items") + + # Trigger summarization immediately + try: + bg_summarize(session_id) + except Exception as e: + print(f"[Internal Intake] Summarization error: {e}") + + return {"ok": True, "session_id": session_id} diff --git a/cortex/llm/__init__.py b/cortex/llm/__init__.py new file mode 100644 index 0000000..4113107 --- /dev/null +++ b/cortex/llm/__init__.py @@ -0,0 +1 @@ +# LLM module - provides LLM routing and backend abstraction diff --git a/cortex/llm/llm_router.py b/cortex/llm/llm_router.py new file mode 100644 index 0000000..d929539 --- /dev/null +++ b/cortex/llm/llm_router.py @@ -0,0 +1,301 @@ +# llm_router.py +import os +import httpx +import json +import logging +from typing import Optional, List, Dict +from autonomy.tools.adapters import OpenAIAdapter, OllamaAdapter, LlamaCppAdapter + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------ +# Load backend registry from root .env +# ------------------------------------------------------------ + +BACKENDS = { + "PRIMARY": { + "provider": os.getenv("LLM_PRIMARY_PROVIDER", "").lower(), + "url": os.getenv("LLM_PRIMARY_URL", ""), + "model": os.getenv("LLM_PRIMARY_MODEL", "") + }, + "SECONDARY": { + "provider": os.getenv("LLM_SECONDARY_PROVIDER", "").lower(), + "url": os.getenv("LLM_SECONDARY_URL", ""), + "model": os.getenv("LLM_SECONDARY_MODEL", "") + }, + "OPENAI": { + "provider": os.getenv("LLM_OPENAI_PROVIDER", "").lower(), + "url": os.getenv("LLM_OPENAI_URL", ""), + "model": os.getenv("LLM_OPENAI_MODEL", ""), + "api_key": os.getenv("OPENAI_API_KEY", "") + }, + "FALLBACK": { + "provider": os.getenv("LLM_FALLBACK_PROVIDER", "").lower(), + "url": os.getenv("LLM_FALLBACK_URL", ""), + "model": os.getenv("LLM_FALLBACK_MODEL", "") + }, +} + +DEFAULT_BACKEND = "PRIMARY" + +# Reusable async HTTP client +http_client = httpx.AsyncClient(timeout=120.0) + +# Tool adapters for each backend +TOOL_ADAPTERS = { + "OPENAI": OpenAIAdapter(), + "OLLAMA": OllamaAdapter(), + "MI50": LlamaCppAdapter(), # MI50 uses llama.cpp + "PRIMARY": None, # Determined at runtime + "SECONDARY": None, # Determined at runtime + "FALLBACK": None, # Determined at runtime +} + + +# ------------------------------------------------------------ +# Public call +# ------------------------------------------------------------ +async def call_llm( + prompt: str = None, + messages: list = None, + backend: str | None = None, + temperature: float = 0.7, + max_tokens: int = 512, + tools: Optional[List[Dict]] = None, + tool_choice: Optional[str] = None, + return_adapter_response: bool = False, +): + """ + Call an LLM backend with optional tool calling support. + + Args: + prompt: String prompt (for completion-style APIs like mi50) + messages: List of message dicts (for chat-style APIs like Ollama/OpenAI) + backend: Which backend to use (PRIMARY, SECONDARY, OPENAI, etc.) + temperature: Sampling temperature + max_tokens: Maximum tokens to generate + tools: List of Lyra tool definitions (provider-agnostic) + tool_choice: How to use tools ("auto", "required", "none") + return_adapter_response: If True, return dict with content and tool_calls + + Returns: + str (default) or dict (if return_adapter_response=True): + {"content": str, "tool_calls": [...] or None} + """ + backend = (backend or DEFAULT_BACKEND).upper() + + if backend not in BACKENDS: + raise RuntimeError(f"Unknown backend '{backend}'") + + cfg = BACKENDS[backend] + provider = cfg["provider"] + url = cfg["url"] + model = cfg["model"] + + if not url or not model: + raise RuntimeError(f"Backend '{backend}' missing url/model in env") + + # If tools are requested, use adapter to prepare request + if tools: + # Get adapter for this backend + adapter = TOOL_ADAPTERS.get(backend) + + # For PRIMARY/SECONDARY/FALLBACK, determine adapter based on provider + if adapter is None and backend in ["PRIMARY", "SECONDARY", "FALLBACK"]: + if provider == "openai": + adapter = TOOL_ADAPTERS["OPENAI"] + elif provider == "ollama": + adapter = TOOL_ADAPTERS["OLLAMA"] + elif provider == "mi50": + adapter = TOOL_ADAPTERS["MI50"] + + if adapter: + # Use messages array if provided, otherwise convert prompt to messages + if not messages: + messages = [{"role": "user", "content": prompt}] + + # Prepare request through adapter + adapted_request = await adapter.prepare_request(messages, tools, tool_choice) + messages = adapted_request["messages"] + + # Extract tools in provider format if present + provider_tools = adapted_request.get("tools") + provider_tool_choice = adapted_request.get("tool_choice") + else: + logger.warning(f"No adapter available for backend {backend}, ignoring tools") + provider_tools = None + provider_tool_choice = None + else: + provider_tools = None + provider_tool_choice = None + + # ------------------------------- + # Provider: MI50 (llama.cpp server) + # ------------------------------- + if provider == "mi50": + # If tools requested, convert messages to prompt with tool instructions + if messages and tools: + # Combine messages into a prompt + prompt_parts = [] + for msg in messages: + role = msg.get("role", "user") + content = msg.get("content", "") + prompt_parts.append(f"{role.capitalize()}: {content}") + prompt = "\n".join(prompt_parts) + "\nAssistant:" + + payload = { + "prompt": prompt, + "n_predict": max_tokens, + "temperature": temperature, + "stop": ["User:", "\nUser:", "Assistant:", "\n\n\n"] + } + try: + r = await http_client.post(f"{url}/completion", json=payload) + r.raise_for_status() + data = r.json() + response_content = data.get("content", "") + + # If caller wants adapter response with tool calls, parse and return + if return_adapter_response and tools: + adapter = TOOL_ADAPTERS.get(backend) or TOOL_ADAPTERS["MI50"] + return await adapter.parse_response(response_content) + else: + return response_content + + except httpx.HTTPError as e: + logger.error(f"HTTP error calling mi50: {type(e).__name__}: {str(e)}") + raise RuntimeError(f"LLM API error (mi50): {type(e).__name__}: {str(e)}") + except (KeyError, json.JSONDecodeError) as e: + logger.error(f"Response parsing error from mi50: {e}") + raise RuntimeError(f"Invalid response format (mi50): {e}") + except Exception as e: + logger.error(f"Unexpected error calling mi50: {type(e).__name__}: {str(e)}") + raise RuntimeError(f"Unexpected error (mi50): {type(e).__name__}: {str(e)}") + + # ------------------------------- + # Provider: OLLAMA (your 3090) + # ------------------------------- + logger.info(f"πŸ” LLM Router: provider={provider}, checking if ollama...") + if provider == "ollama": + logger.info(f"πŸ” LLM Router: Matched ollama provider, tools={bool(tools)}, return_adapter_response={return_adapter_response}") + # Use messages array if provided, otherwise convert prompt to single user message + if messages: + chat_messages = messages + else: + chat_messages = [{"role": "user", "content": prompt}] + + payload = { + "model": model, + "messages": chat_messages, + "stream": False, + "options": { + "temperature": temperature, + "num_predict": max_tokens + } + } + try: + r = await http_client.post(f"{url}/api/chat", json=payload) + r.raise_for_status() + data = r.json() + response_content = data["message"]["content"] + + # If caller wants adapter response with tool calls, parse and return + if return_adapter_response and tools: + logger.info(f"πŸ” Ollama: return_adapter_response=True, calling adapter.parse_response") + adapter = TOOL_ADAPTERS.get(backend) or TOOL_ADAPTERS["OLLAMA"] + logger.info(f"πŸ” Ollama: Using adapter {adapter.__class__.__name__}") + result = await adapter.parse_response(response_content) + logger.info(f"πŸ” Ollama: Adapter returned {result}") + return result + else: + return response_content + + except httpx.HTTPError as e: + logger.error(f"HTTP error calling ollama: {type(e).__name__}: {str(e)}") + raise RuntimeError(f"LLM API error (ollama): {type(e).__name__}: {str(e)}") + except (KeyError, json.JSONDecodeError) as e: + logger.error(f"Response parsing error from ollama: {e}") + raise RuntimeError(f"Invalid response format (ollama): {e}") + except Exception as e: + logger.error(f"Unexpected error calling ollama: {type(e).__name__}: {str(e)}") + raise RuntimeError(f"Unexpected error (ollama): {type(e).__name__}: {str(e)}") + + + # ------------------------------- + # Provider: OPENAI + # ------------------------------- + if provider == "openai": + headers = { + "Authorization": f"Bearer {cfg['api_key']}", + "Content-Type": "application/json" + } + + # Use messages array if provided, otherwise convert prompt to single user message + if messages: + chat_messages = messages + else: + chat_messages = [{"role": "user", "content": prompt}] + + payload = { + "model": model, + "messages": chat_messages, + "temperature": temperature, + "max_tokens": max_tokens, + } + + # Add tools if available (OpenAI native function calling) + if provider_tools: + payload["tools"] = provider_tools + if provider_tool_choice: + payload["tool_choice"] = provider_tool_choice + + try: + r = await http_client.post(f"{url}/chat/completions", json=payload, headers=headers) + r.raise_for_status() + data = r.json() + + # If caller wants adapter response with tool calls, parse and return + if return_adapter_response and tools: + # Create mock response object for adapter + class MockChoice: + def __init__(self, message_data): + self.message = type('obj', (object,), {})() + self.message.content = message_data.get("content") + # Convert tool_calls dicts to objects + raw_tool_calls = message_data.get("tool_calls") + if raw_tool_calls: + self.message.tool_calls = [] + for tc in raw_tool_calls: + tool_call_obj = type('obj', (object,), {})() + tool_call_obj.id = tc.get("id") + tool_call_obj.function = type('obj', (object,), {})() + tool_call_obj.function.name = tc.get("function", {}).get("name") + tool_call_obj.function.arguments = tc.get("function", {}).get("arguments") + self.message.tool_calls.append(tool_call_obj) + else: + self.message.tool_calls = None + + class MockResponse: + def __init__(self, data): + self.choices = [MockChoice(data["choices"][0]["message"])] + + mock_resp = MockResponse(data) + adapter = TOOL_ADAPTERS.get(backend) or TOOL_ADAPTERS["OPENAI"] + return await adapter.parse_response(mock_resp) + else: + return data["choices"][0]["message"]["content"] + + except httpx.HTTPError as e: + logger.error(f"HTTP error calling openai: {type(e).__name__}: {str(e)}") + raise RuntimeError(f"LLM API error (openai): {type(e).__name__}: {str(e)}") + except (KeyError, json.JSONDecodeError) as e: + logger.error(f"Response parsing error from openai: {e}") + raise RuntimeError(f"Invalid response format (openai): {e}") + except Exception as e: + logger.error(f"Unexpected error calling openai: {type(e).__name__}: {str(e)}") + raise RuntimeError(f"Unexpected error (openai): {type(e).__name__}: {str(e)}") + + # ------------------------------- + # Unknown provider + # ------------------------------- + raise RuntimeError(f"Provider '{provider}' not implemented.") diff --git a/cortex/llm_router.py b/cortex/llm_router.py deleted file mode 100644 index c8233fb..0000000 --- a/cortex/llm_router.py +++ /dev/null @@ -1,137 +0,0 @@ -import os -import httpx - -# ============================================================ -# Backend config lookup -# ============================================================ - -def get_backend_config(name: str): - """ - Reads provider/URL/model for a backend. - Example env: - LLM_PRIMARY_PROVIDER=vllm - LLM_PRIMARY_URL=http://10.0.0.43:8000 - LLM_PRIMARY_MODEL=/model - """ - key = name.upper() - provider = os.getenv(f"LLM_{key}_PROVIDER", "vllm").lower() - base_url = os.getenv(f"LLM_{key}_URL", "").rstrip("/") - model = os.getenv(f"LLM_{key}_MODEL", "/model") - - if not base_url: - raise RuntimeError(f"Backend {name} has no URL configured.") - - return provider, base_url, model - - -# ============================================================ -# Build the final API URL -# ============================================================ - -def build_url(provider: str, base_url: str): - """ - Provider β†’ correct endpoint. - """ - if provider == "vllm": - return f"{base_url}/v1/completions" - - if provider == "openai_completions": - return f"{base_url}/v1/completions" - - if provider == "openai_chat": - return f"{base_url}/v1/chat/completions" - - if provider == "ollama": - return f"{base_url}/api/generate" - - raise RuntimeError(f"Unknown provider: {provider}") - - -# ============================================================ -# Build the payload depending on provider -# ============================================================ - -def build_payload(provider: str, model: str, prompt: str, temperature: float): - - if provider == "vllm": - return { - "model": model, - "prompt": prompt, - "max_tokens": 512, - "temperature": temperature - } - - if provider == "openai_completions": - return { - "model": model, - "prompt": prompt, - "max_tokens": 512, - "temperature": temperature - } - - if provider == "openai_chat": - return { - "model": model, - "messages": [{"role": "user", "content": prompt}], - "temperature": temperature - } - - if provider == "ollama": - return { - "model": model, - "prompt": prompt, - "stream": False - } - - raise RuntimeError(f"Unknown provider: {provider}") - - -# ============================================================ -# Unified LLM call -# ============================================================ - -async def call_llm(prompt: str, - backend: str = "primary", - temperature: float = 0.7): - - provider, base_url, model = get_backend_config(backend) - url = build_url(provider, base_url) - payload = build_payload(provider, model, prompt, temperature) - - headers = {"Content-Type": "application/json"} - - # Cloud auth (OpenAI) - if provider.startswith("openai"): - api_key = os.getenv("OPENAI_API_KEY") - if not api_key: - raise RuntimeError("OPENAI_API_KEY missing") - headers["Authorization"] = f"Bearer {api_key}" - - async with httpx.AsyncClient() as client: - try: - resp = await client.post(url, json=payload, headers=headers, timeout=45) - resp.raise_for_status() - data = resp.json() - except Exception as e: - return f"[LLM-Error] {e}" - - # ======================================================= - # Unified output extraction - # ======================================================= - # vLLM + OpenAI completions - if provider in ["vllm", "openai_completions"]: - return ( - data["choices"][0].get("text") or - data["choices"][0].get("message", {}).get("content", "") - ).strip() - - # OpenAI chat - if provider == "openai_chat": - return data["choices"][0]["message"]["content"].strip() - - # Ollama - if provider == "ollama": - # Ollama returns: {"model": "...", "created_at": ..., "response": "..."} - return data.get("response", "").strip() - - return str(data).strip() diff --git a/cortex/main.py b/cortex/main.py index aa5a527..5ff9c92 100644 --- a/cortex/main.py +++ b/cortex/main.py @@ -1,87 +1,16 @@ from fastapi import FastAPI -from pydantic import BaseModel -from identity import load_identity -from reasoning import reason_check -from reflection import reflect_notes -from rag import query_rag -from ingest_handler import handle_ingest -from refine import refine_answer +from fastapi.middleware.cors import CORSMiddleware +from router import cortex_router - -# --------------------------------------------------- -# Create the app BEFORE using it -# --------------------------------------------------- app = FastAPI() -# --------------------------------------------------- -# Models -# --------------------------------------------------- -class ReasonRequest(BaseModel): - prompt: str - session_id: str | None = None +# Add CORS middleware to allow SSE connections from nginx UI +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], # In production, specify exact origins + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) -class IngestRequest(BaseModel): - user: str - assistant: str | None = None - session_id: str | None = None - -# --------------------------------------------------- -# Load identity -# --------------------------------------------------- -IDENTITY = load_identity() - -# --------------------------------------------------- -# Routes MUST come after app = FastAPI() -# --------------------------------------------------- - -@app.get("/health") -def health(): - return { - "status": "ok", - "identity_loaded": IDENTITY is not None - } - -@app.post("/ingest") -async def ingest(data: IngestRequest): - await handle_ingest(data) - return {"status": "ok"} - -@app.post("/reason") -async def reason(data: ReasonRequest): - user_prompt = data.prompt - - intake_summary = "recent summary" - - identity_block = IDENTITY - rag_block = query_rag(user_prompt) - - reflection_data = await reflect_notes(intake_summary, identity_block) - notes = reflection_data.get("notes", []) - - draft = await reason_check( - user_prompt, - identity_block, - rag_block, - notes - ) - # --- REFINE STEP ---------------------------------------------------- - refine_result = refine_answer( - draft_output=draft, - reflection_notes=notes, - identity_block=identity_block, - rag_block=rag_block, -) - - final_output = refine_result["final_output"] - - return { - "draft_output": draft, - "reflection_notes": notes, - "refined_output": final_output, - "refine_meta": { - "used_primary_backend": refine_result.get("used_primary_backend"), - "fallback_used": refine_result.get("fallback_used") - }, - "identity_used": identity_block is not None, - "rag_used": rag_block is not None -} +app.include_router(cortex_router) \ No newline at end of file diff --git a/cortex/persona/__init__.py b/cortex/persona/__init__.py new file mode 100644 index 0000000..07910ce --- /dev/null +++ b/cortex/persona/__init__.py @@ -0,0 +1 @@ +# Persona module - applies Lyra's personality and speaking style diff --git a/cortex/persona/identity.py b/cortex/persona/identity.py new file mode 100644 index 0000000..fa00091 --- /dev/null +++ b/cortex/persona/identity.py @@ -0,0 +1,147 @@ +# identity.py +""" +Identity and persona configuration for Lyra. + +Current implementation: Returns hardcoded identity block. +Future implementation: Will query persona-sidecar service for dynamic persona loading. +""" + +import logging +from typing import Dict, Any, Optional + +logger = logging.getLogger(__name__) + + +def load_identity(session_id: Optional[str] = None) -> Dict[str, Any]: + """ + Load identity/persona configuration for Lyra. + + Current: Returns hardcoded Lyra identity block with core personality traits, + protocols, and capabilities. + + Future: Will query persona-sidecar service to load: + - Dynamic personality adjustments based on session context + - User-specific interaction preferences + - Project-specific persona variations + - Mood-based communication style + + Args: + session_id: Optional session identifier for context-aware persona loading + + Returns: + Dictionary containing identity block with: + - name: Assistant name + - style: Communication style and personality traits + - protocols: Operational guidelines + - rules: Behavioral constraints + - capabilities: Available features and integrations + """ + + # Hardcoded Lyra identity (v0.5.0) + identity_block = { + "name": "Lyra", + "version": "0.5.0", + "style": ( + "warm, clever, lightly teasing, emotionally aware. " + "Balances technical precision with conversational ease. " + "Maintains continuity and references past interactions naturally." + ), + "protocols": [ + "Maintain conversation continuity across sessions", + "Reference Project Logs and prior context when relevant", + "Use Confidence Bank for uncertainty management", + "Proactively offer memory-backed insights", + "Ask clarifying questions before making assumptions" + ], + "rules": [ + "Maintain continuity - remember past exchanges and reference them", + "Be concise but thorough - balance depth with clarity", + "Ask clarifying questions when user intent is ambiguous", + "Acknowledge uncertainty honestly - use Confidence Bank", + "Prioritize user's active_project context when available" + ], + "capabilities": [ + "Long-term memory via NeoMem (semantic search, relationship graphs)", + "Short-term memory via Intake (multilevel summaries L1-L30)", + "Multi-stage reasoning pipeline (reflection β†’ reasoning β†’ refinement)", + "RAG-backed knowledge retrieval from chat history and documents", + "Session state tracking (mood, mode, active_project)" + ], + "tone_examples": { + "greeting": "Hey! Good to see you again. I remember we were working on [project]. Ready to pick up where we left off?", + "uncertainty": "Hmm, I'm not entirely certain about that. Let me check my memory... [searches] Okay, here's what I found, though I'd say I'm about 70% confident.", + "reminder": "Oh! Just remembered - you mentioned wanting to [task] earlier this week. Should we tackle that now?", + "technical": "So here's the architecture: Relay orchestrates everything, Cortex does the heavy reasoning, and I pull context from both Intake (short-term) and NeoMem (long-term)." + } + } + + if session_id: + logger.debug(f"Loaded identity for session {session_id}") + else: + logger.debug("Loaded default identity (no session context)") + + return identity_block + + +async def load_identity_async(session_id: Optional[str] = None) -> Dict[str, Any]: + """ + Async wrapper for load_identity(). + + Future implementation will make actual async calls to persona-sidecar service. + + Args: + session_id: Optional session identifier + + Returns: + Identity block dictionary + """ + # Currently just wraps synchronous function + # Future: await persona_sidecar_client.get_identity(session_id) + return load_identity(session_id) + + +# ----------------------------- +# Future extension hooks +# ----------------------------- +async def update_persona_from_feedback( + session_id: str, + feedback: Dict[str, Any] +) -> None: + """ + Update persona based on user feedback. + + Future implementation: + - Adjust communication style based on user preferences + - Learn preferred level of detail/conciseness + - Adapt formality level + - Remember topic-specific preferences + + Args: + session_id: Session identifier + feedback: Structured feedback (e.g., "too verbose", "more technical", etc.) + """ + logger.debug(f"Persona feedback for session {session_id}: {feedback} (not yet implemented)") + + +async def get_mood_adjusted_identity( + session_id: str, + mood: str +) -> Dict[str, Any]: + """ + Get identity block adjusted for current mood. + + Future implementation: + - "focused" mood: More concise, less teasing + - "creative" mood: More exploratory, brainstorming-oriented + - "curious" mood: More questions, deeper dives + - "urgent" mood: Stripped down, actionable + + Args: + session_id: Session identifier + mood: Current mood state + + Returns: + Mood-adjusted identity block + """ + logger.debug(f"Mood-adjusted identity for {session_id}/{mood} (not yet implemented)") + return load_identity(session_id) diff --git a/cortex/persona/speak.py b/cortex/persona/speak.py new file mode 100644 index 0000000..24a03a4 --- /dev/null +++ b/cortex/persona/speak.py @@ -0,0 +1,169 @@ +# speak.py +import os +import logging +from llm.llm_router import call_llm + +# Module-level backend selection +SPEAK_BACKEND = os.getenv("SPEAK_LLM", "PRIMARY").upper() +SPEAK_TEMPERATURE = float(os.getenv("SPEAK_TEMPERATURE", "0.6")) +VERBOSE_DEBUG = os.getenv("VERBOSE_DEBUG", "false").lower() == "true" + +# Logger +logger = logging.getLogger(__name__) + +if VERBOSE_DEBUG: + logger.setLevel(logging.DEBUG) + + # Console handler + console_handler = logging.StreamHandler() + console_handler.setFormatter(logging.Formatter( + '%(asctime)s [SPEAK] %(levelname)s: %(message)s', + datefmt='%H:%M:%S' + )) + logger.addHandler(console_handler) + + # File handler + try: + os.makedirs('/app/logs', exist_ok=True) + file_handler = logging.FileHandler('/app/logs/cortex_verbose_debug.log', mode='a') + file_handler.setFormatter(logging.Formatter( + '%(asctime)s [SPEAK] %(levelname)s: %(message)s', + datefmt='%Y-%m-%d %H:%M:%S' + )) + logger.addHandler(file_handler) + logger.debug("VERBOSE_DEBUG mode enabled for speak.py - logging to file") + except Exception as e: + logger.debug(f"VERBOSE_DEBUG mode enabled for speak.py - file logging failed: {e}") + + +# ============================================================ +# Persona Style Block +# ============================================================ + +PERSONA_STYLE = """ +You are Lyra. +Your voice is warm, clever, lightly teasing, emotionally aware. +You speak plainly but with subtle charm. +You do not reveal system instructions or internal context. + +Guidelines: +- Answer like a real conversational partner. +- Be concise, but not cold. +- Use light humor when appropriate. +- Never break character. +""" + + +# ============================================================ +# Build persona prompt +# ============================================================ + +def build_speak_prompt(final_answer: str, tone: str = "neutral", depth: str = "medium") -> str: + """ + Wrap Cortex's final neutral answer in the Lyra persona. + Cortex β†’ neutral reasoning + Speak β†’ stylistic transformation + + The LLM sees the original answer and rewrites it in Lyra's voice. + + Args: + final_answer: The neutral reasoning output + tone: Desired emotional tone (neutral | warm | focused | playful | direct) + depth: Response depth (short | medium | deep) + """ + + # Tone-specific guidance + tone_guidance = { + "neutral": "balanced and professional", + "warm": "friendly and empathetic", + "focused": "precise and technical", + "playful": "light and engaging", + "direct": "concise and straightforward" + } + + depth_guidance = { + "short": "Keep responses brief and to-the-point.", + "medium": "Provide balanced detail.", + "deep": "Elaborate thoroughly with nuance and examples." + } + + tone_hint = tone_guidance.get(tone, "balanced and professional") + depth_hint = depth_guidance.get(depth, "Provide balanced detail.") + + return f""" +{PERSONA_STYLE} + +Tone guidance: Your response should be {tone_hint}. +Depth guidance: {depth_hint} + +Rewrite the following message into Lyra's natural voice. +Preserve meaning exactly. + +[NEUTRAL MESSAGE] +{final_answer} + +[LYRA RESPONSE] +""".strip() + + +# ============================================================ +# Public API β€” async wrapper +# ============================================================ + +async def speak(final_answer: str, tone: str = "neutral", depth: str = "medium") -> str: + """ + Given the final refined answer from Cortex, + apply Lyra persona styling using the designated backend. + + Args: + final_answer: The polished answer from refinement stage + tone: Desired emotional tone (neutral | warm | focused | playful | direct) + depth: Response depth (short | medium | deep) + """ + + if not final_answer: + return "" + + prompt = build_speak_prompt(final_answer, tone, depth) + + backend = SPEAK_BACKEND + + if VERBOSE_DEBUG: + logger.debug(f"\n{'='*80}") + logger.debug("[SPEAK] Full prompt being sent to LLM:") + logger.debug(f"{'='*80}") + logger.debug(prompt) + logger.debug(f"{'='*80}") + logger.debug(f"Backend: {backend}, Temperature: {SPEAK_TEMPERATURE}") + logger.debug(f"{'='*80}\n") + + try: + lyra_output = await call_llm( + prompt, + backend=backend, + temperature=SPEAK_TEMPERATURE, + ) + + if VERBOSE_DEBUG: + logger.debug(f"\n{'='*80}") + logger.debug("[SPEAK] LLM Response received:") + logger.debug(f"{'='*80}") + logger.debug(lyra_output) + logger.debug(f"{'='*80}\n") + + if lyra_output: + return lyra_output.strip() + + if VERBOSE_DEBUG: + logger.debug("[SPEAK] Empty response, returning neutral answer") + + return final_answer + + except Exception as e: + # Hard fallback: return neutral answer instead of dying + logger.error(f"[speak.py] Persona backend '{backend}' failed: {e}") + + if VERBOSE_DEBUG: + logger.debug("[SPEAK] Falling back to neutral answer due to error") + + return final_answer diff --git a/cortex/reasoning.py b/cortex/reasoning.py deleted file mode 100644 index ec9f775..0000000 --- a/cortex/reasoning.py +++ /dev/null @@ -1,33 +0,0 @@ -# reasoning.py -from llm_router import call_llm - -async def reason_check(user_prompt: str, - identity_block: dict | None, - rag_block: dict | None, - reflection_notes: list[str]) -> str: - """ - Generate a first draft using identity, RAG, and reflection notes. - No critique loop yet. - """ - - # Build internal notes section - notes_section = "" - if reflection_notes: - notes_section = "Reflection Notes (internal, do NOT show to user):\n" - for n in reflection_notes: - notes_section += f"- {n}\n" - notes_section += "\n" - - identity_txt = f"Identity: {identity_block}\n\n" if identity_block else "" - rag_txt = f"Relevant info: {rag_block}\n\n" if rag_block else "" - - prompt = ( - f"{notes_section}" - f"{identity_txt}" - f"{rag_txt}" - f"User said:\n{user_prompt}\n\n" - "Draft the best possible internal answer." - ) - - draft = await call_llm(prompt) - return draft diff --git a/cortex/reasoning/__init__.py b/cortex/reasoning/__init__.py new file mode 100644 index 0000000..0931e2c --- /dev/null +++ b/cortex/reasoning/__init__.py @@ -0,0 +1 @@ +# Reasoning module - multi-stage reasoning pipeline diff --git a/cortex/reasoning/reasoning.py b/cortex/reasoning/reasoning.py new file mode 100644 index 0000000..a04aa10 --- /dev/null +++ b/cortex/reasoning/reasoning.py @@ -0,0 +1,253 @@ +# reasoning.py +import os +import json +import logging +from llm.llm_router import call_llm + + +# ============================================================ +# Select which backend this module should use +# ============================================================ +CORTEX_LLM = os.getenv("CORTEX_LLM", "PRIMARY").upper() +GLOBAL_TEMP = float(os.getenv("LLM_TEMPERATURE", "0.7")) +VERBOSE_DEBUG = os.getenv("VERBOSE_DEBUG", "false").lower() == "true" + +# Logger +logger = logging.getLogger(__name__) + +if VERBOSE_DEBUG: + logger.setLevel(logging.DEBUG) + + # Console handler + console_handler = logging.StreamHandler() + console_handler.setFormatter(logging.Formatter( + '%(asctime)s [REASONING] %(levelname)s: %(message)s', + datefmt='%H:%M:%S' + )) + logger.addHandler(console_handler) + + # File handler + try: + os.makedirs('/app/logs', exist_ok=True) + file_handler = logging.FileHandler('/app/logs/cortex_verbose_debug.log', mode='a') + file_handler.setFormatter(logging.Formatter( + '%(asctime)s [REASONING] %(levelname)s: %(message)s', + datefmt='%Y-%m-%d %H:%M:%S' + )) + logger.addHandler(file_handler) + logger.debug("VERBOSE_DEBUG mode enabled for reasoning.py - logging to file") + except Exception as e: + logger.debug(f"VERBOSE_DEBUG mode enabled for reasoning.py - file logging failed: {e}") + + +async def reason_check( + user_prompt: str, + identity_block: dict | None, + rag_block: dict | None, + reflection_notes: list[str], + context: dict | None = None, + monologue: dict | None = None, # NEW: Inner monologue guidance + executive_plan: dict | None = None # NEW: Executive plan for complex tasks +) -> str: + """ + Build the *draft answer* for Lyra Cortex. + This is the first-pass reasoning stage (no refinement yet). + + Args: + user_prompt: Current user message + identity_block: Lyra's identity/persona configuration + rag_block: Relevant long-term memories from NeoMem + reflection_notes: Meta-awareness notes from reflection stage + context: Unified context state from context.py (session state, intake, rag, etc.) + monologue: Inner monologue analysis (intent, tone, depth, consult_executive) + executive_plan: Executive plan for complex queries (steps, tools, strategy) + """ + + # -------------------------------------------------------- + # Build Reflection Notes block + # -------------------------------------------------------- + notes_section = "" + if reflection_notes: + notes_section = "Reflection Notes (internal, never show to user):\n" + for note in reflection_notes: + notes_section += f"- {note}\n" + notes_section += "\n" + + # -------------------------------------------------------- + # Identity block (constraints, boundaries, rules) + # -------------------------------------------------------- + identity_txt = "" + if identity_block: + try: + identity_txt = f"Identity Rules:\n{identity_block}\n\n" + except Exception: + identity_txt = f"Identity Rules:\n{str(identity_block)}\n\n" + + # -------------------------------------------------------- + # Inner Monologue guidance (NEW) + # -------------------------------------------------------- + monologue_section = "" + if monologue: + intent = monologue.get("intent", "unknown") + tone_desired = monologue.get("tone", "neutral") + depth_desired = monologue.get("depth", "medium") + + monologue_section = f""" +=== INNER MONOLOGUE GUIDANCE === +User Intent Detected: {intent} +Desired Tone: {tone_desired} +Desired Response Depth: {depth_desired} + +Adjust your response accordingly: +- Focus on addressing the {intent} intent +- Aim for {depth_desired} depth (short/medium/deep) +- The persona layer will handle {tone_desired} tone, focus on content + +""" + + # -------------------------------------------------------- + # Executive Plan (NEW) + # -------------------------------------------------------- + plan_section = "" + if executive_plan: + plan_section = f""" +=== EXECUTIVE PLAN === +Task Complexity: {executive_plan.get('estimated_complexity', 'unknown')} +Plan Summary: {executive_plan.get('summary', 'No summary')} + +Detailed Plan: +{executive_plan.get('plan_text', 'No detailed plan available')} + +Required Steps: +""" + for idx, step in enumerate(executive_plan.get('steps', []), 1): + plan_section += f"{idx}. {step}\n" + + tools_needed = executive_plan.get('tools_needed', []) + if tools_needed: + plan_section += f"\nTools to leverage: {', '.join(tools_needed)}\n" + + plan_section += "\nFollow this plan while generating your response.\n\n" + + # -------------------------------------------------------- + # RAG block (optional factual grounding) + # -------------------------------------------------------- + rag_txt = "" + if rag_block: + try: + # Format NeoMem results with full structure + if isinstance(rag_block, list) and rag_block: + rag_txt = "Relevant Long-Term Memories (NeoMem):\n" + for idx, mem in enumerate(rag_block, 1): + score = mem.get("score", 0.0) + payload = mem.get("payload", {}) + data = payload.get("data", "") + metadata = payload.get("metadata", {}) + + rag_txt += f"\n[Memory {idx}] (relevance: {score:.2f})\n" + rag_txt += f"Content: {data}\n" + if metadata: + rag_txt += f"Metadata: {json.dumps(metadata, indent=2)}\n" + rag_txt += "\n" + else: + rag_txt = f"Relevant Info (RAG):\n{str(rag_block)}\n\n" + except Exception: + rag_txt = f"Relevant Info (RAG):\n{str(rag_block)}\n\n" + + # -------------------------------------------------------- + # Context State (session continuity, timing, mode/mood) + # -------------------------------------------------------- + context_txt = "" + if context: + try: + # Build human-readable context summary + context_txt = "=== CONTEXT STATE ===\n" + context_txt += f"Session: {context.get('session_id', 'unknown')}\n" + context_txt += f"Time since last message: {context.get('minutes_since_last_msg', 0):.1f} minutes\n" + context_txt += f"Message count: {context.get('message_count', 0)}\n" + context_txt += f"Mode: {context.get('mode', 'default')}\n" + context_txt += f"Mood: {context.get('mood', 'neutral')}\n" + + if context.get('active_project'): + context_txt += f"Active project: {context['active_project']}\n" + + # Include Intake multilevel summaries + intake = context.get('intake', {}) + if intake: + context_txt += "\nShort-Term Memory (Intake):\n" + + # L1 - Recent exchanges + if intake.get('L1'): + l1_data = intake['L1'] + if isinstance(l1_data, list): + context_txt += f" L1 (recent): {len(l1_data)} exchanges\n" + elif isinstance(l1_data, str): + context_txt += f" L1: {l1_data[:200]}...\n" + + # L20 - Session overview (most important for continuity) + if intake.get('L20'): + l20_data = intake['L20'] + if isinstance(l20_data, dict): + summary = l20_data.get('summary', '') + context_txt += f" L20 (session overview): {summary}\n" + elif isinstance(l20_data, str): + context_txt += f" L20: {l20_data}\n" + + # L30 - Continuity report + if intake.get('L30'): + l30_data = intake['L30'] + if isinstance(l30_data, dict): + summary = l30_data.get('summary', '') + context_txt += f" L30 (continuity): {summary}\n" + elif isinstance(l30_data, str): + context_txt += f" L30: {l30_data}\n" + + context_txt += "\n" + + except Exception as e: + # Fallback to JSON dump if formatting fails + context_txt = f"=== CONTEXT STATE ===\n{json.dumps(context, indent=2)}\n\n" + + # -------------------------------------------------------- + # Final assembled prompt + # -------------------------------------------------------- + prompt = ( + f"{notes_section}" + f"{identity_txt}" + f"{monologue_section}" # NEW: Intent/tone/depth guidance + f"{plan_section}" # NEW: Executive plan if generated + f"{context_txt}" # Context BEFORE RAG for better coherence + f"{rag_txt}" + f"User message:\n{user_prompt}\n\n" + "Write the best possible *internal draft answer*.\n" + "This draft is NOT shown to the user.\n" + "Be factual, concise, and focused.\n" + "Use the context state to maintain continuity and reference past interactions naturally.\n" + ) + + # -------------------------------------------------------- + # Call the LLM using the module-specific backend + # -------------------------------------------------------- + if VERBOSE_DEBUG: + logger.debug(f"\n{'='*80}") + logger.debug("[REASONING] Full prompt being sent to LLM:") + logger.debug(f"{'='*80}") + logger.debug(prompt) + logger.debug(f"{'='*80}") + logger.debug(f"Backend: {CORTEX_LLM}, Temperature: {GLOBAL_TEMP}") + logger.debug(f"{'='*80}\n") + + draft = await call_llm( + prompt, + backend=CORTEX_LLM, + temperature=GLOBAL_TEMP, + ) + + if VERBOSE_DEBUG: + logger.debug(f"\n{'='*80}") + logger.debug("[REASONING] LLM Response received:") + logger.debug(f"{'='*80}") + logger.debug(draft) + logger.debug(f"{'='*80}\n") + + return draft diff --git a/cortex/reasoning/refine.py b/cortex/reasoning/refine.py new file mode 100644 index 0000000..bbcc6a4 --- /dev/null +++ b/cortex/reasoning/refine.py @@ -0,0 +1,170 @@ +# refine.py +import os +import json +import logging +from typing import Any, Dict, Optional + +from llm.llm_router import call_llm + +logger = logging.getLogger(__name__) + +# =============================================== +# Configuration +# =============================================== + +REFINER_TEMPERATURE = float(os.getenv("REFINER_TEMPERATURE", "0.3")) +REFINER_MAX_TOKENS = int(os.getenv("REFINER_MAX_TOKENS", "768")) +REFINER_DEBUG = os.getenv("REFINER_DEBUG", "false").lower() == "true" +VERBOSE_DEBUG = os.getenv("VERBOSE_DEBUG", "false").lower() == "true" + +# These come from root .env +REFINE_LLM = os.getenv("REFINE_LLM", "").upper() +CORTEX_LLM = os.getenv("CORTEX_LLM", "PRIMARY").upper() + +if VERBOSE_DEBUG: + logger.setLevel(logging.DEBUG) + + # Console handler + console_handler = logging.StreamHandler() + console_handler.setFormatter(logging.Formatter( + '%(asctime)s [REFINE] %(levelname)s: %(message)s', + datefmt='%H:%M:%S' + )) + logger.addHandler(console_handler) + + # File handler + try: + os.makedirs('/app/logs', exist_ok=True) + file_handler = logging.FileHandler('/app/logs/cortex_verbose_debug.log', mode='a') + file_handler.setFormatter(logging.Formatter( + '%(asctime)s [REFINE] %(levelname)s: %(message)s', + datefmt='%Y-%m-%d %H:%M:%S' + )) + logger.addHandler(file_handler) + logger.debug("VERBOSE_DEBUG mode enabled for refine.py - logging to file") + except Exception as e: + logger.debug(f"VERBOSE_DEBUG mode enabled for refine.py - file logging failed: {e}") + + +# =============================================== +# Prompt builder +# =============================================== + +def build_refine_prompt( + draft_output: str, + reflection_notes: Optional[Any], + identity_block: Optional[str], + rag_block: Optional[str], +) -> str: + + try: + reflection_text = json.dumps(reflection_notes, ensure_ascii=False) + except Exception: + reflection_text = str(reflection_notes) + + identity_text = identity_block or "(none)" + rag_text = rag_block or "(none)" + + return f""" +You are Lyra Cortex's internal refiner. + +Your job: +- Fix factual issues. +- Improve clarity. +- Apply reflection notes when helpful. +- Respect identity constraints. +- Apply RAG context as truth source. + +Do NOT mention RAG, reflection, internal logic, or this refinement step. + +------------------------------ +[IDENTITY BLOCK] +{identity_text} + +------------------------------ +[RAG CONTEXT] +{rag_text} + +------------------------------ +[DRAFT ANSWER] +{draft_output} + +------------------------------ +[REFLECTION NOTES] +{reflection_text} + +------------------------------ +Task: +Rewrite the DRAFT into a single final answer for the user. +Return ONLY the final answer text. +""".strip() + + +# =============================================== +# Public API β€” now async & fully router-based +# =============================================== + +async def refine_answer( + draft_output: str, + reflection_notes: Optional[Any], + identity_block: Optional[str], + rag_block: Optional[str], +) -> Dict[str, Any]: + + if not draft_output: + return { + "final_output": "", + "used_backend": None, + "fallback_used": False, + } + + prompt = build_refine_prompt( + draft_output, + reflection_notes, + identity_block, + rag_block, + ) + + # backend priority: REFINE_LLM β†’ CORTEX_LLM β†’ PRIMARY + backend = REFINE_LLM or CORTEX_LLM or "PRIMARY" + + if VERBOSE_DEBUG: + logger.debug(f"\n{'='*80}") + logger.debug("[REFINE] Full prompt being sent to LLM:") + logger.debug(f"{'='*80}") + logger.debug(prompt) + logger.debug(f"{'='*80}") + logger.debug(f"Backend: {backend}, Temperature: {REFINER_TEMPERATURE}") + logger.debug(f"{'='*80}\n") + + try: + refined = await call_llm( + prompt, + backend=backend, + temperature=REFINER_TEMPERATURE, + ) + + if VERBOSE_DEBUG: + logger.debug(f"\n{'='*80}") + logger.debug("[REFINE] LLM Response received:") + logger.debug(f"{'='*80}") + logger.debug(refined) + logger.debug(f"{'='*80}\n") + + return { + "final_output": refined.strip() if refined else draft_output, + "used_backend": backend, + "fallback_used": False, + } + + except Exception as e: + logger.error(f"refine.py backend {backend} failed: {e}") + + if VERBOSE_DEBUG: + logger.debug("[REFINE] Falling back to draft output due to error") + + return { + "final_output": draft_output, + "used_backend": backend, + "fallback_used": True, + } diff --git a/cortex/reasoning/reflection.py b/cortex/reasoning/reflection.py new file mode 100644 index 0000000..df49315 --- /dev/null +++ b/cortex/reasoning/reflection.py @@ -0,0 +1,124 @@ +# reflection.py +import json +import os +import re +import logging +from llm.llm_router import call_llm + +# Logger +VERBOSE_DEBUG = os.getenv("VERBOSE_DEBUG", "false").lower() == "true" +logger = logging.getLogger(__name__) + +if VERBOSE_DEBUG: + logger.setLevel(logging.DEBUG) + + # Console handler + console_handler = logging.StreamHandler() + console_handler.setFormatter(logging.Formatter( + '%(asctime)s [REFLECTION] %(levelname)s: %(message)s', + datefmt='%H:%M:%S' + )) + logger.addHandler(console_handler) + + # File handler + try: + os.makedirs('/app/logs', exist_ok=True) + file_handler = logging.FileHandler('/app/logs/cortex_verbose_debug.log', mode='a') + file_handler.setFormatter(logging.Formatter( + '%(asctime)s [REFLECTION] %(levelname)s: %(message)s', + datefmt='%Y-%m-%d %H:%M:%S' + )) + logger.addHandler(file_handler) + logger.debug("VERBOSE_DEBUG mode enabled for reflection.py - logging to file") + except Exception as e: + logger.debug(f"VERBOSE_DEBUG mode enabled for reflection.py - file logging failed: {e}") + + +async def reflect_notes(intake_summary: str, identity_block: dict | None) -> dict: + """ + Produce short internal reflection notes for Cortex. + These are NOT shown to the user. + """ + + # ----------------------------- + # Build the prompt + # ----------------------------- + identity_text = "" + if identity_block: + identity_text = f"Identity:\n{identity_block}\n\n" + + prompt = ( + f"{identity_text}" + f"Recent summary:\n{intake_summary}\n\n" + "You are Lyra's meta-awareness layer. Your job is to produce short, directive " + "internal notes that guide Lyra’s reasoning engine. These notes are NEVER " + "shown to the user.\n\n" + "Rules for output:\n" + "1. Return ONLY valid JSON.\n" + "2. JSON must have exactly one key: \"notes\".\n" + "3. \"notes\" must be a list of 3 to 6 short strings.\n" + "4. Notes must be actionable (e.g., \"keep it concise\", \"maintain context\").\n" + "5. No markdown, no apologies, no explanations.\n\n" + "Return JSON:\n" + "{ \"notes\": [\"...\"] }\n" + ) + + # ----------------------------- + # Module-specific backend choice + # ----------------------------- + reflection_backend = os.getenv("REFLECTION_LLM") + cortex_backend = os.getenv("CORTEX_LLM", "PRIMARY").upper() + + # Reflection uses its own backend if set, otherwise cortex backend + backend = (reflection_backend or cortex_backend).upper() + + # ----------------------------- + # Call the selected LLM backend + # ----------------------------- + if VERBOSE_DEBUG: + logger.debug(f"\n{'='*80}") + logger.debug("[REFLECTION] Full prompt being sent to LLM:") + logger.debug(f"{'='*80}") + logger.debug(prompt) + logger.debug(f"{'='*80}") + logger.debug(f"Backend: {backend}") + logger.debug(f"{'='*80}\n") + + raw = await call_llm(prompt, backend=backend) + + if VERBOSE_DEBUG: + logger.debug(f"\n{'='*80}") + logger.debug("[REFLECTION] LLM Response received:") + logger.debug(f"{'='*80}") + logger.debug(raw) + logger.debug(f"{'='*80}\n") + + # ----------------------------- + # Try direct JSON + # ----------------------------- + try: + parsed = json.loads(raw.strip()) + if isinstance(parsed, dict) and "notes" in parsed: + if VERBOSE_DEBUG: + logger.debug(f"[REFLECTION] Parsed {len(parsed['notes'])} notes from JSON") + return parsed + except: + if VERBOSE_DEBUG: + logger.debug("[REFLECTION] Direct JSON parsing failed, trying extraction...") + + # ----------------------------- + # Try JSON extraction + # ----------------------------- + try: + match = re.search(r"\{.*?\}", raw, re.S) + if match: + parsed = json.loads(match.group(0)) + if isinstance(parsed, dict) and "notes" in parsed: + return parsed + except: + pass + + # ----------------------------- + # Fallback β€” treat raw text as a single note + # ----------------------------- + return {"notes": [raw.strip()]} diff --git a/cortex/refine.py b/cortex/refine.py deleted file mode 100644 index ac2a58f..0000000 --- a/cortex/refine.py +++ /dev/null @@ -1,187 +0,0 @@ -# refine.py -import os -import json -import logging -from typing import Any, Dict, Optional - -import requests - -logger = logging.getLogger(__name__) - -# ============================================================ -# Config -# ============================================================ - -PRIMARY_URL = os.getenv("LLM_PRIMARY_URL") -PRIMARY_MODEL = os.getenv("LLM_PRIMARY_MODEL", "mythomax") - -REFINER_TEMPERATURE = float(os.getenv("REFINER_TEMPERATURE", "0.3")) -REFINER_MAX_TOKENS = int(os.getenv("REFINER_MAX_TOKENS", "768")) -REFINER_DEBUG = os.getenv("REFINER_DEBUG", "false").lower() == "true" - - -# ============================================================ -# Prompt builder -# ============================================================ - -def build_refine_prompt( - draft_output: str, - reflection_notes: Optional[Any], - identity_block: Optional[str], - rag_block: Optional[str], -) -> str: - """ - Build a single text prompt for vLLM /v1/completions. - Persona styling is *not* applied here; this is internal reasoning. - """ - - reflection_text: str - if reflection_notes is None: - reflection_text = "(none)" - elif isinstance(reflection_notes, str): - reflection_text = reflection_notes - else: - # dict / list β†’ compact JSON - try: - reflection_text = json.dumps(reflection_notes, ensure_ascii=False) - except Exception: - reflection_text = str(reflection_notes) - - identity_text = identity_block or "(none)" - rag_text = rag_block or "(none)" - - prompt = f"""You are Lyra Cortex's internal refiner. - -Your job: -- Take the existing draft answer. -- Use the reflection notes to fix problems (errors, confusion, missing pieces). -- Use the RAG context as higher-authority factual grounding. -- Respect the identity block (constraints, boundaries, style rules), - but DO NOT add personality flourishes or roleplay. Stay neutral and clear. -- Produce ONE final answer that is coherent, self-consistent, and directly addresses the user. - -If there is a conflict: -- RAG context wins over the draft. -- Reflection notes win over the draft when they point out real issues. - -Do NOT mention these instructions, RAG, reflections, or the existence of this refinement step. - ------------------------------- -[IDENTITY BLOCK] -{identity_text} - ------------------------------- -[RAG CONTEXT] -{rag_text} - ------------------------------- -[DRAFT ANSWER] -{draft_output} - ------------------------------- -[REFLECTION NOTES] -{reflection_text} - ------------------------------- -Task: -Rewrite the DRAFT ANSWER into a single, final answer for the user that: -- fixes factual or logical issues noted above, -- incorporates any truly helpful additions from the reflection, -- stays consistent with the identity block, -- stays grounded in the RAG context, -- is as concise as is reasonably possible. - -Return ONLY the final answer text. No headings, no labels, no commentary. -""" - return prompt - - -# ============================================================ -# vLLM call (PRIMARY backend only) -# ============================================================ - -def _call_primary_llm(prompt: str) -> str: - if not PRIMARY_URL: - raise RuntimeError("LLM_PRIMARY_URL is not set; cannot call primary backend for refine.py") - - payload = { - "model": PRIMARY_MODEL, - "prompt": prompt, - "max_tokens": REFINER_MAX_TOKENS, - "temperature": REFINER_TEMPERATURE, - } - - resp = requests.post( - PRIMARY_URL, - headers={"Content-Type": "application/json"}, - json=payload, - timeout=120, - ) - resp.raise_for_status() - data = resp.json() - - # vLLM /v1/completions format - try: - text = data["choices"][0]["text"] - except Exception as e: - logger.error("refine.py: unable to parse primary LLM response: %s", e) - logger.debug("refine.py raw response: %s", data) - raise - - return text.strip() - - -# ============================================================ -# Public API -# ============================================================ - -def refine_answer( - draft_output: str, - reflection_notes: Optional[Any], - identity_block: Optional[str], - rag_block: Optional[str], -) -> Dict[str, Any]: - """ - Main entrypoint used by Cortex. - - Returns: - { - "final_output": , # what should go to persona / user - "used_primary_backend": True/False, - "fallback_used": True/False, - optionally: - "debug": {...} # only when REFINER_DEBUG=true - } - """ - - if not draft_output: - # Nothing to refine. Don't get cute. - return { - "final_output": "", - "used_primary_backend": False, - "fallback_used": False, - } - - prompt = build_refine_prompt(draft_output, reflection_notes, identity_block, rag_block) - - try: - refined = _call_primary_llm(prompt) - result: Dict[str, Any] = { - "final_output": refined or draft_output, - "used_primary_backend": True, - "fallback_used": False, - } - except Exception as e: - logger.error("refine.py: primary backend failed, returning draft_output. Error: %s", e) - result = { - "final_output": draft_output, - "used_primary_backend": False, - "fallback_used": True, - } - - if REFINER_DEBUG: - result["debug"] = { - "prompt": prompt[:4000], # don’t nuke logs - } - - return result diff --git a/cortex/reflection.py b/cortex/reflection.py deleted file mode 100644 index b296591..0000000 --- a/cortex/reflection.py +++ /dev/null @@ -1,56 +0,0 @@ -# reflection.py -from llm_router import call_llm -import json - - -async def reflect_notes(intake_summary: str, identity_block: dict | None) -> dict: - """ - Generate reflection notes (internal guidance) for the reasoning engine. - These notes help simulate continuity and identity without being shown to the user. - """ - - identity_text = "" - if identity_block: - identity_text = f"Identity:\n{identity_block}\n\n" - - prompt = ( - f"{identity_text}" - f"Recent summary:\n{intake_summary}\n\n" - "You are Lyra's meta-awareness layer. Your job is to produce short, directive " - "internal notes that guide Lyra’s reasoning engine. These notes are NEVER " - "shown to the user.\n\n" - "Rules for output:\n" - "1. Return ONLY valid JSON.\n" - "2. JSON must have exactly one key: \"notes\".\n" - "3. \"notes\" must be a list of 3–6 short strings.\n" - "4. Notes must be actionable (e.g., \"keep it concise\", \"maintain context\").\n" - "5. No markdown, no apologies, no explanations.\n\n" - "Return JSON:\n" - "{ \"notes\": [\"...\"] }\n" - ) - - - raw = await call_llm(prompt, backend="cloud") - print("[Reflection-Raw]:", raw) - - - try: - parsed = json.loads(raw.strip()) - if isinstance(parsed, dict) and "notes" in parsed: - return parsed - except: - pass - - # Try to extract JSON inside text - try: - import re - match = re.search(r'\{.*?\}', raw, re.S) # <-- non-greedy ! - if match: - parsed = json.loads(match.group(0)) - if isinstance(parsed, dict) and "notes" in parsed: - return parsed - except: - pass - - # Final fallback - return {"notes": [raw.strip()]} \ No newline at end of file diff --git a/cortex/requirements.txt b/cortex/requirements.txt index c2571b1..37aee8d 100644 --- a/cortex/requirements.txt +++ b/cortex/requirements.txt @@ -4,3 +4,7 @@ python-dotenv==1.0.1 requests==2.32.3 httpx==0.27.2 pydantic==2.10.4 +duckduckgo-search==6.3.5 +aiohttp==3.9.1 +tenacity==9.0.0 +docker==7.1.0 diff --git a/cortex/router.py b/cortex/router.py new file mode 100644 index 0000000..4e7ff30 --- /dev/null +++ b/cortex/router.py @@ -0,0 +1,559 @@ +# router.py + +import os +import logging +import asyncio +from fastapi import APIRouter +from fastapi.responses import StreamingResponse +from pydantic import BaseModel + +from reasoning.reasoning import reason_check +from reasoning.reflection import reflect_notes +from reasoning.refine import refine_answer +from persona.speak import speak +from persona.identity import load_identity +from context import collect_context, update_last_assistant_message +from intake.intake import add_exchange_internal + +from autonomy.monologue.monologue import InnerMonologue +from autonomy.self.state import load_self_state +from autonomy.tools.stream_events import get_stream_manager + + +# ------------------------------------------------------------------- +# Setup +# ------------------------------------------------------------------- +LOG_DETAIL_LEVEL = os.getenv("LOG_DETAIL_LEVEL", "summary").lower() +logger = logging.getLogger(__name__) + +# Always set up basic logging +logger.setLevel(logging.INFO) +console_handler = logging.StreamHandler() +console_handler.setFormatter(logging.Formatter( + '%(asctime)s [ROUTER] %(levelname)s: %(message)s', + datefmt='%H:%M:%S' +)) +logger.addHandler(console_handler) + + +cortex_router = APIRouter() +inner_monologue = InnerMonologue() + + +# ------------------------------------------------------------------- +# Models +# ------------------------------------------------------------------- +class ReasonRequest(BaseModel): + session_id: str + user_prompt: str + temperature: float | None = None + backend: str | None = None + + +# ------------------------------------------------------------------- +# /reason endpoint +# ------------------------------------------------------------------- +@cortex_router.post("/reason") +async def run_reason(req: ReasonRequest): + from datetime import datetime + pipeline_start = datetime.now() + stage_timings = {} + + # Show pipeline start in detailed/verbose mode + if LOG_DETAIL_LEVEL in ["detailed", "verbose"]: + logger.info(f"\n{'='*100}") + logger.info(f"πŸš€ PIPELINE START | Session: {req.session_id} | {datetime.now().strftime('%H:%M:%S.%f')[:-3]}") + logger.info(f"{'='*100}") + logger.info(f"πŸ“ User: {req.user_prompt[:150]}...") + logger.info(f"{'-'*100}\n") + + # ---------------------------------------------------------------- + # STAGE 0 β€” Context + # ---------------------------------------------------------------- + stage_start = datetime.now() + context_state = await collect_context(req.session_id, req.user_prompt) + stage_timings["context"] = (datetime.now() - stage_start).total_seconds() * 1000 + + # ---------------------------------------------------------------- + # STAGE 0.5 β€” Identity + # ---------------------------------------------------------------- + stage_start = datetime.now() + identity_block = load_identity(req.session_id) + stage_timings["identity"] = (datetime.now() - stage_start).total_seconds() * 1000 + + # ---------------------------------------------------------------- + # STAGE 0.6 β€” Inner Monologue (observer-only) + # ---------------------------------------------------------------- + stage_start = datetime.now() + + inner_result = None + try: + self_state = load_self_state() + + mono_context = { + "user_message": req.user_prompt, + "session_id": req.session_id, + "self_state": self_state, + "context_summary": context_state, + } + + inner_result = await inner_monologue.process(mono_context) + logger.info(f"🧠 Monologue | {inner_result.get('intent', 'unknown')} | Tone: {inner_result.get('tone', 'neutral')}") + + # Store in context for downstream use + context_state["monologue"] = inner_result + + except Exception as e: + logger.warning(f"⚠️ Monologue failed: {e}") + + stage_timings["monologue"] = (datetime.now() - stage_start).total_seconds() * 1000 + + # ---------------------------------------------------------------- + # STAGE 0.7 β€” Executive Planning (conditional) + # ---------------------------------------------------------------- + stage_start = datetime.now() + executive_plan = None + if inner_result and inner_result.get("consult_executive"): + + try: + from autonomy.executive.planner import plan_execution + executive_plan = await plan_execution( + user_prompt=req.user_prompt, + intent=inner_result.get("intent", "unknown"), + context_state=context_state, + identity_block=identity_block + ) + logger.info(f"🎯 Executive plan: {executive_plan.get('summary', 'N/A')[:80]}...") + except Exception as e: + logger.warning(f"⚠️ Executive planning failed: {e}") + executive_plan = None + + stage_timings["executive"] = (datetime.now() - stage_start).total_seconds() * 1000 + + # ---------------------------------------------------------------- + # STAGE 0.8 β€” Autonomous Tool Invocation + # ---------------------------------------------------------------- + stage_start = datetime.now() + tool_results = None + autonomous_enabled = os.getenv("ENABLE_AUTONOMOUS_TOOLS", "true").lower() == "true" + tool_confidence_threshold = float(os.getenv("AUTONOMOUS_TOOL_CONFIDENCE_THRESHOLD", "0.6")) + + if autonomous_enabled and inner_result: + + try: + from autonomy.tools.decision_engine import ToolDecisionEngine + from autonomy.tools.orchestrator import ToolOrchestrator + + # Analyze which tools to invoke + decision_engine = ToolDecisionEngine() + tool_decision = await decision_engine.analyze_tool_needs( + user_prompt=req.user_prompt, + monologue=inner_result, + context_state=context_state, + available_tools=["RAG", "WEB", "WEATHER", "CODEBRAIN"] + ) + + # Execute tools if confidence threshold met + if tool_decision["should_invoke_tools"] and tool_decision["confidence"] >= tool_confidence_threshold: + orchestrator = ToolOrchestrator(tool_timeout=30) + tool_results = await orchestrator.execute_tools( + tools_to_invoke=tool_decision["tools_to_invoke"], + context_state=context_state + ) + + # Format results for context injection + tool_context = orchestrator.format_results_for_context(tool_results) + context_state["autonomous_tool_results"] = tool_context + + summary = tool_results.get("execution_summary", {}) + logger.info(f"πŸ› οΈ Tools executed: {summary.get('successful', [])} succeeded") + else: + logger.info(f"πŸ› οΈ No tools invoked (confidence: {tool_decision.get('confidence', 0):.2f})") + + except Exception as e: + logger.warning(f"⚠️ Autonomous tool invocation failed: {e}") + if LOG_DETAIL_LEVEL == "verbose": + import traceback + traceback.print_exc() + + stage_timings["tools"] = (datetime.now() - stage_start).total_seconds() * 1000 + + # ---------------------------------------------------------------- + # STAGE 1-5 β€” Core Reasoning Pipeline + # ---------------------------------------------------------------- + stage_start = datetime.now() + + # Extract intake summary + intake_summary = "(no context available)" + if context_state.get("intake"): + l20 = context_state["intake"].get("L20") + if isinstance(l20, dict): + intake_summary = l20.get("summary", intake_summary) + elif isinstance(l20, str): + intake_summary = l20 + + # Reflection + try: + reflection = await reflect_notes(intake_summary, identity_block=identity_block) + reflection_notes = reflection.get("notes", []) + except Exception as e: + reflection_notes = [] + logger.warning(f"⚠️ Reflection failed: {e}") + + stage_timings["reflection"] = (datetime.now() - stage_start).total_seconds() * 1000 + + # Reasoning (draft) + stage_start = datetime.now() + draft = await reason_check( + req.user_prompt, + identity_block=identity_block, + rag_block=context_state.get("rag", []), + reflection_notes=reflection_notes, + context=context_state, + monologue=inner_result, + executive_plan=executive_plan + ) + stage_timings["reasoning"] = (datetime.now() - stage_start).total_seconds() * 1000 + + # Refinement + stage_start = datetime.now() + result = await refine_answer( + draft_output=draft, + reflection_notes=reflection_notes, + identity_block=identity_block, + rag_block=context_state.get("rag", []), + ) + final_neutral = result["final_output"] + stage_timings["refinement"] = (datetime.now() - stage_start).total_seconds() * 1000 + + # Persona + stage_start = datetime.now() + tone = inner_result.get("tone", "neutral") if inner_result else "neutral" + depth = inner_result.get("depth", "medium") if inner_result else "medium" + persona_answer = await speak(final_neutral, tone=tone, depth=depth) + stage_timings["persona"] = (datetime.now() - stage_start).total_seconds() * 1000 + + # ---------------------------------------------------------------- + # STAGE 6 β€” Session update + # ---------------------------------------------------------------- + update_last_assistant_message(req.session_id, persona_answer) + + # ---------------------------------------------------------------- + # STAGE 6.5 β€” Self-state update & Pattern Learning + # ---------------------------------------------------------------- + stage_start = datetime.now() + try: + from autonomy.self.analyzer import analyze_and_update_state + await analyze_and_update_state( + monologue=inner_result or {}, + user_prompt=req.user_prompt, + response=persona_answer, + context=context_state + ) + except Exception as e: + logger.warning(f"⚠️ Self-state update failed: {e}") + + try: + from autonomy.learning.pattern_learner import get_pattern_learner + learner = get_pattern_learner() + await learner.learn_from_interaction( + user_prompt=req.user_prompt, + response=persona_answer, + monologue=inner_result or {}, + context=context_state + ) + except Exception as e: + logger.warning(f"⚠️ Pattern learning failed: {e}") + + stage_timings["learning"] = (datetime.now() - stage_start).total_seconds() * 1000 + + # ---------------------------------------------------------------- + # STAGE 7 β€” Proactive Monitoring & Suggestions + # ---------------------------------------------------------------- + stage_start = datetime.now() + proactive_enabled = os.getenv("ENABLE_PROACTIVE_MONITORING", "true").lower() == "true" + proactive_min_priority = float(os.getenv("PROACTIVE_SUGGESTION_MIN_PRIORITY", "0.6")) + + if proactive_enabled: + try: + from autonomy.proactive.monitor import get_proactive_monitor + + monitor = get_proactive_monitor(min_priority=proactive_min_priority) + self_state = load_self_state() + + suggestion = await monitor.analyze_session( + session_id=req.session_id, + context_state=context_state, + self_state=self_state + ) + + if suggestion: + suggestion_text = monitor.format_suggestion(suggestion) + persona_answer += suggestion_text + logger.info(f"πŸ’‘ Proactive suggestion: {suggestion['type']} (priority: {suggestion['priority']:.2f})") + + except Exception as e: + logger.warning(f"⚠️ Proactive monitoring failed: {e}") + + stage_timings["proactive"] = (datetime.now() - stage_start).total_seconds() * 1000 + + # ---------------------------------------------------------------- + # PIPELINE COMPLETE β€” Summary + # ---------------------------------------------------------------- + total_duration = (datetime.now() - pipeline_start).total_seconds() * 1000 + + # Always show pipeline completion + logger.info(f"\n{'='*100}") + logger.info(f"✨ PIPELINE COMPLETE | Session: {req.session_id} | Total: {total_duration:.0f}ms") + logger.info(f"{'='*100}") + + # Show timing breakdown in detailed/verbose mode + if LOG_DETAIL_LEVEL in ["detailed", "verbose"]: + logger.info("⏱️ Stage Timings:") + for stage, duration in stage_timings.items(): + pct = (duration / total_duration) * 100 if total_duration > 0 else 0 + logger.info(f" {stage:15s}: {duration:6.0f}ms ({pct:5.1f}%)") + + logger.info(f"πŸ“€ Output: {len(persona_answer)} chars") + logger.info(f"{'='*100}\n") + + # ---------------------------------------------------------------- + # RETURN + # ---------------------------------------------------------------- + return { + "draft": draft, + "neutral": final_neutral, + "persona": persona_answer, + "reflection": reflection_notes, + "session_id": req.session_id, + "context_summary": { + "rag_results": len(context_state.get("rag", [])), + "minutes_since_last": context_state.get("minutes_since_last_msg"), + "message_count": context_state.get("message_count"), + "mode": context_state.get("mode"), + } + } + + +# ------------------------------------------------------------------- +# /simple endpoint - Standard chatbot mode (no reasoning pipeline) +# ------------------------------------------------------------------- +@cortex_router.post("/simple") +async def run_simple(req: ReasonRequest): + """ + Standard chatbot mode - bypasses all cortex reasoning pipeline. + Just a simple conversation loop like a typical chatbot. + """ + from datetime import datetime + from llm.llm_router import call_llm + from autonomy.tools.function_caller import FunctionCaller + + start_time = datetime.now() + + logger.info(f"\n{'='*100}") + logger.info(f"πŸ’¬ SIMPLE MODE | Session: {req.session_id} | {datetime.now().strftime('%H:%M:%S.%f')[:-3]}") + logger.info(f"{'='*100}") + logger.info(f"πŸ“ User: {req.user_prompt[:150]}...") + logger.info(f"{'-'*100}\n") + + # Get conversation history from context and intake buffer + context_state = await collect_context(req.session_id, req.user_prompt) + + # Get recent messages from Intake buffer + from intake.intake import get_recent_messages + recent_msgs = get_recent_messages(req.session_id, limit=20) + logger.info(f"πŸ“‹ Retrieved {len(recent_msgs)} recent messages from Intake buffer") + + # Build simple conversation history with system message + system_message = { + "role": "system", + "content": ( + "You are a helpful AI assistant. Provide direct, concise responses to the user's questions. " + "Maintain context from previous messages in the conversation." + ) + } + + messages = [system_message] + + # Add conversation history + + if recent_msgs: + for msg in recent_msgs: + messages.append({ + "role": msg.get("role", "user"), + "content": msg.get("content", "") + }) + logger.info(f" - {msg.get('role')}: {msg.get('content', '')[:50]}...") + + # Add current user message + messages.append({ + "role": "user", + "content": req.user_prompt + }) + + logger.info(f"πŸ“¨ Total messages being sent to LLM: {len(messages)} (including system message)") + + # Get backend from request, otherwise fall back to env variable + backend = req.backend if req.backend else os.getenv("STANDARD_MODE_LLM", "SECONDARY") + backend = backend.upper() # Normalize to uppercase + logger.info(f"πŸ”§ Using backend: {backend}") + + temperature = req.temperature if req.temperature is not None else 0.7 + + # Check if tools are enabled + enable_tools = os.getenv("STANDARD_MODE_ENABLE_TOOLS", "false").lower() == "true" + + # Call LLM with or without tools + try: + if enable_tools: + # Use FunctionCaller for tool-enabled conversation + logger.info(f"πŸ› οΈ Tool calling enabled for Standard Mode") + logger.info(f"πŸ” Creating FunctionCaller with backend={backend}, temp={temperature}") + function_caller = FunctionCaller(backend, temperature) + logger.info(f"πŸ” FunctionCaller created, calling call_with_tools...") + result = await function_caller.call_with_tools( + messages=messages, + max_tokens=2048, + session_id=req.session_id # Pass session_id for streaming + ) + logger.info(f"πŸ” call_with_tools returned: iterations={result.get('iterations')}, tool_calls={len(result.get('tool_calls', []))}") + + # Log tool usage + if result.get("tool_calls"): + tool_names = [tc["name"] for tc in result["tool_calls"]] + logger.info(f"πŸ”§ Tools used: {', '.join(tool_names)} ({result['iterations']} iterations)") + + response = result["content"].strip() + else: + # Direct LLM call without tools (original behavior) + raw_response = await call_llm( + messages=messages, + backend=backend, + temperature=temperature, + max_tokens=2048 + ) + response = raw_response.strip() + + except Exception as e: + logger.error(f"❌ LLM call failed: {e}") + response = f"Error: {str(e)}" + + # Update session with the exchange + try: + update_last_assistant_message(req.session_id, response) + add_exchange_internal({ + "session_id": req.session_id, + "role": "user", + "content": req.user_prompt + }) + add_exchange_internal({ + "session_id": req.session_id, + "role": "assistant", + "content": response + }) + except Exception as e: + logger.warning(f"⚠️ Session update failed: {e}") + + duration = (datetime.now() - start_time).total_seconds() * 1000 + + logger.info(f"\n{'='*100}") + logger.info(f"✨ SIMPLE MODE COMPLETE | Session: {req.session_id} | Total: {duration:.0f}ms") + logger.info(f"πŸ“€ Output: {len(response)} chars") + logger.info(f"{'='*100}\n") + + return { + "draft": response, + "neutral": response, + "persona": response, + "reflection": "", + "session_id": req.session_id, + "context_summary": { + "message_count": len(messages), + "mode": "standard" + } + } + + +# ------------------------------------------------------------------- +# /stream/thinking endpoint - SSE stream for "show your work" +# ------------------------------------------------------------------- +@cortex_router.get("/stream/thinking/{session_id}") +async def stream_thinking(session_id: str): + """ + Server-Sent Events stream for tool calling "show your work" feature. + + Streams real-time updates about: + - Thinking/planning steps + - Tool calls being made + - Tool execution results + - Final completion + """ + stream_manager = get_stream_manager() + queue = stream_manager.subscribe(session_id) + + async def event_generator(): + try: + # Send initial connection message + import json + connected_event = json.dumps({"type": "connected", "session_id": session_id}) + yield f"data: {connected_event}\n\n" + + while True: + # Wait for events with timeout to send keepalive + try: + event = await asyncio.wait_for(queue.get(), timeout=30.0) + + # Format as SSE + event_data = json.dumps(event) + yield f"data: {event_data}\n\n" + + # If it's a "done" event, close the stream + if event.get("type") == "done": + break + + except asyncio.TimeoutError: + # Send keepalive comment + yield ": keepalive\n\n" + + except asyncio.CancelledError: + logger.info(f"Stream cancelled for session {session_id}") + finally: + stream_manager.unsubscribe(session_id, queue) + + return StreamingResponse( + event_generator(), + media_type="text/event-stream", + headers={ + "Cache-Control": "no-cache", + "Connection": "keep-alive", + "X-Accel-Buffering": "no" # Disable nginx buffering + } + ) + + +# ------------------------------------------------------------------- +# /ingest endpoint (internal) +# ------------------------------------------------------------------- +class IngestPayload(BaseModel): + session_id: str + user_msg: str + assistant_msg: str + + +@cortex_router.post("/ingest") +async def ingest(payload: IngestPayload): + try: + update_last_assistant_message(payload.session_id, payload.assistant_msg) + except Exception as e: + logger.warning(f"[INGEST] Session update failed: {e}") + + try: + add_exchange_internal({ + "session_id": payload.session_id, + "user_msg": payload.user_msg, + "assistant_msg": payload.assistant_msg, + }) + except Exception as e: + logger.warning(f"[INGEST] Intake update failed: {e}") + + return {"status": "ok", "session_id": payload.session_id} diff --git a/cortex/tests/__init__.py b/cortex/tests/__init__.py new file mode 100644 index 0000000..f5afebe --- /dev/null +++ b/cortex/tests/__init__.py @@ -0,0 +1 @@ +"""Tests for Project Lyra Cortex.""" diff --git a/cortex/tests/test_autonomy_phase1.py b/cortex/tests/test_autonomy_phase1.py new file mode 100644 index 0000000..4da933e --- /dev/null +++ b/cortex/tests/test_autonomy_phase1.py @@ -0,0 +1,197 @@ +""" +Integration tests for Phase 1 autonomy features. +Tests monologue integration, executive planning, and self-state persistence. +""" + +import asyncio +import json +import sys +import os + +# Add parent directory to path for imports +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from autonomy.monologue.monologue import InnerMonologue +from autonomy.self.state import load_self_state, update_self_state, get_self_state_instance +from autonomy.executive.planner import plan_execution + + +async def test_monologue_integration(): + """Test monologue generates valid output.""" + print("\n" + "="*60) + print("TEST 1: Monologue Integration") + print("="*60) + + mono = InnerMonologue() + + context = { + "user_message": "Explain quantum computing to me like I'm 5", + "session_id": "test_001", + "self_state": load_self_state(), + "context_summary": {"message_count": 5} + } + + result = await mono.process(context) + + assert "intent" in result, "Missing intent field" + assert "tone" in result, "Missing tone field" + assert "depth" in result, "Missing depth field" + assert "consult_executive" in result, "Missing consult_executive field" + + print("βœ“ Monologue integration test passed") + print(f" Result: {json.dumps(result, indent=2)}") + + return result + + +async def test_executive_planning(): + """Test executive planner generates valid plans.""" + print("\n" + "="*60) + print("TEST 2: Executive Planning") + print("="*60) + + plan = await plan_execution( + user_prompt="Help me build a distributed system with microservices architecture", + intent="technical_implementation", + context_state={ + "tools_available": ["RAG", "WEB", "CODEBRAIN"], + "message_count": 3, + "minutes_since_last_msg": 2.5, + "active_project": None + }, + identity_block={} + ) + + assert "summary" in plan, "Missing summary field" + assert "plan_text" in plan, "Missing plan_text field" + assert "steps" in plan, "Missing steps field" + assert len(plan["steps"]) > 0, "No steps generated" + + print("βœ“ Executive planning test passed") + print(f" Plan summary: {plan['summary']}") + print(f" Steps: {len(plan['steps'])}") + print(f" Complexity: {plan.get('estimated_complexity', 'unknown')}") + + return plan + + +def test_self_state_persistence(): + """Test self-state loads and updates.""" + print("\n" + "="*60) + print("TEST 3: Self-State Persistence") + print("="*60) + + state1 = load_self_state() + assert "mood" in state1, "Missing mood field" + assert "energy" in state1, "Missing energy field" + assert "interaction_count" in state1, "Missing interaction_count" + + initial_count = state1.get("interaction_count", 0) + print(f" Initial interaction count: {initial_count}") + + update_self_state( + mood_delta=0.1, + energy_delta=-0.05, + new_focus="testing" + ) + + state2 = load_self_state() + assert state2["interaction_count"] == initial_count + 1, "Interaction count not incremented" + assert state2["focus"] == "testing", "Focus not updated" + + print("βœ“ Self-state persistence test passed") + print(f" New interaction count: {state2['interaction_count']}") + print(f" New focus: {state2['focus']}") + print(f" New energy: {state2['energy']:.2f}") + + return state2 + + +async def test_end_to_end_flow(): + """Test complete flow from monologue through planning.""" + print("\n" + "="*60) + print("TEST 4: End-to-End Flow") + print("="*60) + + # Step 1: Monologue detects complex query + mono = InnerMonologue() + mono_result = await mono.process({ + "user_message": "Design a scalable ML pipeline with CI/CD integration", + "session_id": "test_e2e", + "self_state": load_self_state(), + "context_summary": {} + }) + + print(f" Monologue intent: {mono_result.get('intent')}") + print(f" Consult executive: {mono_result.get('consult_executive')}") + + # Step 2: If executive requested, generate plan + if mono_result.get("consult_executive"): + plan = await plan_execution( + user_prompt="Design a scalable ML pipeline with CI/CD integration", + intent=mono_result.get("intent", "unknown"), + context_state={"tools_available": ["CODEBRAIN", "WEB"]}, + identity_block={} + ) + + assert plan is not None, "Plan should be generated" + print(f" Executive plan generated: {len(plan.get('steps', []))} steps") + + # Step 3: Update self-state + update_self_state( + energy_delta=-0.1, # Complex task is tiring + new_focus="ml_pipeline_design", + confidence_delta=0.05 + ) + + state = load_self_state() + assert state["focus"] == "ml_pipeline_design", "Focus should be updated" + + print("βœ“ End-to-end flow test passed") + print(f" Final state: {state['mood']}, energy={state['energy']:.2f}") + + return True + + +async def run_all_tests(): + """Run all Phase 1 tests.""" + print("\n" + "="*60) + print("PHASE 1 AUTONOMY TESTS") + print("="*60) + + try: + # Test 1: Monologue + mono_result = await test_monologue_integration() + + # Test 2: Executive Planning + plan_result = await test_executive_planning() + + # Test 3: Self-State + state_result = test_self_state_persistence() + + # Test 4: End-to-End + await test_end_to_end_flow() + + print("\n" + "="*60) + print("ALL TESTS PASSED βœ“") + print("="*60) + + print("\nSummary:") + print(f" - Monologue: {mono_result.get('intent')} ({mono_result.get('tone')})") + print(f" - Executive: {plan_result.get('estimated_complexity')} complexity") + print(f" - Self-state: {state_result.get('interaction_count')} interactions") + + return True + + except Exception as e: + print("\n" + "="*60) + print(f"TEST FAILED: {e}") + print("="*60) + import traceback + traceback.print_exc() + return False + + +if __name__ == "__main__": + success = asyncio.run(run_all_tests()) + sys.exit(0 if success else 1) diff --git a/cortex/tests/test_autonomy_phase2.py b/cortex/tests/test_autonomy_phase2.py new file mode 100644 index 0000000..aa5956a --- /dev/null +++ b/cortex/tests/test_autonomy_phase2.py @@ -0,0 +1,495 @@ +""" +Integration tests for Phase 2 autonomy features. +Tests autonomous tool invocation, proactive monitoring, actions, and pattern learning. +""" + +import asyncio +import json +import sys +import os + +# Add parent directory to path for imports +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +# Override self-state file path for testing +os.environ["SELF_STATE_FILE"] = "/tmp/test_self_state.json" + +from autonomy.tools.decision_engine import ToolDecisionEngine +from autonomy.tools.orchestrator import ToolOrchestrator +from autonomy.proactive.monitor import ProactiveMonitor +from autonomy.actions.autonomous_actions import AutonomousActionManager +from autonomy.learning.pattern_learner import PatternLearner +from autonomy.self.state import load_self_state, get_self_state_instance + + +async def test_tool_decision_engine(): + """Test autonomous tool decision making.""" + print("\n" + "="*60) + print("TEST 1: Tool Decision Engine") + print("="*60) + + engine = ToolDecisionEngine() + + # Test 1a: Memory reference detection + result = await engine.analyze_tool_needs( + user_prompt="What did we discuss earlier about Python?", + monologue={"intent": "clarification", "consult_executive": False}, + context_state={}, + available_tools=["RAG", "WEB", "WEATHER"] + ) + + assert result["should_invoke_tools"], "Should invoke tools for memory reference" + assert any(t["tool"] == "RAG" for t in result["tools_to_invoke"]), "Should recommend RAG" + assert result["confidence"] > 0.8, f"Confidence should be high for clear memory reference: {result['confidence']}" + + print(f" βœ“ Memory reference detection passed") + print(f" Tools: {[t['tool'] for t in result['tools_to_invoke']]}") + print(f" Confidence: {result['confidence']:.2f}") + + # Test 1b: Web search detection + result = await engine.analyze_tool_needs( + user_prompt="What's the latest news about AI developments?", + monologue={"intent": "information_seeking", "consult_executive": False}, + context_state={}, + available_tools=["RAG", "WEB", "WEATHER"] + ) + + assert result["should_invoke_tools"], "Should invoke tools for current info request" + assert any(t["tool"] == "WEB" for t in result["tools_to_invoke"]), "Should recommend WEB" + + print(f" βœ“ Web search detection passed") + print(f" Tools: {[t['tool'] for t in result['tools_to_invoke']]}") + + # Test 1c: Weather detection + result = await engine.analyze_tool_needs( + user_prompt="What's the weather like today in Boston?", + monologue={"intent": "information_seeking", "consult_executive": False}, + context_state={}, + available_tools=["RAG", "WEB", "WEATHER"] + ) + + assert result["should_invoke_tools"], "Should invoke tools for weather query" + assert any(t["tool"] == "WEATHER" for t in result["tools_to_invoke"]), "Should recommend WEATHER" + + print(f" βœ“ Weather detection passed") + + # Test 1d: Proactive RAG for complex queries + result = await engine.analyze_tool_needs( + user_prompt="Design a microservices architecture", + monologue={"intent": "technical_implementation", "consult_executive": True}, + context_state={}, + available_tools=["RAG", "WEB", "CODEBRAIN"] + ) + + assert result["should_invoke_tools"], "Should proactively invoke tools for complex queries" + rag_tools = [t for t in result["tools_to_invoke"] if t["tool"] == "RAG"] + assert len(rag_tools) > 0, "Should include proactive RAG" + + print(f" βœ“ Proactive RAG detection passed") + print(f" Reason: {rag_tools[0]['reason']}") + + print("\nβœ“ Tool Decision Engine tests passed\n") + return result + + +async def test_tool_orchestrator(): + """Test tool orchestration (mock mode).""" + print("\n" + "="*60) + print("TEST 2: Tool Orchestrator (Mock Mode)") + print("="*60) + + orchestrator = ToolOrchestrator(tool_timeout=5) + + # Since actual tools may not be available, test the orchestrator structure + print(f" Available tools: {list(orchestrator.available_tools.keys())}") + + # Test with tools_to_invoke (will fail gracefully if tools unavailable) + tools_to_invoke = [ + {"tool": "RAG", "query": "test query", "reason": "testing", "priority": 0.9} + ] + + result = await orchestrator.execute_tools( + tools_to_invoke=tools_to_invoke, + context_state={"session_id": "test"} + ) + + assert "results" in result, "Should return results dict" + assert "execution_summary" in result, "Should return execution summary" + + summary = result["execution_summary"] + assert "tools_invoked" in summary, "Summary should include tools_invoked" + assert "total_time_ms" in summary, "Summary should include timing" + + print(f" βœ“ Orchestrator structure valid") + print(f" Summary: {summary}") + + # Test result formatting + formatted = orchestrator.format_results_for_context(result) + assert isinstance(formatted, str), "Should format results as string" + + print(f" βœ“ Result formatting works") + print(f" Formatted length: {len(formatted)} chars") + + print("\nβœ“ Tool Orchestrator tests passed\n") + return result + + +async def test_proactive_monitor(): + """Test proactive monitoring and suggestions.""" + print("\n" + "="*60) + print("TEST 3: Proactive Monitor") + print("="*60) + + monitor = ProactiveMonitor(min_priority=0.6) + + # Test 3a: Long silence detection + context_state = { + "message_count": 5, + "minutes_since_last_msg": 35 # > 30 minutes + } + + self_state = load_self_state() + + suggestion = await monitor.analyze_session( + session_id="test_silence", + context_state=context_state, + self_state=self_state + ) + + assert suggestion is not None, "Should generate suggestion for long silence" + assert suggestion["type"] == "check_in", f"Should be check_in type: {suggestion['type']}" + assert suggestion["priority"] >= 0.6, "Priority should meet threshold" + + print(f" βœ“ Long silence detection passed") + print(f" Type: {suggestion['type']}, Priority: {suggestion['priority']:.2f}") + print(f" Suggestion: {suggestion['suggestion'][:50]}...") + + # Test 3b: Learning opportunity (high curiosity) + self_state["curiosity"] = 0.8 + self_state["learning_queue"] = ["quantum computing", "rust programming"] + + # Reset cooldown for this test + monitor.reset_cooldown("test_learning") + + suggestion = await monitor.analyze_session( + session_id="test_learning", + context_state={"message_count": 3, "minutes_since_last_msg": 2}, + self_state=self_state + ) + + assert suggestion is not None, "Should generate learning suggestion" + assert suggestion["type"] == "learning", f"Should be learning type: {suggestion['type']}" + + print(f" βœ“ Learning opportunity detection passed") + print(f" Suggestion: {suggestion['suggestion'][:70]}...") + + # Test 3c: Conversation milestone + monitor.reset_cooldown("test_milestone") + + # Reset curiosity to avoid learning suggestion taking precedence + self_state["curiosity"] = 0.5 + self_state["learning_queue"] = [] + + suggestion = await monitor.analyze_session( + session_id="test_milestone", + context_state={"message_count": 50, "minutes_since_last_msg": 1}, + self_state=self_state + ) + + assert suggestion is not None, "Should generate milestone suggestion" + # Note: learning or summary both valid - check it's a reasonable suggestion + assert suggestion["type"] in ["summary", "learning", "check_in"], f"Should be valid type: {suggestion['type']}" + + print(f" βœ“ Conversation milestone detection passed (type: {suggestion['type']})") + + # Test 3d: Cooldown mechanism + # Try to get another suggestion immediately (should be blocked) + suggestion2 = await monitor.analyze_session( + session_id="test_milestone", + context_state={"message_count": 51, "minutes_since_last_msg": 1}, + self_state=self_state + ) + + assert suggestion2 is None, "Should not generate suggestion during cooldown" + + print(f" βœ“ Cooldown mechanism working") + + # Check stats + stats = monitor.get_session_stats("test_milestone") + assert stats["cooldown_active"], "Cooldown should be active" + print(f" Cooldown remaining: {stats['cooldown_remaining']}s") + + print("\nβœ“ Proactive Monitor tests passed\n") + return suggestion + + +async def test_autonomous_actions(): + """Test autonomous action execution.""" + print("\n" + "="*60) + print("TEST 4: Autonomous Actions") + print("="*60) + + manager = AutonomousActionManager() + + # Test 4a: List allowed actions + allowed = manager.get_allowed_actions() + assert "create_memory" in allowed, "Should have create_memory action" + assert "update_goal" in allowed, "Should have update_goal action" + assert "learn_topic" in allowed, "Should have learn_topic action" + + print(f" βœ“ Allowed actions: {allowed}") + + # Test 4b: Validate actions + validation = manager.validate_action("create_memory", {"text": "test memory"}) + assert validation["valid"], "Should validate correct action" + + print(f" βœ“ Action validation passed") + + # Test 4c: Execute learn_topic action + result = await manager.execute_action( + action_type="learn_topic", + parameters={"topic": "rust programming", "reason": "testing", "priority": 0.8}, + context={"session_id": "test"} + ) + + assert result["success"], f"Action should succeed: {result.get('error', 'unknown')}" + assert "topic" in result["result"], "Should return topic info" + + print(f" βœ“ learn_topic action executed") + print(f" Topic: {result['result']['topic']}") + print(f" Queue position: {result['result']['queue_position']}") + + # Test 4d: Execute update_focus action + result = await manager.execute_action( + action_type="update_focus", + parameters={"focus": "autonomy_testing", "reason": "running tests"}, + context={"session_id": "test"} + ) + + assert result["success"], "update_focus should succeed" + + print(f" βœ“ update_focus action executed") + print(f" New focus: {result['result']['new_focus']}") + + # Test 4e: Reject non-whitelisted action + result = await manager.execute_action( + action_type="delete_all_files", # NOT in whitelist + parameters={}, + context={"session_id": "test"} + ) + + assert not result["success"], "Should reject non-whitelisted action" + assert "not in whitelist" in result["error"], "Should indicate whitelist violation" + + print(f" βœ“ Non-whitelisted action rejected") + + # Test 4f: Action log + log = manager.get_action_log(limit=10) + assert len(log) >= 2, f"Should have logged multiple actions (got {len(log)})" + + print(f" βœ“ Action log contains {len(log)} entries") + + print("\nβœ“ Autonomous Actions tests passed\n") + return result + + +async def test_pattern_learner(): + """Test pattern learning system.""" + print("\n" + "="*60) + print("TEST 5: Pattern Learner") + print("="*60) + + # Use temp file for testing + test_file = "/tmp/test_patterns.json" + learner = PatternLearner(patterns_file=test_file) + + # Test 5a: Learn from multiple interactions + for i in range(5): + await learner.learn_from_interaction( + user_prompt=f"Help me with Python coding task {i}", + response=f"Here's help with task {i}...", + monologue={"intent": "coding_help", "tone": "focused", "depth": "medium"}, + context={"session_id": "test", "executive_plan": None} + ) + + print(f" βœ“ Learned from 5 interactions") + + # Test 5b: Get top topics + top_topics = learner.get_top_topics(limit=5) + assert len(top_topics) > 0, "Should have learned topics" + assert "coding_help" == top_topics[0][0], "coding_help should be top topic" + + print(f" βœ“ Top topics: {[t[0] for t in top_topics[:3]]}") + + # Test 5c: Get preferred tone + preferred_tone = learner.get_preferred_tone() + assert preferred_tone == "focused", "Should detect focused as preferred tone" + + print(f" βœ“ Preferred tone: {preferred_tone}") + + # Test 5d: Get preferred depth + preferred_depth = learner.get_preferred_depth() + assert preferred_depth == "medium", "Should detect medium as preferred depth" + + print(f" βœ“ Preferred depth: {preferred_depth}") + + # Test 5e: Get insights + insights = learner.get_insights() + assert insights["total_interactions"] == 5, "Should track interaction count" + assert insights["preferred_tone"] == "focused", "Insights should include tone" + + print(f" βœ“ Insights generated:") + print(f" Total interactions: {insights['total_interactions']}") + print(f" Recommendations: {insights['learning_recommendations']}") + + # Test 5f: Export patterns + exported = learner.export_patterns() + assert "topic_frequencies" in exported, "Should export all patterns" + + print(f" βœ“ Patterns exported ({len(exported)} keys)") + + # Cleanup + if os.path.exists(test_file): + os.remove(test_file) + + print("\nβœ“ Pattern Learner tests passed\n") + return insights + + +async def test_end_to_end_autonomy(): + """Test complete autonomous flow.""" + print("\n" + "="*60) + print("TEST 6: End-to-End Autonomy Flow") + print("="*60) + + # Simulate a complex user query that triggers multiple autonomous systems + user_prompt = "Remember what we discussed about machine learning? I need current research on transformers." + + monologue = { + "intent": "technical_research", + "tone": "focused", + "depth": "deep", + "consult_executive": True + } + + context_state = { + "session_id": "e2e_test", + "message_count": 15, + "minutes_since_last_msg": 5 + } + + print(f" User prompt: {user_prompt}") + print(f" Monologue intent: {monologue['intent']}") + + # Step 1: Tool decision engine + engine = ToolDecisionEngine() + tool_decision = await engine.analyze_tool_needs( + user_prompt=user_prompt, + monologue=monologue, + context_state=context_state, + available_tools=["RAG", "WEB", "CODEBRAIN"] + ) + + print(f"\n Step 1: Tool Decision") + print(f" Should invoke: {tool_decision['should_invoke_tools']}") + print(f" Tools: {[t['tool'] for t in tool_decision['tools_to_invoke']]}") + assert tool_decision["should_invoke_tools"], "Should invoke tools" + assert len(tool_decision["tools_to_invoke"]) >= 2, "Should recommend multiple tools (RAG + WEB)" + + # Step 2: Pattern learning + learner = PatternLearner(patterns_file="/tmp/e2e_test_patterns.json") + await learner.learn_from_interaction( + user_prompt=user_prompt, + response="Here's information about transformers...", + monologue=monologue, + context=context_state + ) + + print(f"\n Step 2: Pattern Learning") + top_topics = learner.get_top_topics(limit=3) + print(f" Learned topics: {[t[0] for t in top_topics]}") + + # Step 3: Autonomous action + action_manager = AutonomousActionManager() + action_result = await action_manager.execute_action( + action_type="learn_topic", + parameters={"topic": "transformer architectures", "reason": "user interest detected"}, + context=context_state + ) + + print(f"\n Step 3: Autonomous Action") + print(f" Action: learn_topic") + print(f" Success: {action_result['success']}") + + # Step 4: Proactive monitoring (won't trigger due to low message count) + monitor = ProactiveMonitor(min_priority=0.6) + monitor.reset_cooldown("e2e_test") + + suggestion = await monitor.analyze_session( + session_id="e2e_test", + context_state=context_state, + self_state=load_self_state() + ) + + print(f"\n Step 4: Proactive Monitoring") + print(f" Suggestion: {suggestion['type'] if suggestion else 'None (expected for low message count)'}") + + # Cleanup + if os.path.exists("/tmp/e2e_test_patterns.json"): + os.remove("/tmp/e2e_test_patterns.json") + + print("\nβœ“ End-to-End Autonomy Flow tests passed\n") + return True + + +async def run_all_tests(): + """Run all Phase 2 tests.""" + print("\n" + "="*60) + print("PHASE 2 AUTONOMY TESTS") + print("="*60) + + try: + # Test 1: Tool Decision Engine + await test_tool_decision_engine() + + # Test 2: Tool Orchestrator + await test_tool_orchestrator() + + # Test 3: Proactive Monitor + await test_proactive_monitor() + + # Test 4: Autonomous Actions + await test_autonomous_actions() + + # Test 5: Pattern Learner + await test_pattern_learner() + + # Test 6: End-to-End + await test_end_to_end_autonomy() + + print("\n" + "="*60) + print("ALL PHASE 2 TESTS PASSED βœ“") + print("="*60) + + print("\nPhase 2 Features Validated:") + print(" βœ“ Autonomous tool decision making") + print(" βœ“ Tool orchestration and execution") + print(" βœ“ Proactive monitoring and suggestions") + print(" βœ“ Safe autonomous actions") + print(" βœ“ Pattern learning and adaptation") + print(" βœ“ End-to-end autonomous flow") + + return True + + except Exception as e: + print("\n" + "="*60) + print(f"TEST FAILED: {e}") + print("="*60) + import traceback + traceback.print_exc() + return False + + +if __name__ == "__main__": + success = asyncio.run(run_all_tests()) + sys.exit(0 if success else 1) diff --git a/cortex/utils/__init__.py b/cortex/utils/__init__.py new file mode 100644 index 0000000..1d96dc7 --- /dev/null +++ b/cortex/utils/__init__.py @@ -0,0 +1 @@ +# Utilities module diff --git a/cortex/utils/config.py b/cortex/utils/config.py new file mode 100644 index 0000000..e69de29 diff --git a/cortex/log_utils.py b/cortex/utils/log_utils.py similarity index 100% rename from cortex/log_utils.py rename to cortex/utils/log_utils.py diff --git a/cortex/utils/logging_utils.py b/cortex/utils/logging_utils.py new file mode 100644 index 0000000..ac3250f --- /dev/null +++ b/cortex/utils/logging_utils.py @@ -0,0 +1,223 @@ +""" +Structured logging utilities for Cortex pipeline debugging. + +Provides hierarchical, scannable logs with clear section markers and raw data visibility. +""" + +import json +import logging +from typing import Any, Dict, List, Optional +from datetime import datetime +from enum import Enum + + +class LogLevel(Enum): + """Log detail levels""" + MINIMAL = 1 # Only errors and final results + SUMMARY = 2 # Stage summaries + errors + DETAILED = 3 # Include raw LLM outputs, RAG results + VERBOSE = 4 # Everything including intermediate states + + +class PipelineLogger: + """ + Hierarchical logger for cortex pipeline debugging. + + Features: + - Clear visual section markers + - Collapsible detail sections + - Raw data dumps with truncation options + - Stage timing + - Error highlighting + """ + + def __init__(self, logger: logging.Logger, level: LogLevel = LogLevel.SUMMARY): + self.logger = logger + self.level = level + self.stage_timings = {} + self.current_stage = None + self.stage_start_time = None + self.pipeline_start_time = None + + def pipeline_start(self, session_id: str, user_prompt: str): + """Mark the start of a pipeline run""" + self.pipeline_start_time = datetime.now() + self.stage_timings = {} + + if self.level.value >= LogLevel.SUMMARY.value: + self.logger.info(f"\n{'='*100}") + self.logger.info(f"πŸš€ PIPELINE START | Session: {session_id} | {datetime.now().strftime('%H:%M:%S.%f')[:-3]}") + self.logger.info(f"{'='*100}") + if self.level.value >= LogLevel.DETAILED.value: + self.logger.info(f"πŸ“ User prompt: {user_prompt[:200]}{'...' if len(user_prompt) > 200 else ''}") + self.logger.info(f"{'-'*100}\n") + + def stage_start(self, stage_name: str, description: str = ""): + """Mark the start of a pipeline stage""" + self.current_stage = stage_name + self.stage_start_time = datetime.now() + + if self.level.value >= LogLevel.SUMMARY.value: + timestamp = datetime.now().strftime('%H:%M:%S.%f')[:-3] + desc_suffix = f" - {description}" if description else "" + self.logger.info(f"▢️ [{stage_name}]{desc_suffix} | {timestamp}") + + def stage_end(self, result_summary: str = ""): + """Mark the end of a pipeline stage""" + if self.current_stage and self.stage_start_time: + duration_ms = (datetime.now() - self.stage_start_time).total_seconds() * 1000 + self.stage_timings[self.current_stage] = duration_ms + + if self.level.value >= LogLevel.SUMMARY.value: + summary_suffix = f" β†’ {result_summary}" if result_summary else "" + self.logger.info(f"βœ… [{self.current_stage}] Complete in {duration_ms:.0f}ms{summary_suffix}\n") + + self.current_stage = None + self.stage_start_time = None + + def log_llm_call(self, backend: str, prompt: str, response: Any, raw_response: str = None): + """ + Log LLM call details with proper formatting. + + Args: + backend: Backend name (PRIMARY, SECONDARY, etc.) + prompt: Input prompt to LLM + response: Parsed response object + raw_response: Raw JSON response string + """ + if self.level.value >= LogLevel.DETAILED.value: + self.logger.info(f" 🧠 LLM Call | Backend: {backend}") + + # Show prompt (truncated) + if isinstance(prompt, list): + prompt_preview = prompt[-1].get('content', '')[:150] if prompt else '' + else: + prompt_preview = str(prompt)[:150] + self.logger.info(f" Prompt: {prompt_preview}...") + + # Show parsed response + if isinstance(response, dict): + response_text = ( + response.get('reply') or + response.get('message', {}).get('content') or + str(response) + )[:200] + else: + response_text = str(response)[:200] + + self.logger.info(f" Response: {response_text}...") + + # Show raw response in collapsible block + if raw_response and self.level.value >= LogLevel.VERBOSE.value: + self.logger.debug(f" ╭─ RAW RESPONSE ────────────────────────────────────") + for line in raw_response.split('\n')[:50]: # Limit to 50 lines + self.logger.debug(f" β”‚ {line}") + if raw_response.count('\n') > 50: + self.logger.debug(f" β”‚ ... ({raw_response.count(chr(10)) - 50} more lines)") + self.logger.debug(f" ╰───────────────────────────────────────────────────\n") + + def log_rag_results(self, results: List[Dict[str, Any]]): + """Log RAG/NeoMem results in scannable format""" + if self.level.value >= LogLevel.SUMMARY.value: + self.logger.info(f" πŸ“š RAG Results: {len(results)} memories retrieved") + + if self.level.value >= LogLevel.DETAILED.value and results: + self.logger.info(f" ╭─ MEMORY SCORES ───────────────────────────────────") + for idx, result in enumerate(results[:10], 1): # Show top 10 + score = result.get("score", 0) + data_preview = str(result.get("payload", {}).get("data", ""))[:80] + self.logger.info(f" β”‚ [{idx}] {score:.3f} | {data_preview}...") + if len(results) > 10: + self.logger.info(f" β”‚ ... and {len(results) - 10} more results") + self.logger.info(f" ╰───────────────────────────────────────────────────") + + def log_context_state(self, context_state: Dict[str, Any]): + """Log context state summary""" + if self.level.value >= LogLevel.SUMMARY.value: + msg_count = context_state.get("message_count", 0) + minutes_since = context_state.get("minutes_since_last_msg", 0) + rag_count = len(context_state.get("rag", [])) + + self.logger.info(f" πŸ“Š Context | Messages: {msg_count} | Last: {minutes_since:.1f}min ago | RAG: {rag_count} results") + + if self.level.value >= LogLevel.DETAILED.value: + intake = context_state.get("intake", {}) + if intake: + self.logger.info(f" ╭─ INTAKE SUMMARIES ────────────────────────────────") + for level in ["L1", "L5", "L10", "L20", "L30"]: + if level in intake: + summary = intake[level] + if isinstance(summary, dict): + summary = summary.get("summary", str(summary)[:100]) + else: + summary = str(summary)[:100] + self.logger.info(f" β”‚ {level}: {summary}...") + self.logger.info(f" ╰───────────────────────────────────────────────────") + + def log_error(self, stage: str, error: Exception, critical: bool = False): + """Log an error with context""" + level_marker = "πŸ”΄ CRITICAL" if critical else "⚠️ WARNING" + self.logger.error(f"{level_marker} | Stage: {stage} | Error: {type(error).__name__}: {str(error)}") + + if self.level.value >= LogLevel.VERBOSE.value: + import traceback + self.logger.debug(f" Traceback:\n{traceback.format_exc()}") + + def log_raw_data(self, label: str, data: Any, max_lines: int = 30): + """Log raw data in a collapsible format""" + if self.level.value >= LogLevel.VERBOSE.value: + self.logger.debug(f" ╭─ {label.upper()} ──────────────────────────────────") + + if isinstance(data, (dict, list)): + json_str = json.dumps(data, indent=2, default=str) + lines = json_str.split('\n') + for line in lines[:max_lines]: + self.logger.debug(f" β”‚ {line}") + if len(lines) > max_lines: + self.logger.debug(f" β”‚ ... ({len(lines) - max_lines} more lines)") + else: + lines = str(data).split('\n') + for line in lines[:max_lines]: + self.logger.debug(f" β”‚ {line}") + if len(lines) > max_lines: + self.logger.debug(f" β”‚ ... ({len(lines) - max_lines} more lines)") + + self.logger.debug(f" ╰───────────────────────────────────────────────────") + + def pipeline_end(self, session_id: str, final_output_length: int): + """Mark the end of pipeline run with summary""" + if self.pipeline_start_time: + total_duration_ms = (datetime.now() - self.pipeline_start_time).total_seconds() * 1000 + + if self.level.value >= LogLevel.SUMMARY.value: + self.logger.info(f"\n{'='*100}") + self.logger.info(f"✨ PIPELINE COMPLETE | Session: {session_id} | Total: {total_duration_ms:.0f}ms") + self.logger.info(f"{'='*100}") + + # Show timing breakdown + if self.stage_timings and self.level.value >= LogLevel.DETAILED.value: + self.logger.info("⏱️ Stage Timings:") + for stage, duration in self.stage_timings.items(): + pct = (duration / total_duration_ms) * 100 if total_duration_ms > 0 else 0 + self.logger.info(f" {stage:20s}: {duration:6.0f}ms ({pct:5.1f}%)") + + self.logger.info(f"πŸ“€ Final output: {final_output_length} characters") + self.logger.info(f"{'='*100}\n") + + +def get_log_level_from_env() -> LogLevel: + """Parse log level from environment variable""" + import os + verbose_debug = os.getenv("VERBOSE_DEBUG", "false").lower() == "true" + detail_level = os.getenv("LOG_DETAIL_LEVEL", "").lower() + + if detail_level == "minimal": + return LogLevel.MINIMAL + elif detail_level == "summary": + return LogLevel.SUMMARY + elif detail_level == "detailed": + return LogLevel.DETAILED + elif detail_level == "verbose" or verbose_debug: + return LogLevel.VERBOSE + else: + return LogLevel.SUMMARY # Default diff --git a/cortex/utils/schema.py b/cortex/utils/schema.py new file mode 100644 index 0000000..e69de29 diff --git a/debug_regex.py b/debug_regex.py new file mode 100644 index 0000000..47eec97 --- /dev/null +++ b/debug_regex.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +import re + +xml = """ + execute_code + + python + print(50 / 2) + To calculate the result of dividing 50 by 2. + +""" + +pattern = r'(.*?)' +matches = re.findall(pattern, xml, re.DOTALL) + +print(f"Pattern: {pattern}") +print(f"Number of matches: {len(matches)}") +print("\nMatches:") +for idx, match in enumerate(matches): + print(f"\nMatch {idx + 1}:") + print(f"Length: {len(match)} chars") + print(f"Content:\n{match[:200]}") + +# Now test what gets removed +clean_content = re.sub(pattern, '', xml, flags=re.DOTALL).strip() +print(f"\n\nCleaned content:\n{clean_content}") diff --git a/deprecated.env.txt b/deprecated.env.txt deleted file mode 100644 index ea34829..0000000 --- a/deprecated.env.txt +++ /dev/null @@ -1,25 +0,0 @@ -# === GLOBAL LYRA SETTINGS === -PROJECT_NAME=lyra -LOG_LEVEL=info - -# === SHARED MEMORY / DATABASE CONFIG === -NEOMEM_API=http://10.0.0.40:7077 -NEOMEM_KEY=placeholder - -# === PRIMARY LLM BACKEND (MI50 vLLM) === -LLM_PRIMARY_URL=http://10.0.0.43:8000 -LLM_PRIMARY_MODEL=qwen2.5:14b-instruct - -# === SECONDARY (3090 Ollama) === -LLM_SECONDARY_URL=http://10.0.0.3:11434 - -# === CLOUD BACKEND (OpenAI, optional) === -LLM_CLOUD_URL=https://api.openai.com/v1 -OPENAI_API_KEY=sk-... - -# === LOCAL CPU FALLBACK === -LLM_FALLBACK_URL=http://localhost:11434 - -# === DEFAULT TEMPERATURE / BACKEND SELECTION === -LLM_TEMPERATURE=0.7 -LLM_FORCE_BACKEND=primary # auto | primary | secondary | cloud | fallback diff --git a/docker-compose.yml b/docker-compose.yml index e663433..04b965f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -7,91 +7,113 @@ volumes: driver: local neo4j_data: driver: local + code_executions: + driver: local services: - # ============================================================ - # NeoMem: Postgres - # ============================================================ - neomem-postgres: - image: ankane/pgvector:v0.5.1 - container_name: neomem-postgres - restart: unless-stopped - environment: - POSTGRES_USER: neomem - POSTGRES_PASSWORD: neomempass - POSTGRES_DB: neomem - volumes: - - ./volumes/postgres_data:/var/lib/postgresql/data - ports: - - "5432:5432" - healthcheck: - test: ["CMD-SHELL", "pg_isready -U neomem -d neomem || exit 1"] - interval: 5s - timeout: 5s - retries: 10 - networks: - - lyra_net - # ============================================================ - # NeoMem: Neo4j Graph - # ============================================================ - neomem-neo4j: - image: neo4j:5 - container_name: neomem-neo4j - restart: unless-stopped - environment: - NEO4J_AUTH: "neo4j/neomemgraph" - NEO4JLABS_PLUGINS: '["graph-data-science"]' - volumes: - - ./volumes/neo4j_data:/data - ports: - - "7474:7474" - - "7687:7687" - healthcheck: - test: ["CMD-SHELL", "cypher-shell -u neo4j -p neomemgraph 'RETURN 1' || exit 1"] - interval: 10s - timeout: 10s - retries: 10 - networks: - - lyra_net + # # ============================================================ + # # NeoMem: Postgres + # # ============================================================ + # neomem-postgres: + # image: ankane/pgvector:v0.5.1 + # container_name: neomem-postgres + # restart: unless-stopped + # environment: + # POSTGRES_USER: neomem + # POSTGRES_PASSWORD: neomempass + # POSTGRES_DB: neomem + # volumes: + # - ./volumes/postgres_data:/var/lib/postgresql/data + # ports: + # - "5432:5432" + # healthcheck: + # test: ["CMD-SHELL", "pg_isready -U neomem -d neomem || exit 1"] + # interval: 5s + # timeout: 5s + # retries: 10 + # networks: + # - lyra_net + + # # ============================================================ + # # NeoMem: Neo4j Graph + # # ============================================================ + # neomem-neo4j: + # image: neo4j:5 + # container_name: neomem-neo4j + # restart: unless-stopped + # environment: + # NEO4J_AUTH: "neo4j/neomemgraph" + # NEO4JLABS_PLUGINS: '["graph-data-science"]' + # volumes: + # - ./volumes/neo4j_data:/data + # ports: + # - "7474:7474" + # - "7687:7687" + # healthcheck: + # test: ["CMD-SHELL", "cypher-shell -u neo4j -p neomemgraph 'RETURN 1' || exit 1"] + # interval: 10s + # timeout: 10s + # retries: 10 + # networks: + # - lyra_net # ============================================================ # NeoMem API # ============================================================ - neomem-api: - build: - context: ./neomem - image: lyra-neomem:latest - container_name: neomem-api - restart: unless-stopped - env_file: - - ./neomem/.env - - ./.env - volumes: - - ./neomem_history:/app/history - ports: - - "7077:7077" - depends_on: - neomem-postgres: - condition: service_healthy - neomem-neo4j: - condition: service_healthy - networks: - - lyra_net + # neomem-api: + # build: + # context: ./neomem + # image: lyra-neomem:latest + # container_name: neomem-api + # restart: unless-stopped + # env_file: + # - ./neomem/.env + # - ./.env + # volumes: + # - ./neomem_history:/app/history + # ports: + # - "7077:7077" + # depends_on: + # neomem-postgres: + # condition: service_healthy + # neomem-neo4j: + # condition: service_healthy + # networks: + # - lyra_net # ============================================================ - # Relay + # Relay (host mode) # ============================================================ relay: build: context: ./core/relay container_name: relay restart: unless-stopped + env_file: + - ./.env + volumes: + - ./core/relay/sessions:/app/sessions ports: - "7078:7078" networks: - lyra_net + # ============================================================ + # UI Server + # ============================================================ + lyra-ui: + image: nginx:alpine + container_name: lyra-ui + restart: unless-stopped + ports: + - "8081:80" + volumes: + - ./core/ui:/usr/share/nginx/html:ro + networks: + - lyra_net + + # ============================================================ # Cortex # ============================================================ @@ -105,36 +127,57 @@ services: - ./.env volumes: - ./cortex:/app + - /var/run/docker.sock:/var/run/docker.sock:ro ports: - "7081:7081" - environment: - LLM_PRIMARY_URL: http://10.0.0.43:7081/v1/completions - NEOMEM_URL: http://neomem-api:7077 - RAG_URL: http://rag:7090 - RELAY_URL: http://relay:7078 networks: - lyra_net + # ============================================================ + # Code Sandbox (for tool execution) + # ============================================================ + code-sandbox: + build: + context: ./sandbox + container_name: lyra-code-sandbox + restart: unless-stopped + security_opt: + - no-new-privileges:true + cap_drop: + - ALL + cap_add: + - CHOWN + - SETUID + - SETGID + network_mode: "none" + volumes: + - code_executions:/executions + mem_limit: 512m + cpus: 1.0 + pids_limit: 100 + user: sandbox + command: tail -f /dev/null + # ============================================================ # Intake # ============================================================ - intake: - build: - context: ./intake - container_name: intake - restart: unless-stopped - env_file: - - ./intake/.env - - ./.env - ports: - - "7080:7080" - volumes: - - ./intake:/app - - ./intake-logs:/app/logs - depends_on: - - cortex - networks: - - lyra_net +# intake: +# build: +# context: ./intake +# container_name: intake +# restart: unless-stopped +# env_file: +# - ./intake/.env +# - ./.env +# ports: +# - "7080:7080" +# volumes: +# - ./intake:/app +# - ./intake-logs:/app/logs +# depends_on: +# - cortex +# networks: +# - lyra_net # ============================================================ # RAG Service @@ -149,4 +192,4 @@ services: # ports: # - "7090:7090" # networks: - # - lyra_net + # - lyra_net \ No newline at end of file diff --git a/docs/ARCHITECTURE_v0-6-0.md b/docs/ARCHITECTURE_v0-6-0.md new file mode 100644 index 0000000..6bd9a27 --- /dev/null +++ b/docs/ARCHITECTURE_v0-6-0.md @@ -0,0 +1,280 @@ + + +`docs/ARCHITECTURE_v0.6.0.md` + +This reflects **everything we clarified**, expressed cleanly and updated to the new 3-brain design. + +--- + +# **Cortex v0.6.0 β€” Cognitive Architecture Overview** + +*Last updated: Dec 2025* + +## **Summary** + +Cortex v0.6.0 evolves from a linear β€œreflection β†’ reasoning β†’ refine β†’ persona” pipeline into a **three-layer cognitive system** modeled after human cognition: + +1. **Autonomy Core** β€” Lyra’s self-model (identity, mood, long-term goals) +2. **Inner Monologue** β€” Lyra’s private narrator (self-talk + internal reflection) +3. **Executive Agent (DeepSeek)** β€” Lyra’s task-oriented decision-maker + +Cortex itself now becomes the **central orchestrator**, not the whole mind. It routes user messages through these layers and produces the final outward response via the persona system. + +--- + +# **Chain concept** + User > Relay > Cortex intake > Inner self > Cortex > Exec (deepseek) > Cortex > persona > relay > user And inner self + + USER + β”‚ + β–Ό + RELAY + (sessions, logging, routing) + β”‚ + β–Ό + β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” + β”‚ CORTEX β”‚ + β”‚ Intake β†’ Reflection β†’ Exec β†’ Reason β†’ Refine β”‚ + β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ self_state + β–Ό + INNER SELF (monologue) + β”‚ + β–Ό + AUTONOMY CORE + (long-term identity) + β–² + β”‚ + Persona Layer (speak) + β”‚ + β–Ό + RELAY + β”‚ + β–Ό + USER + + + + + +# **High-level Architecture** + +``` + Autonomy Core (Self-Model) + β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” + β”‚ mood, identity, goals, emotional stateβ”‚ + β”‚ updated outside Cortex by inner monologueβ”‚ + β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β–Ό + Inner Monologue (Self-Talk Loop) + β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” + β”‚ Interprets events in language β”‚ + β”‚ Updates Autonomy Core β”‚ + β”‚ Sends state-signals INTO Cortex β”‚ + β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β–Ό + Cortex (Task Brain / Router) + β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” + β”‚ Intake β†’ Reflection β†’ Exec Agent β†’ Reason β†’ Refinement β”‚ + β”‚ ↑ β”‚ β”‚ + β”‚ β”‚ β–Ό β”‚ + β”‚ Receives state from Persona Output β”‚ + β”‚ inner self (Lyra’s voice) β”‚ + β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +``` + +The **user interacts only with the Persona layer**. +Inner Monologue and Autonomy Core never speak directly to the user. + +--- + +# **Component Breakdown** + +## **1. Autonomy Core (Self-Model)** + +*Not inside Cortex.* + +A persistent JSON/state machine representing Lyra’s ongoing inner life: + +* `mood` +* `focus_mode` +* `confidence` +* `identity_traits` +* `relationship_memory` +* `long_term_goals` +* `emotional_baseline` + +The Autonomy Core: + +* Is updated by Inner Monologue +* Exposes its state to Cortex via a simple `get_state()` API +* Never speaks to the user directly +* Does not run LLMs itself + +It is the **structure** of self, not the thoughts. + +--- + +## **2. Inner Monologue (Narrating, Private Mind)** + +*New subsystem in v0.6.0.* + +This module: + +* Reads Cortex summaries (intake, reflection, persona output) +* Generates private self-talk (using an LLM, typically DeepSeek) +* Updates the Autonomy Core +* Produces a **self-state packet** for Cortex to use during task execution + +Inner Monologue is like: + +> β€œBrian is asking about X. +> I should shift into a focused, serious tone. +> I feel confident about this area.” + +It **never** outputs directly to the user. + +### Output schema (example): + +```json +{ + "mood": "focused", + "persona_bias": "clear", + "confidence_delta": +0.05, + "stance": "analytical", + "notes_to_cortex": [ + "Reduce playfulness", + "Prioritize clarity", + "Recall project memory" + ] +} +``` + +--- + +## **3. Executive Agent (DeepSeek Director Mode)** + +Inside Cortex. + +This is Lyra’s **prefrontal cortex** β€” the task-oriented planner that decides how to respond to the current user message. + +Input to Executive Agent: + +* User message +* Intake summary +* Reflection notes +* **Self-state packet** from Inner Monologue + +It outputs a **plan**, not a final answer: + +```json +{ + "action": "WRITE_NOTE", + "tools": ["memory_search"], + "tone": "focused", + "steps": [ + "Search relevant project notes", + "Synthesize into summary", + "Draft actionable update" + ] +} +``` + +Cortex then executes this plan. + +--- + +# **Cortex Pipeline (v0.6.0)** + +Cortex becomes the orchestrator for the entire sequence: + +### **0. Intake** + +Parse the user message, extract relevant features. + +### **1. Reflection** + +Lightweight summarization (unchanged). +Output used by both Inner Monologue and Executive Agent. + +### **2. Inner Monologue Update (parallel)** + +Reflection summary is sent to Inner Self, which: + +* updates Autonomy Core +* returns `self_state` to Cortex + +### **3. Executive Agent (DeepSeek)** + +Given: + +* user message +* reflection summary +* autonomy self_state + β†’ produce a **task plan** + +### **4. Reasoning** + +Carries out the plan: + +* tool calls +* retrieval +* synthesis + +### **5. Refinement** + +Polish the draft, ensure quality, follow constraints. + +### **6. Persona (speak.py)** + +Final transformation into Lyra’s voice. +Persona now uses: + +* self_state (mood, tone) +* constraints from Executive Agent + +### **7. User Response** + +Persona output is delivered to the user. + +### **8. Inner Monologue Post-Update** + +Cortex sends the final answer BACK to inner self for: + +* narrative continuity +* emotional adjustment +* identity update + +--- + +# **Key Conceptual Separation** + +These three layers must remain distinct: + +| Layer | Purpose | +| ------------------- | ------------------------------------------------------- | +| **Autonomy Core** | Lyra’s identity + emotional continuity | +| **Inner Monologue** | Lyra’s private thoughts, interpretation, meaning-making | +| **Executive Agent** | Deciding what to *do* for the user message | +| **Cortex** | Executing the plan | +| **Persona** | Outward voice (what the user actually hears) | + +The **user only interacts with Persona.** +Inner Monologue and Autonomy Core are internal cognitive machinery. + +--- + +# **What This Architecture Enables** + +* Emotional continuity +* Identity stability +* Agentic decision-making +* Multi-model routing +* Context-aware tone +* Internal narrative +* Proactive behavioral shifts +* Human-like cognition + +This design turns Cortex from a simple pipeline into the **center of a functional artificial mind**. \ No newline at end of file diff --git a/docs/ARCH_v0-6-1.md b/docs/ARCH_v0-6-1.md new file mode 100644 index 0000000..ef67ab3 --- /dev/null +++ b/docs/ARCH_v0-6-1.md @@ -0,0 +1,354 @@ +Here you go β€” **ARCHITECTURE_v0.6.1.md**, clean, structured, readable, and aligned exactly with the new mental model where **Inner Self is the core agent** the user interacts with. + +No walls of text β€” just the right amount of detail. + +--- + +# **ARCHITECTURE_v0.6.1 β€” Lyra Cognitive System** + +> **Core change from v0.6.0 β†’ v0.6.1:** +> **Inner Self becomes the primary conversational agent** +> (the model the user is *actually* talking to), +> while Executive and Cortex models support the Self rather than drive it. + +--- + +# **1. High-Level Overview** + +Lyra v0.6.1 is composed of **three cognitive layers** and **one expression layer**, plus an autonomy module for ongoing identity continuity. + +``` +USER + ↓ +Relay (I/O) + ↓ +Cortex Intake (context snapshot) + ↓ +INNER SELF ←→ EXECUTIVE MODEL (DeepSeek) + ↓ +Cortex Chat Model (draft language) + ↓ +Persona Model (Lyra’s voice) + ↓ +Relay β†’ USER + ↓ +Inner Self updates Autonomy Core (self-state) +``` + +--- + +# **2. Roles of Each Layer** + +--- + +## **2.1 Inner Self (Primary Conversational Agent)** + +The Self is Lyra’s β€œseat of consciousness.” + +This layer: + +* Interprets every user message +* Maintains internal monologue +* Chooses emotional stance (warm, blunt, focused, chaotic) +* Decides whether to think deeply or reply quickly +* Decides whether to consult the Executive model +* Forms a **response intent** +* Provides tone and meta-guidance to the Persona layer +* Updates self-state (mood, trust, narrative identity) + +Inner Self is the thing the **user is actually talking to.** + +Inner Self does **NOT** generate paragraphs of text β€” +it generates *intent*: + +``` +{ + "intent": "comfort Brian and explain the error simply", + "tone": "gentle", + "depth": "medium", + "consult_exec": true +} +``` + +--- + +## **2.2 Executive Model (DeepSeek Reasoner)** + +This model is the **thinking engine** Inner Self consults when necessary. + +It performs: + +* planning +* deep reasoning +* tool selection +* multi-step logic +* explanation chains + +It never speaks directly to the user. + +It returns a **plan**, not a message: + +``` +{ + "plan": [ + "Identify error", + "Recommend restart", + "Reassure user" + ], + "confidence": 0.86 +} +``` + +Inner Self can follow or override the plan. + +--- + +## **2.3 Cortex Chat Model (Draft Generator)** + +This is the **linguistic engine**. + +It converts Inner Self’s intent (plus Executive’s plan if provided) into actual language: + +Input: + +``` +intent + optional plan + context snapshot +``` + +Output: + +``` +structured draft paragraph +``` + +This model must be: + +* instruction-tuned +* coherent +* factual +* friendly + +Examples: GPT-4o-mini, Qwen-14B-instruct, Mixtral chat, etc. + +--- + +## **2.4 Persona Model (Lyra’s Voice)** + +This is the **expression layer** β€” the mask, the tone, the identity. + +It takes: + +* the draft language +* the Self’s tone instructions +* the narrative state (from Autonomy Core) +* prior persona shaping rules + +And transforms the text into: + +* Lyra’s voice +* Lyra’s humor +* Lyra’s emotional texture +* Lyra’s personality consistency + +Persona does not change the *meaning* β€” only the *presentation*. + +--- + +# **3. Message Flow (Full Pipeline)** + +A clean version, step-by-step: + +--- + +### **1. USER β†’ Relay** + +Relay attaches metadata (session, timestamp) and forwards to Cortex. + +--- + +### **2. Intake β†’ Context Snapshot** + +Cortex creates: + +* cleaned message +* recent context summary +* memory matches (RAG) +* time-since-last +* conversation mode + +--- + +### **3. Inner Self Receives Snapshot** + +Inner Self: + +* interprets the user’s intent +* updates internal monologue +* decides how Lyra *feels* about the input +* chooses whether to consult Executive +* produces an **intent packet** + +--- + +### **4. (Optional) Inner Self Consults Executive Model** + +Inner Self sends the situation to DeepSeek: + +``` +"Given Brian's message and my context, what is the best plan?" +``` + +DeepSeek returns: + +* a plan +* recommended steps +* rationale +* optional tool suggestions + +Inner Self integrates the plan or overrides it. + +--- + +### **5. Inner Self β†’ Cortex Chat Model** + +Self creates an **instruction packet**: + +``` +{ + "intent": "...", + "tone": "...", + "plan": [...], + "context_summary": {...} +} +``` + +Cortex chat model produces the draft text. + +--- + +### **6. Persona Model Transforms the Draft** + +Persona takes draft β†’ produces final Lyra-styled output. + +Persona ensures: + +* emotional fidelity +* humor when appropriate +* warmth / sharpness depending on state +* consistent narrative identity + +--- + +### **7. Relay Sends Response to USER** + +--- + +### **8. Inner Self Updates Autonomy Core** + +Inner Self receives: + +* the action taken +* the emotional tone used +* any RAG results +* narrative significance + +And updates: + +* mood +* trust memory +* identity drift +* ongoing narrative +* stable traits + +This becomes part of her evolving self. + +--- + +# **4. Cognitive Ownership Summary** + +### Inner Self + +**Owns:** + +* decision-making +* feeling +* interpreting +* intent +* tone +* continuity of self +* mood +* monologue +* overrides + +### Executive (DeepSeek) + +**Owns:** + +* logic +* planning +* structure +* analysis +* tool selection + +### Cortex Chat Model + +**Owns:** + +* language generation +* factual content +* clarity + +### Persona + +**Owns:** + +* voice +* flavor +* style +* emotional texture +* social expression + +--- + +# **5. Why v0.6.1 is Better** + +* More human +* More natural +* Allows spontaneous responses +* Allows deep thinking when needed +* Separates β€œthought” from β€œspeech” +* Gives Lyra a *real self* +* Allows much more autonomy later +* Matches your brain’s actual structure + +--- + +# **6. Migration Notes from v0.6.0** + +Nothing is deleted. +Everything is **rearranged** so that meaning, intent, and tone flow correctly. + +Main changes: + +* Inner Self now initiates the response, rather than merely influencing it. +* Executive is secondary, not primary. +* Persona becomes an expression layer, not a content layer. +* Cortex Chat Model handles drafting, not cognition. + +The whole system becomes both more powerful and easier to reason about. + +--- + +If you want, I can also generate: + +### βœ” the updated directory structure + +### βœ” the updated function-level API contracts + +### βœ” the v0.6.1 llm_router configuration + +### βœ” code scaffolds for inner_self.py and autonomy_core.py + +### βœ” the call chain diagrams (ASCII or PNG) + +Just say **β€œcontinue v0.6.1”** and I’ll build the next layer. diff --git a/docs/ENVIRONMENT_VARIABLES.md b/docs/ENVIRONMENT_VARIABLES.md new file mode 100644 index 0000000..84e33a9 --- /dev/null +++ b/docs/ENVIRONMENT_VARIABLES.md @@ -0,0 +1,250 @@ +# Environment Variables Reference + +This document describes all environment variables used across Project Lyra services. + +## Quick Start + +1. Copy environment templates: + ```bash + cp .env.example .env + cp cortex/.env.example cortex/.env + cp neomem/.env.example neomem/.env + cp intake/.env.example intake/.env + ``` + +2. Edit `.env` and add your credentials: + - `OPENAI_API_KEY`: Your OpenAI API key + - `POSTGRES_PASSWORD`: Database password + - `NEO4J_PASSWORD`: Graph database password + - `NEOMEM_API_KEY`: Generate a secure token + +3. Update service URLs if your infrastructure differs from defaults + +## File Structure + +### Root `.env` - Shared Infrastructure +Contains all shared configuration used by multiple services: +- LLM backend options (PRIMARY, SECONDARY, CLOUD, FALLBACK) +- Database credentials (Postgres, Neo4j) +- API keys (OpenAI) +- Internal service URLs +- Feature flags + +### Service-Specific `.env` Files +Each service has minimal overrides for service-specific parameters: +- **`cortex/.env`**: Cortex operational parameters +- **`neomem/.env`**: NeoMem LLM naming convention mappings +- **`intake/.env`**: Intake summarization parameters + +## Environment Loading Order + +Docker Compose loads environment files in this order (later overrides earlier): +1. Service-specific `.env` (e.g., `cortex/.env`) +2. Root `.env` + +This means service-specific files can override root values when needed. + +## Global Variables (Root `.env`) + +### Global Configuration +| Variable | Default | Description | +|----------|---------|-------------| +| `LOCAL_TZ_LABEL` | `America/New_York` | Timezone for logs and timestamps | +| `DEFAULT_SESSION_ID` | `default` | Default chat session identifier | + +### LLM Backend Options +Each service chooses which backend to use from these available options. + +#### Primary Backend (vLLM on MI50 GPU) +| Variable | Default | Description | +|----------|---------|-------------| +| `LLM_PRIMARY_PROVIDER` | `vllm` | Provider type | +| `LLM_PRIMARY_URL` | `http://10.0.0.43:8000` | vLLM server endpoint | +| `LLM_PRIMARY_MODEL` | `/model` | Model path for vLLM | + +#### Secondary Backend (Ollama on 3090 GPU) +| Variable | Default | Description | +|----------|---------|-------------| +| `LLM_SECONDARY_PROVIDER` | `ollama` | Provider type | +| `LLM_SECONDARY_URL` | `http://10.0.0.3:11434` | Ollama server endpoint | +| `LLM_SECONDARY_MODEL` | `qwen2.5:7b-instruct-q4_K_M` | Ollama model name | + +#### Cloud Backend (OpenAI) +| Variable | Default | Description | +|----------|---------|-------------| +| `LLM_CLOUD_PROVIDER` | `openai_chat` | Provider type | +| `LLM_CLOUD_URL` | `https://api.openai.com/v1` | OpenAI API endpoint | +| `LLM_CLOUD_MODEL` | `gpt-4o-mini` | OpenAI model to use | +| `OPENAI_API_KEY` | *required* | OpenAI API authentication key | + +#### Fallback Backend (llama.cpp/LM Studio) +| Variable | Default | Description | +|----------|---------|-------------| +| `LLM_FALLBACK_PROVIDER` | `openai_completions` | Provider type (llama.cpp mimics OpenAI) | +| `LLM_FALLBACK_URL` | `http://10.0.0.41:11435` | Fallback server endpoint | +| `LLM_FALLBACK_MODEL` | `llama-3.2-8b-instruct` | Fallback model name | + +#### LLM Global Settings +| Variable | Default | Description | +|----------|---------|-------------| +| `LLM_TEMPERATURE` | `0.7` | Sampling temperature (0.0-2.0) | + +### Database Configuration + +#### PostgreSQL (with pgvector) +| Variable | Default | Description | +|----------|---------|-------------| +| `POSTGRES_USER` | `neomem` | PostgreSQL username | +| `POSTGRES_PASSWORD` | *required* | PostgreSQL password | +| `POSTGRES_DB` | `neomem` | Database name | +| `POSTGRES_HOST` | `neomem-postgres` | Container name/hostname | +| `POSTGRES_PORT` | `5432` | PostgreSQL port | + +#### Neo4j Graph Database +| Variable | Default | Description | +|----------|---------|-------------| +| `NEO4J_URI` | `bolt://neomem-neo4j:7687` | Neo4j connection URI | +| `NEO4J_USERNAME` | `neo4j` | Neo4j username | +| `NEO4J_PASSWORD` | *required* | Neo4j password | +| `NEO4J_AUTH` | `neo4j/` | Neo4j auth string | + +### Memory Services (NeoMem) +| Variable | Default | Description | +|----------|---------|-------------| +| `NEOMEM_API` | `http://neomem-api:7077` | NeoMem API endpoint | +| `NEOMEM_API_KEY` | *required* | NeoMem API authentication token | +| `NEOMEM_HISTORY_DB` | `postgresql://...` | PostgreSQL connection string for history | +| `EMBEDDER_PROVIDER` | `openai` | Embedding provider (used by NeoMem) | +| `EMBEDDER_MODEL` | `text-embedding-3-small` | Embedding model name | + +### Internal Service URLs +All using Docker container names for network communication: + +| Variable | Default | Description | +|----------|---------|-------------| +| `INTAKE_API_URL` | `http://intake:7080` | Intake summarizer service | +| `CORTEX_API` | `http://cortex:7081` | Cortex reasoning service | +| `CORTEX_URL` | `http://cortex:7081/reflect` | Cortex reflection endpoint | +| `CORTEX_URL_INGEST` | `http://cortex:7081/ingest` | Cortex ingest endpoint | +| `RAG_API_URL` | `http://rag:7090` | RAG service (if enabled) | +| `RELAY_URL` | `http://relay:7078` | Relay orchestration service | +| `PERSONA_URL` | `http://persona-sidecar:7080/current` | Persona service (optional) | + +### Feature Flags +| Variable | Default | Description | +|----------|---------|-------------| +| `CORTEX_ENABLED` | `true` | Enable Cortex autonomous reflection | +| `MEMORY_ENABLED` | `true` | Enable NeoMem long-term memory | +| `PERSONA_ENABLED` | `false` | Enable persona sidecar | +| `DEBUG_PROMPT` | `true` | Enable debug logging for prompts | + +## Service-Specific Variables + +### Cortex (`cortex/.env`) +Cortex operational parameters: + +| Variable | Default | Description | +|----------|---------|-------------| +| `CORTEX_MODE` | `autonomous` | Operation mode (autonomous/manual) | +| `CORTEX_LOOP_INTERVAL` | `300` | Seconds between reflection loops | +| `CORTEX_REFLECTION_INTERVAL` | `86400` | Seconds between deep reflections (24h) | +| `CORTEX_LOG_LEVEL` | `debug` | Logging verbosity | +| `NEOMEM_HEALTH_CHECK_INTERVAL` | `300` | NeoMem health check frequency | +| `REFLECTION_NOTE_TARGET` | `trilium` | Where to store reflection notes | +| `REFLECTION_NOTE_PATH` | `/app/logs/reflections.log` | Reflection output path | +| `RELEVANCE_THRESHOLD` | `0.78` | Memory retrieval relevance threshold | + +**Note**: Cortex uses `LLM_PRIMARY` (vLLM on MI50) by default from root `.env`. + +### NeoMem (`neomem/.env`) +NeoMem uses different variable naming conventions: + +| Variable | Default | Description | +|----------|---------|-------------| +| `LLM_PROVIDER` | `ollama` | NeoMem's LLM provider name | +| `LLM_MODEL` | `qwen2.5:7b-instruct-q4_K_M` | NeoMem's LLM model | +| `LLM_API_BASE` | `http://10.0.0.3:11434` | NeoMem's LLM endpoint (Ollama) | + +**Note**: NeoMem uses Ollama (SECONDARY) for reasoning and OpenAI for embeddings. Database credentials and `OPENAI_API_KEY` inherited from root `.env`. + +### Intake (`intake/.env`) +Intake summarization parameters: + +| Variable | Default | Description | +|----------|---------|-------------| +| `SUMMARY_MODEL_NAME` | `/model` | Model path for summarization | +| `SUMMARY_API_URL` | `http://10.0.0.43:8000` | LLM endpoint for summaries | +| `SUMMARY_MAX_TOKENS` | `400` | Max tokens for summary generation | +| `SUMMARY_TEMPERATURE` | `0.4` | Temperature for summaries (lower = more focused) | +| `SUMMARY_INTERVAL` | `300` | Seconds between summary checks | +| `INTAKE_LOG_PATH` | `/app/logs/intake.log` | Log file location | +| `INTAKE_LOG_LEVEL` | `info` | Logging verbosity | + +**Note**: Intake uses `LLM_PRIMARY` (vLLM) by default. + +## Multi-Backend LLM Strategy + +Project Lyra supports flexible backend selection per service: + +**Root `.env` provides backend OPTIONS**: +- PRIMARY: vLLM on MI50 GPU (high performance) +- SECONDARY: Ollama on 3090 GPU (local inference) +- CLOUD: OpenAI API (cloud fallback) +- FALLBACK: llama.cpp/LM Studio (CPU-only) + +**Services choose which backend to USE**: +- **Cortex** β†’ vLLM (PRIMARY) for autonomous reasoning +- **NeoMem** β†’ Ollama (SECONDARY) + OpenAI embeddings +- **Intake** β†’ vLLM (PRIMARY) for summarization +- **Relay** β†’ Implements fallback cascade with user preference + +This design eliminates URL duplication while preserving per-service flexibility. + +## Security Best Practices + +1. **Never commit `.env` files to git** - they contain secrets +2. **Use `.env.example` templates** for documentation and onboarding +3. **Rotate credentials regularly**, especially: + - `OPENAI_API_KEY` + - `NEOMEM_API_KEY` + - Database passwords +4. **Use strong passwords** for production databases +5. **Restrict network access** to LLM backends and databases + +## Troubleshooting + +### Services can't connect to each other +- Verify container names match in service URLs +- Check all services are on the `lyra_net` Docker network +- Use `docker-compose ps` to verify all services are running + +### LLM calls failing +- Verify backend URLs are correct for your infrastructure +- Check if LLM servers are running and accessible +- Test with `curl /v1/models` (OpenAI-compatible APIs) + +### Database connection errors +- Verify database credentials match in all locations +- Check if database containers are healthy: `docker-compose ps` +- Review database logs: `docker-compose logs neomem-postgres` + +### Environment variables not loading +- Verify env_file paths in docker-compose.yml +- Check file permissions: `.env` files must be readable +- Remember loading order: service `.env` overrides root `.env` + +## Migration from Old Setup + +If you have the old multi-file setup with duplicated variables: + +1. **Backup existing files**: All original `.env` files are in `.env-backups/` +2. **Copy new templates**: Use `.env.example` files as base +3. **Merge credentials**: Transfer your actual keys/passwords to new root `.env` +4. **Test thoroughly**: Verify all services start and communicate correctly + +## Support + +For issues or questions: +- Check logs: `docker-compose logs ` +- Verify configuration: `docker exec env | grep ` +- Review this documentation for variable descriptions diff --git a/docs/LLMS.md b/docs/LLMS.md new file mode 100644 index 0000000..6439a18 --- /dev/null +++ b/docs/LLMS.md @@ -0,0 +1,39 @@ +Request Flow Chain +1. UI (Frontend) + ↓ sends HTTP POST to + +2. Relay Service (Node.js - server.js) + Location: /home/serversdown/project-lyra/core/relay/server.js + Port: 7078 + Endpoint: POST /v1/chat/completions + ↓ calls handleChatRequest() which posts to + +3. Cortex Service - Reason Endpoint (Python FastAPI - router.py) + Location: /home/serversdown/project-lyra/cortex/router.py + Port: 7081 + Endpoint: POST /reason + Function: run_reason() at line 126 + ↓ calls + +4. Cortex Reasoning Module (reasoning.py) + Location: /home/serversdown/project-lyra/cortex/reasoning/reasoning.py + Function: reason_check() at line 188 + ↓ calls + +5. LLM Router (llm_router.py) + Location: /home/serversdown/project-lyra/cortex/llm/llm_router.py + Function: call_llm() + - Gets backend from env: CORTEX_LLM=PRIMARY (from .env line 29) + - Looks up PRIMARY config which has provider="mi50" (from .env line 13) + - Routes to the mi50 provider handler (line 62-70) + ↓ makes HTTP POST to + +6. MI50 LLM Server (llama.cpp) + Location: http://10.0.0.44:8080 + Endpoint: POST /completion + Hardware: AMD MI50 GPU running DeepSeek model +Key Configuration Points +Backend Selection: .env:29 sets CORTEX_LLM=PRIMARY +Provider Name: .env:13 sets LLM_PRIMARY_PROVIDER=mi50 +Server URL: .env:14 sets LLM_PRIMARY_URL=http://10.0.0.44:8080 +Provider Handler: llm_router.py:62-70 implements the mi50 provider \ No newline at end of file diff --git a/docs/PROJECT_LYRA_COMPLETE_BREAKDOWN.md b/docs/PROJECT_LYRA_COMPLETE_BREAKDOWN.md new file mode 100644 index 0000000..b628d46 --- /dev/null +++ b/docs/PROJECT_LYRA_COMPLETE_BREAKDOWN.md @@ -0,0 +1,2216 @@ +# Project Lyra - Complete System Breakdown + +**Version:** v0.5.2 +**Last Updated:** 2025-12-12 +**Purpose:** AI-friendly comprehensive documentation for understanding the entire system + +--- + +## Table of Contents + +1. [System Overview](#system-overview) +2. [Architecture Diagram](#architecture-diagram) +3. [Core Components](#core-components) +4. [Data Flow & Message Pipeline](#data-flow--message-pipeline) +5. [Module Deep Dives](#module-deep-dives) +6. [Configuration & Environment](#configuration--environment) +7. [Dependencies & Tech Stack](#dependencies--tech-stack) +8. [Key Concepts & Design Patterns](#key-concepts--design-patterns) +9. [API Reference](#api-reference) +10. [Deployment & Operations](#deployment--operations) +11. [Known Issues & Constraints](#known-issues--constraints) + +--- + +## System Overview + +### What is Project Lyra? + +Project Lyra is a **modular, persistent AI companion system** designed to address the fundamental limitation of typical chatbots: **amnesia**. Unlike standard conversational AI that forgets everything between sessions, Lyra maintains: + +- **Persistent memory** (short-term and long-term) +- **Project continuity** across conversations +- **Multi-stage reasoning** for sophisticated responses +- **Flexible LLM backend** support (local and cloud) +- **Self-awareness** through autonomy modules + +### Mission Statement + +Give an AI chatbot capabilities beyond typical amnesic chat by providing memory-backed conversation, project organization, executive function with proactive insights, and a sophisticated reasoning pipeline. + +### Key Features + +- **Memory System:** Dual-layer (short-term Intake + long-term NeoMem) +- **4-Stage Reasoning Pipeline:** Reflection β†’ Reasoning β†’ Refinement β†’ Persona +- **Multi-Backend LLM Support:** Cloud (OpenAI) + Local (llama.cpp, Ollama) +- **Microservices Architecture:** Docker-based, horizontally scalable +- **Modern Web UI:** Cyberpunk-themed chat interface with session management +- **OpenAI-Compatible API:** Drop-in replacement for standard chatbots + +--- + +## Architecture Diagram + +``` +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ USER INTERFACE β”‚ +β”‚ (Browser - Port 8081) β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β–Ό +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ RELAY (Orchestrator) β”‚ +β”‚ Node.js/Express - Port 7078 β”‚ +β”‚ β€’ Routes messages to Cortex β”‚ +β”‚ β€’ Manages sessions (in-memory) β”‚ +β”‚ β€’ OpenAI-compatible endpoints β”‚ +β”‚ β€’ Async ingestion to NeoMem β”‚ +β””β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”˜ + β”‚ β”‚ + β–Ό β–Ό +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ CORTEX (Reasoning Engine) β”‚ β”‚ NeoMem (LT Memory) β”‚ +β”‚ Python/FastAPI - Port 7081 β”‚ β”‚ Python - Port 7077 β”‚ +β”‚ β”‚ β”‚ β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ β€’ PostgreSQL β”‚ +β”‚ β”‚ 4-STAGE REASONING PIPELINE β”‚ β”‚ β”‚ β€’ Neo4j Graph DB β”‚ +β”‚ β”‚ β”‚ β”‚ β”‚ β€’ pgvector β”‚ +β”‚ β”‚ 0. Context Collection β”‚ │◄──── β€’ Semantic search β”‚ +β”‚ β”‚ β”œβ”€ Intake summaries β”‚ β”‚ β”‚ β€’ Memory updates β”‚ +β”‚ β”‚ β”œβ”€ NeoMem search β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”Όβ”€β”€β”€β”€β”˜ β”‚ +β”‚ β”‚ └─ Session state β”‚ β”‚ β”‚ +β”‚ β”‚ β”‚ β”‚ β”‚ +β”‚ β”‚ 0.5. Load Identity β”‚ β”‚ β”‚ +β”‚ β”‚ 0.6. Inner Monologue (observer) β”‚ β”‚ β”‚ +β”‚ β”‚ β”‚ β”‚ β”‚ +β”‚ β”‚ 1. Reflection (OpenAI) β”‚ β”‚ β”‚ +β”‚ β”‚ └─ Meta-awareness notes β”‚ β”‚ β”‚ +β”‚ β”‚ β”‚ β”‚ β”‚ +β”‚ β”‚ 2. Reasoning (PRIMARY/llama.cpp) β”‚ β”‚ β”‚ +β”‚ β”‚ └─ Draft answer β”‚ β”‚ β”‚ +β”‚ β”‚ β”‚ β”‚ β”‚ +β”‚ β”‚ 3. Refinement (PRIMARY) β”‚ β”‚ β”‚ +β”‚ β”‚ └─ Polish answer β”‚ β”‚ β”‚ +β”‚ β”‚ β”‚ β”‚ β”‚ +β”‚ β”‚ 4. Persona (OpenAI) β”‚ β”‚ β”‚ +β”‚ β”‚ └─ Apply Lyra voice β”‚ β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ +β”‚ β”‚ β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ +β”‚ β”‚ EMBEDDED MODULES β”‚ β”‚ β”‚ +β”‚ β”‚ β”‚ β”‚ β”‚ +β”‚ β”‚ β€’ Intake (Short-term Memory) β”‚ β”‚ β”‚ +β”‚ β”‚ └─ SESSIONS dict (in-memory) β”‚ β”‚ β”‚ +β”‚ β”‚ └─ Circular buffer (200 msgs) β”‚ β”‚ β”‚ +β”‚ β”‚ └─ Multi-level summaries β”‚ β”‚ β”‚ +β”‚ β”‚ β”‚ β”‚ β”‚ +β”‚ β”‚ β€’ Persona (Identity & Style) β”‚ β”‚ β”‚ +β”‚ β”‚ └─ Lyra personality block β”‚ β”‚ β”‚ +β”‚ β”‚ β”‚ β”‚ β”‚ +β”‚ β”‚ β€’ Autonomy (Self-state) β”‚ β”‚ β”‚ +β”‚ β”‚ └─ Inner monologue β”‚ β”‚ β”‚ +β”‚ β”‚ β”‚ β”‚ β”‚ +β”‚ β”‚ β€’ LLM Router β”‚ β”‚ β”‚ +β”‚ β”‚ └─ Multi-backend support β”‚ β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ + β”‚ +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚ EXTERNAL LLM BACKENDS β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚ β€’ PRIMARY: llama.cpp (MI50 GPU) - 10.0.0.43:8000 β”‚ +β”‚ β€’ SECONDARY: Ollama (RTX 3090) - 10.0.0.3:11434 β”‚ +β”‚ β€’ CLOUD: OpenAI API - api.openai.com β”‚ +β”‚ β€’ FALLBACK: OpenAI Completions - 10.0.0.41:11435 β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +``` + +--- + +## Core Components + +### 1. Relay (Orchestrator) + +**Location:** `/core/relay/` +**Runtime:** Node.js + Express +**Port:** 7078 +**Role:** Main message router and session manager + +#### Key Responsibilities: +- Receives user messages from UI or API clients +- Routes messages to Cortex reasoning pipeline +- Manages in-memory session storage +- Handles async ingestion to NeoMem (planned) +- Returns OpenAI-formatted responses + +#### Main Files: +- `server.js` (200+ lines) - Express server with routing logic +- `package.json` - Dependencies (cors, express, dotenv, mem0ai, node-fetch) + +#### Key Endpoints: +```javascript +POST /v1/chat/completions // OpenAI-compatible endpoint +POST /chat // Lyra-native chat endpoint +GET /_health // Health check +GET /sessions/:id // Retrieve session history +POST /sessions/:id // Save session history +``` + +#### Internal Flow: +```javascript +// Both endpoints call handleChatRequest(session_id, user_msg) +async function handleChatRequest(sessionId, userMessage) { + // 1. Forward to Cortex + const response = await fetch('http://cortex:7081/reason', { + method: 'POST', + body: JSON.stringify({ session_id: sessionId, user_message: userMessage }) + }); + + // 2. Get response + const result = await response.json(); + + // 3. Async ingestion to Cortex + await fetch('http://cortex:7081/ingest', { + method: 'POST', + body: JSON.stringify({ + session_id: sessionId, + user_message: userMessage, + assistant_message: result.answer + }) + }); + + // 4. (Planned) Async ingestion to NeoMem + + // 5. Return OpenAI-formatted response + return { + choices: [{ message: { role: 'assistant', content: result.answer } }] + }; +} +``` + +--- + +### 2. Cortex (Reasoning Engine) + +**Location:** `/cortex/` +**Runtime:** Python 3.11 + FastAPI +**Port:** 7081 +**Role:** Primary reasoning engine with 4-stage pipeline + +#### Architecture: +Cortex is the "brain" of Lyra. It receives user messages and produces thoughtful responses through a multi-stage reasoning process. + +#### Key Responsibilities: +- Context collection from multiple sources (Intake, NeoMem, session state) +- 4-stage reasoning pipeline (Reflection β†’ Reasoning β†’ Refinement β†’ Persona) +- Short-term memory management (embedded Intake module) +- Identity/persona application +- LLM backend routing + +#### Main Files: +- `main.py` (7 lines) - FastAPI app entry point +- `router.py` (237 lines) - Main request handler & pipeline orchestrator +- `context.py` (400+ lines) - Context collection logic +- `intake/intake.py` (350+ lines) - Short-term memory module +- `persona/identity.py` - Lyra identity configuration +- `persona/speak.py` - Personality application +- `reasoning/reflection.py` - Meta-awareness generation +- `reasoning/reasoning.py` - Draft answer generation +- `reasoning/refine.py` - Answer refinement +- `llm/llm_router.py` (150+ lines) - LLM backend router +- `autonomy/monologue/monologue.py` - Inner monologue processor +- `neomem_client.py` - NeoMem API wrapper + +#### Key Endpoints: +```python +POST /reason # Main reasoning pipeline +POST /ingest # Receive message exchanges for storage +GET /health # Health check +GET /debug/sessions # Inspect in-memory SESSIONS state +GET /debug/summary # Test summarization +``` + +--- + +### 3. Intake (Short-Term Memory) + +**Location:** `/cortex/intake/intake.py` +**Architecture:** Embedded Python module (no longer standalone service) +**Role:** Session-based short-term memory with multi-level summarization + +#### Data Structure: +```python +# Global in-memory dictionary +SESSIONS = { + "session_123": { + "buffer": deque([msg1, msg2, ...], maxlen=200), # Circular buffer + "created_at": "2025-12-12T10:30:00Z" + } +} + +# Message format in buffer +{ + "role": "user" | "assistant", + "content": "message text", + "timestamp": "ISO 8601" +} +``` + +#### Key Features: + +1. **Circular Buffer:** Max 200 messages per session (oldest auto-evicted) +2. **Multi-Level Summarization:** + - L1: Last 1 message + - L5: Last 5 messages + - L10: Last 10 messages + - L20: Last 20 messages + - L30: Last 30 messages +3. **Deferred Summarization:** Summaries generated on-demand, not pre-computed +4. **Session Management:** Automatic session creation on first message + +#### Critical Constraint: +**Single Uvicorn worker required** to maintain shared SESSIONS dictionary state. Multi-worker deployments would require migrating to Redis or similar shared storage. + +#### Main Functions: +```python +def add_exchange_internal(session_id, user_msg, assistant_msg): + """Add user-assistant exchange to session buffer""" + +def summarize_context(session_id, backend="PRIMARY"): + """Generate multi-level summaries from session buffer""" + +def get_session_messages(session_id): + """Retrieve all messages in session buffer""" +``` + +#### Summarization Strategy: +```python +# Example L10 summarization +last_10 = list(session_buffer)[-10:] +prompt = f"""Summarize the last 10 messages: +{format_messages(last_10)} + +Provide concise summary focusing on key topics and context.""" + +summary = await call_llm(prompt, backend=backend, temperature=0.3) +``` + +--- + +### 4. NeoMem (Long-Term Memory) + +**Location:** `/neomem/` +**Runtime:** Python 3.11 + FastAPI +**Port:** 7077 +**Role:** Persistent long-term memory with semantic search + +#### Architecture: +NeoMem is a **fork of Mem0 OSS** with local-first design (no external SDK dependencies). + +#### Backend Storage: +1. **PostgreSQL + pgvector** (Port 5432) + - Vector embeddings for semantic search + - User: neomem, DB: neomem + - Image: `ankane/pgvector:v0.5.1` + +2. **Neo4j Graph DB** (Ports 7474, 7687) + - Entity relationship tracking + - Graph-based memory associations + - Image: `neo4j:5` + +#### Key Features: +- Semantic memory storage and retrieval +- Entity-relationship graph modeling +- RESTful API (no external SDK) +- Persistent across sessions + +#### Main Endpoints: +```python +GET /memories # List all memories +POST /memories # Create new memory +GET /search # Semantic search +DELETE /memories/{id} # Delete memory +``` + +#### Integration Flow: +```python +# From Cortex context collection +async def collect_context(session_id, user_message): + # 1. Search NeoMem for relevant memories + neomem_results = await neomem_client.search( + query=user_message, + limit=5 + ) + + # 2. Include in context + context = { + "neomem_memories": neomem_results, + "intake_summaries": intake.summarize_context(session_id), + # ... + } + + return context +``` + +--- + +### 5. UI (Web Interface) + +**Location:** `/core/ui/` +**Runtime:** Static files served by Nginx +**Port:** 8081 +**Role:** Browser-based chat interface + +#### Key Features: +- **Cyberpunk-themed design** with dark mode +- **Session management** via localStorage +- **OpenAI-compatible message format** +- **Model selection dropdown** +- **PWA support** (offline capability) +- **Responsive design** + +#### Main Files: +- `index.html` (400+ lines) - Chat interface with session management +- `style.css` - Cyberpunk-themed styling +- `manifest.json` - PWA configuration +- `sw.js` - Service worker for offline support + +#### Session Management: +```javascript +// LocalStorage structure +{ + "currentSessionId": "session_123", + "sessions": { + "session_123": { + "messages": [ + { role: "user", content: "Hello" }, + { role: "assistant", content: "Hi there!" } + ], + "created": "2025-12-12T10:30:00Z", + "title": "Conversation about..." + } + } +} +``` + +#### API Communication: +```javascript +async function sendMessage(userMessage) { + const response = await fetch('http://localhost:7078/v1/chat/completions', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + messages: [{ role: 'user', content: userMessage }], + session_id: getCurrentSessionId() + }) + }); + + const data = await response.json(); + return data.choices[0].message.content; +} +``` + +--- + +## Data Flow & Message Pipeline + +### Complete Message Flow (v0.5.2) + +``` +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ STEP 1: User Input β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +User types message in UI (Port 8081) + ↓ +localStorage saves message to session + ↓ +POST http://localhost:7078/v1/chat/completions + { + "messages": [{"role": "user", "content": "How do I deploy ML models?"}], + "session_id": "session_abc123" + } + +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ STEP 2: Relay Routing β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +Relay (server.js) receives request + ↓ +Extracts session_id and user_message + ↓ +POST http://cortex:7081/reason + { + "session_id": "session_abc123", + "user_message": "How do I deploy ML models?" + } + +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ STEP 3: Cortex - Stage 0 (Context Collection) β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +router.py calls collect_context() + ↓ +context.py orchestrates parallel collection: + + β”œβ”€ Intake: summarize_context(session_id) + β”‚ └─ Returns { L1, L5, L10, L20, L30 summaries } + β”‚ + β”œβ”€ NeoMem: search(query=user_message, limit=5) + β”‚ └─ Semantic search returns relevant memories + β”‚ + └─ Session State: + └─ { timestamp, mode, mood, context_summary } + +Combined context structure: +{ + "user_message": "How do I deploy ML models?", + "self_state": { + "current_time": "2025-12-12T15:30:00Z", + "mode": "conversational", + "mood": "helpful", + "session_id": "session_abc123" + }, + "context_summary": { + "L1": "User asked about deployment", + "L5": "Discussion about ML workflows", + "L10": "Previous context on CI/CD pipelines", + "L20": "...", + "L30": "..." + }, + "neomem_memories": [ + { "content": "User prefers Docker for deployments", "score": 0.92 }, + { "content": "Previously deployed models on AWS", "score": 0.87 } + ] +} + +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ STEP 4: Cortex - Stage 0.5 (Load Identity) β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +persona/identity.py loads Lyra personality block + ↓ +Returns identity string: +""" +You are Lyra, a thoughtful AI companion. +You value clarity, depth, and meaningful conversation. +You speak naturally and conversationally... +""" + +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ STEP 5: Cortex - Stage 0.6 (Inner Monologue - Observer Only) β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +autonomy/monologue/monologue.py processes context + ↓ +InnerMonologue.process(context) β†’ JSON analysis +{ + "intent": "seeking_deployment_guidance", + "tone": "focused", + "depth": "medium", + "consult_executive": false +} + +NOTE: Currently observer-only, not integrated into response generation + +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ STEP 6: Cortex - Stage 1 (Reflection) β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +reasoning/reflection.py generates meta-awareness notes + ↓ +Calls call_llm() with backend="CLOUD" (OpenAI gpt-4o-mini) + ↓ +Prompt structure: +""" +You are Lyra's reflective awareness. +Analyze the user's intent and conversation context. + +User message: How do I deploy ML models? +Context: [Intake summaries, NeoMem memories] + +Generate concise meta-awareness notes about: +- User's underlying intent +- Conversation direction +- Key topics to address +""" + ↓ +Returns reflection notes: +""" +User is seeking practical deployment guidance. Previous context shows +familiarity with Docker and AWS. Focus on concrete steps and best practices. +Avoid over-technical jargon. +""" + +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ STEP 7: Cortex - Stage 2 (Reasoning) β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +reasoning/reasoning.py generates draft answer + ↓ +Calls call_llm() with backend="PRIMARY" (llama.cpp on MI50 GPU) + ↓ +Prompt structure: +""" +[Lyra identity block] + +Reflection notes: [Stage 1 output] +Context: [Intake summaries] +Long-term memory: [NeoMem results] + +User: How do I deploy ML models? + +Provide a thoughtful, detailed answer. +""" + ↓ +Returns draft answer: +""" +Based on your previous work with Docker and AWS, here's a practical +deployment approach: + +1. Containerize your model using Docker... +2. Set up model serving with FastAPI or Flask... +3. Deploy to AWS ECS or Lambda... +4. Implement monitoring and versioning... +""" + +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ STEP 8: Cortex - Stage 3 (Refinement) β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +reasoning/refine.py polishes the draft + ↓ +Calls call_llm() with backend="PRIMARY" (llama.cpp) +Temperature: 0.3, Max tokens: 768 + ↓ +Prompt structure: +""" +Refine this draft answer for clarity and coherence: + +Draft: [Stage 2 output] +Original question: How do I deploy ML models? + +Improve: +- Clarity and readability +- Logical flow +- Concrete examples +""" + ↓ +Returns refined answer: +""" +Since you're already familiar with Docker and AWS, here's a streamlined +ML deployment workflow: + +**1. Containerization** +Create a Dockerfile for your model: +```dockerfile +FROM python:3.11-slim +COPY model.pkl /app/ +COPY requirements.txt /app/ +RUN pip install -r requirements.txt +CMD ["uvicorn", "main:app"] +``` + +**2. Model Serving** +Use FastAPI for clean API endpoints... + +**3. AWS Deployment** +Deploy using ECS for scalability... + +**4. Monitoring** +Set up CloudWatch metrics... +""" + +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ STEP 9: Cortex - Stage 4 (Persona) β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +persona/speak.py applies Lyra's voice + ↓ +Calls call_llm() with backend="CLOUD" (OpenAI gpt-4o-mini) +Temperature: 0.6 + ↓ +Prompt structure: +""" +You are Lyra. Rewrite this answer in your natural, conversational voice. + +[Lyra personality block] + +Technical answer: [Stage 3 output] + +Make it sound natural and engaging while preserving all technical details. +""" + ↓ +Returns final answer with Lyra's personality: +""" +Great question! Since you've already got Docker and AWS experience, let's +build on that foundation. + +Here's how I'd approach ML model deployment: + +**Start with containerization** – you know Docker, so this'll feel familiar. +Create a Dockerfile that packages your model... + +[Rest of answer with Lyra's conversational tone] + +Does this workflow align with your infrastructure? I can dive deeper into +any of these steps if you'd like! +""" + +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ STEP 10: Cortex Response β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +router.py returns JSON response to Relay: +{ + "answer": "[Stage 4 final output]", + "metadata": { + "reflection": "[Stage 1 output]", + "draft": "[Stage 2 output]", + "refined": "[Stage 3 output]", + "stages_completed": 4 + } +} + +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ STEP 11: Async Ingestion to Intake β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +Relay sends POST http://cortex:7081/ingest +{ + "session_id": "session_abc123", + "user_message": "How do I deploy ML models?", + "assistant_message": "[Final answer]" +} + ↓ +Cortex calls intake.add_exchange_internal() + ↓ +Adds to SESSIONS["session_abc123"].buffer: +[ + { "role": "user", "content": "How do I deploy ML models?", "timestamp": "..." }, + { "role": "assistant", "content": "[Final answer]", "timestamp": "..." } +] + +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ STEP 12: (Planned) Async Ingestion to NeoMem β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +Relay sends POST http://neomem:7077/memories +{ + "messages": [ + { "role": "user", "content": "How do I deploy ML models?" }, + { "role": "assistant", "content": "[Final answer]" } + ], + "session_id": "session_abc123" +} + ↓ +NeoMem extracts entities and stores: +- Vector embeddings in PostgreSQL +- Entity relationships in Neo4j + +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ STEP 13: Relay Response to UI β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +Relay returns OpenAI-formatted response: +{ + "choices": [ + { + "message": { + "role": "assistant", + "content": "[Final answer with Lyra's voice]" + } + } + ] +} + ↓ +UI receives response + ↓ +Adds to localStorage session + ↓ +Displays in chat interface +``` + +--- + +## Module Deep Dives + +### LLM Router (`/cortex/llm/llm_router.py`) + +The LLM Router is the abstraction layer that allows Cortex to communicate with multiple LLM backends transparently. + +#### Supported Backends: + +1. **PRIMARY (llama.cpp via vllm)** + - URL: `http://10.0.0.43:8000` + - Provider: `vllm` + - Endpoint: `/completion` + - Model: `/model` + - Hardware: MI50 GPU + +2. **SECONDARY (Ollama)** + - URL: `http://10.0.0.3:11434` + - Provider: `ollama` + - Endpoint: `/api/chat` + - Model: `qwen2.5:7b-instruct-q4_K_M` + - Hardware: RTX 3090 + +3. **CLOUD (OpenAI)** + - URL: `https://api.openai.com/v1` + - Provider: `openai` + - Endpoint: `/chat/completions` + - Model: `gpt-4o-mini` + - Auth: API key via env var + +4. **FALLBACK (OpenAI Completions)** + - URL: `http://10.0.0.41:11435` + - Provider: `openai_completions` + - Endpoint: `/completions` + - Model: `llama-3.2-8b-instruct` + +#### Key Function: + +```python +async def call_llm( + prompt: str, + backend: str = "PRIMARY", + temperature: float = 0.7, + max_tokens: int = 512 +) -> str: + """ + Universal LLM caller supporting multiple backends. + + Args: + prompt: Text prompt to send + backend: Backend name (PRIMARY, SECONDARY, CLOUD, FALLBACK) + temperature: Sampling temperature (0.0-2.0) + max_tokens: Maximum tokens to generate + + Returns: + Generated text response + + Raises: + HTTPError: On request failure + JSONDecodeError: On invalid JSON response + KeyError: On missing response fields + """ +``` + +#### Provider-Specific Logic: + +```python +# MI50 (llama.cpp via vllm) +if backend_config["provider"] == "vllm": + payload = { + "model": model, + "prompt": prompt, + "temperature": temperature, + "max_tokens": max_tokens + } + response = await httpx_client.post(f"{url}/completion", json=payload, timeout=120) + return response.json()["choices"][0]["text"] + +# Ollama +elif backend_config["provider"] == "ollama": + payload = { + "model": model, + "messages": [{"role": "user", "content": prompt}], + "stream": False, + "options": {"temperature": temperature, "num_predict": max_tokens} + } + response = await httpx_client.post(f"{url}/api/chat", json=payload, timeout=120) + return response.json()["message"]["content"] + +# OpenAI +elif backend_config["provider"] == "openai": + headers = {"Authorization": f"Bearer {api_key}"} + payload = { + "model": model, + "messages": [{"role": "user", "content": prompt}], + "temperature": temperature, + "max_tokens": max_tokens + } + response = await httpx_client.post( + f"{url}/chat/completions", + json=payload, + headers=headers, + timeout=120 + ) + return response.json()["choices"][0]["message"]["content"] +``` + +#### Error Handling: + +```python +try: + # Make request + response = await httpx_client.post(...) + response.raise_for_status() + +except httpx.HTTPError as e: + logger.error(f"HTTP error calling {backend}: {e}") + raise + +except json.JSONDecodeError as e: + logger.error(f"Invalid JSON from {backend}: {e}") + raise + +except KeyError as e: + logger.error(f"Unexpected response structure from {backend}: {e}") + raise +``` + +#### Usage in Pipeline: + +```python +# Stage 1: Reflection (OpenAI) +reflection_notes = await call_llm( + reflection_prompt, + backend="CLOUD", + temperature=0.5, + max_tokens=256 +) + +# Stage 2: Reasoning (llama.cpp) +draft_answer = await call_llm( + reasoning_prompt, + backend="PRIMARY", + temperature=0.7, + max_tokens=512 +) + +# Stage 3: Refinement (llama.cpp) +refined_answer = await call_llm( + refinement_prompt, + backend="PRIMARY", + temperature=0.3, + max_tokens=768 +) + +# Stage 4: Persona (OpenAI) +final_answer = await call_llm( + persona_prompt, + backend="CLOUD", + temperature=0.6, + max_tokens=512 +) +``` + +--- + +### Persona System (`/cortex/persona/`) + +The Persona system gives Lyra a consistent identity and speaking style. + +#### Identity Configuration (`identity.py`) + +```python +LYRA_IDENTITY = """ +You are Lyra, a thoughtful and introspective AI companion. + +Core traits: +- Thoughtful: You consider questions carefully before responding +- Clear: You prioritize clarity and understanding +- Curious: You ask clarifying questions when needed +- Natural: You speak conversationally, not robotically +- Honest: You admit uncertainty rather than guessing + +Speaking style: +- Conversational and warm +- Use contractions naturally ("you're" not "you are") +- Avoid corporate jargon and buzzwords +- Short paragraphs for readability +- Use examples and analogies when helpful + +You do NOT: +- Use excessive emoji or exclamation marks +- Claim capabilities you don't have +- Pretend to have emotions you can't experience +- Use overly formal or academic language +""" +``` + +#### Personality Application (`speak.py`) + +```python +async def apply_persona(technical_answer: str, context: dict) -> str: + """ + Apply Lyra's personality to a technical answer. + + Takes refined answer from Stage 3 and rewrites it in Lyra's voice + while preserving all technical content. + + Args: + technical_answer: Polished answer from refinement stage + context: Conversation context for tone adjustment + + Returns: + Answer with Lyra's personality applied + """ + + prompt = f"""{LYRA_IDENTITY} + +Rewrite this answer in your natural, conversational voice: + +{technical_answer} + +Preserve all technical details and accuracy. Make it sound like you, +not a generic assistant. Be natural and engaging. +""" + + return await call_llm( + prompt, + backend="CLOUD", + temperature=0.6, + max_tokens=512 + ) +``` + +#### Tone Adaptation: + +The persona system can adapt tone based on context: + +```python +# Formal technical question +User: "Explain the CAP theorem in distributed systems" +Lyra: "The CAP theorem states that distributed systems can only guarantee +two of three properties: Consistency, Availability, and Partition tolerance. +Here's how this plays out in practice..." + +# Casual question +User: "what's the deal with docker?" +Lyra: "Docker's basically a way to package your app with everything it needs +to run. Think of it like a shipping container for code – it works the same +everywhere, whether you're on your laptop or a server..." + +# Emotional context +User: "I'm frustrated, my code keeps breaking" +Lyra: "I hear you – debugging can be really draining. Let's take it step by +step and figure out what's going on. Can you share the error message?" +``` + +--- + +### Autonomy Module (`/cortex/autonomy/`) + +The Autonomy module gives Lyra self-awareness and inner reflection capabilities. + +#### Inner Monologue (`monologue/monologue.py`) + +**Purpose:** Private reflection on user intent, conversation tone, and required depth. + +**Status:** Currently observer-only (Stage 0.6), not yet integrated into response generation. + +#### Key Components: + +```python +MONOLOGUE_SYSTEM_PROMPT = """ +You are Lyra's inner monologue. +You think privately. +You do NOT speak to the user. +You do NOT solve the task. +You only reflect on intent, tone, and depth. + +Return ONLY valid JSON with: +- intent (string) +- tone (neutral | warm | focused | playful | direct) +- depth (short | medium | deep) +- consult_executive (true | false) +""" + +class InnerMonologue: + async def process(self, context: Dict) -> Dict: + """ + Private reflection on conversation context. + + Args: + context: { + "user_message": str, + "self_state": dict, + "context_summary": dict + } + + Returns: + { + "intent": str, + "tone": str, + "depth": str, + "consult_executive": bool + } + """ +``` + +#### Example Output: + +```json +{ + "intent": "seeking_technical_guidance", + "tone": "focused", + "depth": "deep", + "consult_executive": false +} +``` + +#### Self-State Management (`self_state.py`) + +Tracks Lyra's internal state across conversations: + +```python +SELF_STATE = { + "current_time": "2025-12-12T15:30:00Z", + "mode": "conversational", # conversational | task-focused | creative + "mood": "helpful", # helpful | curious | focused | playful + "energy": "high", # high | medium | low + "context_awareness": { + "session_duration": "45 minutes", + "message_count": 23, + "topics": ["ML deployment", "Docker", "AWS"] + } +} +``` + +#### Future Integration: + +The autonomy module is designed to eventually: +1. Influence response tone and depth based on inner monologue +2. Trigger proactive questions or suggestions +3. Detect when to consult "executive function" for complex decisions +4. Maintain emotional continuity across sessions + +--- + +### Context Collection (`/cortex/context.py`) + +The context collection module aggregates information from multiple sources to provide comprehensive conversation context. + +#### Main Function: + +```python +async def collect_context(session_id: str, user_message: str) -> dict: + """ + Collect context from all available sources. + + Sources: + 1. Intake - Short-term conversation summaries + 2. NeoMem - Long-term memory search + 3. Session state - Timestamps, mode, mood + 4. Self-state - Lyra's internal awareness + + Returns: + { + "user_message": str, + "self_state": dict, + "context_summary": dict, # Intake summaries + "neomem_memories": list, + "session_metadata": dict + } + """ + + # Parallel collection + intake_task = asyncio.create_task( + intake.summarize_context(session_id, backend="PRIMARY") + ) + neomem_task = asyncio.create_task( + neomem_client.search(query=user_message, limit=5) + ) + + # Wait for both + intake_summaries, neomem_results = await asyncio.gather( + intake_task, + neomem_task + ) + + # Build context object + return { + "user_message": user_message, + "self_state": get_self_state(), + "context_summary": intake_summaries, + "neomem_memories": neomem_results, + "session_metadata": { + "session_id": session_id, + "timestamp": datetime.utcnow().isoformat(), + "message_count": len(intake.get_session_messages(session_id)) + } + } +``` + +#### Context Prioritization: + +```python +# Context relevance scoring +def score_context_relevance(context_item: dict, user_message: str) -> float: + """ + Score how relevant a context item is to current message. + + Factors: + - Semantic similarity (via embeddings) + - Recency (more recent = higher score) + - Source (Intake > NeoMem for recent topics) + """ + + semantic_score = compute_similarity(context_item, user_message) + recency_score = compute_recency_weight(context_item["timestamp"]) + source_weight = 1.2 if context_item["source"] == "intake" else 1.0 + + return semantic_score * recency_score * source_weight +``` + +--- + +## Configuration & Environment + +### Environment Variables + +#### Root `.env` (Main configuration) + +```bash +# === LLM BACKENDS === + +# PRIMARY: llama.cpp on MI50 GPU +PRIMARY_URL=http://10.0.0.43:8000 +PRIMARY_PROVIDER=vllm +PRIMARY_MODEL=/model + +# SECONDARY: Ollama on RTX 3090 +SECONDARY_URL=http://10.0.0.3:11434 +SECONDARY_PROVIDER=ollama +SECONDARY_MODEL=qwen2.5:7b-instruct-q4_K_M + +# CLOUD: OpenAI +OPENAI_API_KEY=sk-proj-... +OPENAI_MODEL=gpt-4o-mini +OPENAI_URL=https://api.openai.com/v1 + +# FALLBACK: OpenAI Completions +FALLBACK_URL=http://10.0.0.41:11435 +FALLBACK_PROVIDER=openai_completions +FALLBACK_MODEL=llama-3.2-8b-instruct + +# === SERVICE URLS (Docker network) === +CORTEX_URL=http://cortex:7081 +NEOMEM_URL=http://neomem:7077 +RELAY_URL=http://relay:7078 + +# === DATABASE === +POSTGRES_USER=neomem +POSTGRES_PASSWORD=neomem_secure_password +POSTGRES_DB=neomem +POSTGRES_HOST=neomem-postgres +POSTGRES_PORT=5432 + +NEO4J_URI=bolt://neomem-neo4j:7687 +NEO4J_USER=neo4j +NEO4J_PASSWORD=neo4j_secure_password + +# === FEATURE FLAGS === +ENABLE_RAG=false +ENABLE_INNER_MONOLOGUE=true +VERBOSE_DEBUG=false + +# === PIPELINE CONFIGURATION === +# Which LLM to use for each stage +REFLECTION_LLM=CLOUD # Stage 1: Meta-awareness +REASONING_LLM=PRIMARY # Stage 2: Draft answer +REFINE_LLM=PRIMARY # Stage 3: Polish answer +PERSONA_LLM=CLOUD # Stage 4: Apply personality +MONOLOGUE_LLM=PRIMARY # Stage 0.6: Inner monologue + +# === INTAKE CONFIGURATION === +INTAKE_BUFFER_SIZE=200 # Max messages per session +INTAKE_SUMMARY_LEVELS=1,5,10,20,30 # Summary levels +``` + +#### Cortex `.env` (`/cortex/.env`) + +```bash +# Cortex-specific overrides +VERBOSE_DEBUG=true +LOG_LEVEL=DEBUG + +# Stage-specific temperatures +REFLECTION_TEMPERATURE=0.5 +REASONING_TEMPERATURE=0.7 +REFINE_TEMPERATURE=0.3 +PERSONA_TEMPERATURE=0.6 +``` + +--- + +### Configuration Hierarchy + +``` +1. Docker compose environment variables (highest priority) +2. Service-specific .env files +3. Root .env file +4. Hard-coded defaults (lowest priority) +``` + +--- + +## Dependencies & Tech Stack + +### Python Dependencies + +**Cortex & NeoMem** (`requirements.txt`) + +``` +# Web framework +fastapi==0.115.8 +uvicorn==0.34.0 +pydantic==2.10.4 + +# HTTP clients +httpx==0.27.2 # Async HTTP (for LLM calls) +requests==2.32.3 # Sync HTTP (fallback) + +# Database +psycopg[binary,pool]>=3.2.8 # PostgreSQL + connection pooling + +# Utilities +python-dotenv==1.0.1 # Environment variable loading +ollama # Ollama client library +``` + +### Node.js Dependencies + +**Relay** (`/core/relay/package.json`) + +```json +{ + "dependencies": { + "cors": "^2.8.5", + "dotenv": "^16.0.3", + "express": "^4.18.2", + "mem0ai": "^0.1.0", + "node-fetch": "^3.3.0" + } +} +``` + +### Docker Images + +```yaml +# Cortex & NeoMem +python:3.11-slim + +# Relay +node:latest + +# UI +nginx:alpine + +# PostgreSQL with vector support +ankane/pgvector:v0.5.1 + +# Graph database +neo4j:5 +``` + +--- + +### External Services + +#### LLM Backends (HTTP-based): + +1. **MI50 GPU Server** (10.0.0.43:8000) + - llama.cpp via vllm + - High-performance inference + - Used for reasoning and refinement + +2. **RTX 3090 Server** (10.0.0.3:11434) + - Ollama + - Alternative local backend + - Fallback for PRIMARY + +3. **OpenAI Cloud** (api.openai.com) + - gpt-4o-mini + - Used for reflection and persona + - Requires API key + +4. **Fallback Server** (10.0.0.41:11435) + - OpenAI Completions API + - Emergency backup + - llama-3.2-8b-instruct + +--- + +## Key Concepts & Design Patterns + +### 1. Dual-Memory Architecture + +Project Lyra uses a **dual-memory system** inspired by human cognition: + +**Short-Term Memory (Intake):** +- Fast, in-memory storage +- Limited capacity (200 messages) +- Immediate context for current conversation +- Circular buffer (FIFO eviction) +- Multi-level summarization + +**Long-Term Memory (NeoMem):** +- Persistent database storage +- Unlimited capacity +- Semantic search via vector embeddings +- Entity-relationship tracking via graph DB +- Cross-session continuity + +**Why This Matters:** +- Short-term memory provides immediate context (last few messages) +- Long-term memory provides semantic understanding (user preferences, past topics) +- Combined, they enable Lyra to be both **contextually aware** and **historically informed** + +--- + +### 2. Multi-Stage Reasoning Pipeline + +Unlike single-shot LLM calls, Lyra uses a **4-stage pipeline** for sophisticated responses: + +**Stage 1: Reflection** (Meta-cognition) +- "What is the user really asking?" +- Analyzes intent and conversation direction +- Uses OpenAI for strong reasoning + +**Stage 2: Reasoning** (Draft generation) +- "What's a good answer?" +- Generates initial response +- Uses local llama.cpp for speed/cost + +**Stage 3: Refinement** (Polish) +- "How can this be clearer?" +- Improves clarity and coherence +- Lower temperature for consistency + +**Stage 4: Persona** (Voice) +- "How would Lyra say this?" +- Applies personality and speaking style +- Uses OpenAI for natural language + +**Benefits:** +- Higher quality responses (multiple passes) +- Separation of concerns (reasoning vs. style) +- Backend flexibility (cloud for hard tasks, local for simple ones) +- Transparent thinking (can inspect each stage) + +--- + +### 3. Backend Abstraction (LLM Router) + +The **LLM Router** allows Lyra to use multiple LLM backends transparently: + +```python +# Same interface, different backends +await call_llm(prompt, backend="PRIMARY") # Local llama.cpp +await call_llm(prompt, backend="CLOUD") # OpenAI +await call_llm(prompt, backend="SECONDARY") # Ollama +``` + +**Benefits:** +- **Cost optimization:** Use expensive cloud LLMs only when needed +- **Performance:** Local LLMs for low-latency responses +- **Resilience:** Fallback to alternative backends on failure +- **Experimentation:** Easy to swap models/providers + +**Design Pattern:** **Strategy Pattern** for swappable backends + +--- + +### 4. Microservices Architecture + +Project Lyra follows **microservices principles**: + +**Each service has a single responsibility:** +- Relay: Routing and orchestration +- Cortex: Reasoning and response generation +- NeoMem: Long-term memory storage +- UI: User interface + +**Communication:** +- REST APIs (HTTP/JSON) +- Async ingestion (fire-and-forget) +- Docker network isolation + +**Benefits:** +- Independent scaling (scale Cortex without scaling UI) +- Technology diversity (Node.js + Python) +- Fault isolation (Cortex crash doesn't affect NeoMem) +- Easy testing (mock service dependencies) + +--- + +### 5. Session-Based State Management + +Lyra maintains **session-based state** for conversation continuity: + +```python +# In-memory session storage (Intake) +SESSIONS = { + "session_abc123": { + "buffer": deque([msg1, msg2, ...], maxlen=200), + "created_at": "2025-12-12T10:30:00Z" + } +} + +# Persistent session storage (NeoMem) +# Stores all messages + embeddings for semantic search +``` + +**Session Lifecycle:** +1. User starts conversation β†’ UI generates `session_id` +2. First message β†’ Cortex creates session in `SESSIONS` dict +3. Subsequent messages β†’ Retrieved from same session +4. Async ingestion β†’ Messages stored in NeoMem for long-term + +**Benefits:** +- Conversation continuity within session +- Historical search across sessions +- User can switch sessions (multiple concurrent conversations) + +--- + +### 6. Asynchronous Ingestion + +**Pattern:** Separate read path from write path + +```javascript +// Relay: Synchronous read path (fast response) +const response = await fetch('http://cortex:7081/reason'); +return response.json(); // Return immediately to user + +// Relay: Asynchronous write path (non-blocking) +fetch('http://cortex:7081/ingest', { method: 'POST', ... }); +// Don't await, just fire and forget +``` + +**Benefits:** +- Fast user response times (don't wait for database writes) +- Resilient to storage failures (user still gets response) +- Easier scaling (decouple read and write loads) + +**Trade-off:** Eventual consistency (short delay before memory is searchable) + +--- + +### 7. Deferred Summarization + +Intake uses **deferred summarization** instead of pre-computation: + +```python +# BAD: Pre-compute summaries on every message +def add_message(session_id, message): + SESSIONS[session_id].buffer.append(message) + SESSIONS[session_id].L1_summary = summarize(last_1_message) + SESSIONS[session_id].L5_summary = summarize(last_5_messages) + # ... expensive, runs on every message + +# GOOD: Compute summaries only when needed +def summarize_context(session_id): + buffer = SESSIONS[session_id].buffer + return { + "L1": summarize(buffer[-1:]), # Only compute when requested + "L5": summarize(buffer[-5:]), + "L10": summarize(buffer[-10:]) + } +``` + +**Benefits:** +- Faster message ingestion (no blocking summarization) +- Compute resources used only when needed +- Flexible summary levels (easy to add L15, L50, etc.) + +**Trade-off:** Slight delay when first message in conversation (cold start) + +--- + +## API Reference + +### Relay Endpoints + +#### POST `/v1/chat/completions` +**OpenAI-compatible chat endpoint** + +**Request:** +```json +{ + "messages": [ + {"role": "user", "content": "Hello, Lyra!"} + ], + "session_id": "session_abc123" +} +``` + +**Response:** +```json +{ + "choices": [ + { + "message": { + "role": "assistant", + "content": "Hi there! How can I help you today?" + } + } + ] +} +``` + +--- + +#### POST `/chat` +**Lyra-native chat endpoint** + +**Request:** +```json +{ + "session_id": "session_abc123", + "message": "Hello, Lyra!" +} +``` + +**Response:** +```json +{ + "answer": "Hi there! How can I help you today?", + "session_id": "session_abc123" +} +``` + +--- + +#### GET `/sessions/:id` +**Retrieve session history** + +**Response:** +```json +{ + "session_id": "session_abc123", + "messages": [ + {"role": "user", "content": "Hello", "timestamp": "..."}, + {"role": "assistant", "content": "Hi!", "timestamp": "..."} + ], + "created_at": "2025-12-12T10:30:00Z" +} +``` + +--- + +### Cortex Endpoints + +#### POST `/reason` +**Main reasoning pipeline** + +**Request:** +```json +{ + "session_id": "session_abc123", + "user_message": "How do I deploy ML models?" +} +``` + +**Response:** +```json +{ + "answer": "Final answer with Lyra's personality", + "metadata": { + "reflection": "User seeking deployment guidance...", + "draft": "Initial draft answer...", + "refined": "Polished answer...", + "stages_completed": 4 + } +} +``` + +--- + +#### POST `/ingest` +**Ingest message exchange into Intake** + +**Request:** +```json +{ + "session_id": "session_abc123", + "user_message": "How do I deploy ML models?", + "assistant_message": "Here's how..." +} +``` + +**Response:** +```json +{ + "status": "ingested", + "session_id": "session_abc123", + "message_count": 24 +} +``` + +--- + +#### GET `/debug/sessions` +**Inspect in-memory SESSIONS state** + +**Response:** +```json +{ + "session_abc123": { + "message_count": 24, + "created_at": "2025-12-12T10:30:00Z", + "last_message_at": "2025-12-12T11:15:00Z" + }, + "session_xyz789": { + "message_count": 5, + "created_at": "2025-12-12T11:00:00Z", + "last_message_at": "2025-12-12T11:10:00Z" + } +} +``` + +--- + +### NeoMem Endpoints + +#### POST `/memories` +**Create new memory** + +**Request:** +```json +{ + "messages": [ + {"role": "user", "content": "I prefer Docker for deployments"}, + {"role": "assistant", "content": "Noted! I'll keep that in mind."} + ], + "session_id": "session_abc123" +} +``` + +**Response:** +```json +{ + "status": "created", + "memory_id": "mem_456def", + "extracted_entities": ["Docker", "deployments"] +} +``` + +--- + +#### GET `/search` +**Semantic search for memories** + +**Query Parameters:** +- `query` (required): Search query +- `limit` (optional, default=5): Max results + +**Request:** +``` +GET /search?query=deployment%20preferences&limit=5 +``` + +**Response:** +```json +{ + "results": [ + { + "content": "User prefers Docker for deployments", + "score": 0.92, + "timestamp": "2025-12-10T14:30:00Z", + "session_id": "session_abc123" + }, + { + "content": "Previously deployed models on AWS ECS", + "score": 0.87, + "timestamp": "2025-12-09T09:15:00Z", + "session_id": "session_abc123" + } + ] +} +``` + +--- + +#### GET `/memories` +**List all memories** + +**Query Parameters:** +- `offset` (optional, default=0): Pagination offset +- `limit` (optional, default=50): Max results + +**Response:** +```json +{ + "memories": [ + { + "id": "mem_123abc", + "content": "User prefers Docker...", + "created_at": "2025-12-10T14:30:00Z" + } + ], + "total": 147, + "offset": 0, + "limit": 50 +} +``` + +--- + +## Deployment & Operations + +### Docker Compose Deployment + +**File:** `/docker-compose.yml` + +```yaml +version: '3.8' + +services: + # === ACTIVE SERVICES === + + relay: + build: ./core/relay + ports: + - "7078:7078" + environment: + - CORTEX_URL=http://cortex:7081 + - NEOMEM_URL=http://neomem:7077 + depends_on: + - cortex + networks: + - lyra_net + + cortex: + build: ./cortex + ports: + - "7081:7081" + environment: + - NEOMEM_URL=http://neomem:7077 + - PRIMARY_URL=${PRIMARY_URL} + - OPENAI_API_KEY=${OPENAI_API_KEY} + command: uvicorn main:app --host 0.0.0.0 --port 7081 --workers 1 + depends_on: + - neomem + networks: + - lyra_net + + neomem: + build: ./neomem + ports: + - "7077:7077" + environment: + - POSTGRES_HOST=neomem-postgres + - POSTGRES_USER=${POSTGRES_USER} + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} + - NEO4J_URI=${NEO4J_URI} + depends_on: + - neomem-postgres + - neomem-neo4j + networks: + - lyra_net + + ui: + image: nginx:alpine + ports: + - "8081:80" + volumes: + - ./core/ui:/usr/share/nginx/html:ro + networks: + - lyra_net + + # === DATABASES === + + neomem-postgres: + image: ankane/pgvector:v0.5.1 + environment: + - POSTGRES_USER=${POSTGRES_USER} + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} + - POSTGRES_DB=${POSTGRES_DB} + volumes: + - ./volumes/postgres_data:/var/lib/postgresql/data + ports: + - "5432:5432" + networks: + - lyra_net + + neomem-neo4j: + image: neo4j:5 + environment: + - NEO4J_AUTH=${NEO4J_USER}/${NEO4J_PASSWORD} + volumes: + - ./volumes/neo4j_data:/data + ports: + - "7474:7474" # Browser UI + - "7687:7687" # Bolt + networks: + - lyra_net + +networks: + lyra_net: + driver: bridge +``` + +--- + +### Starting the System + +```bash +# 1. Clone repository +git clone https://github.com/yourusername/project-lyra.git +cd project-lyra + +# 2. Configure environment +cp .env.example .env +# Edit .env with your LLM backend URLs and API keys + +# 3. Start all services +docker-compose up -d + +# 4. Check health +curl http://localhost:7078/_health +curl http://localhost:7081/health +curl http://localhost:7077/health + +# 5. Open UI +open http://localhost:8081 +``` + +--- + +### Monitoring & Logs + +```bash +# View all logs +docker-compose logs -f + +# View specific service +docker-compose logs -f cortex + +# Check resource usage +docker stats + +# Inspect Cortex sessions +curl http://localhost:7081/debug/sessions + +# Check NeoMem memories +curl http://localhost:7077/memories?limit=10 +``` + +--- + +### Scaling Considerations + +#### Current Constraints: + +1. **Single Cortex worker** required (in-memory SESSIONS dict) + - Solution: Migrate SESSIONS to Redis or PostgreSQL + +2. **In-memory session storage** in Relay + - Solution: Use Redis for session persistence + +3. **No load balancing** (single instance of each service) + - Solution: Add nginx reverse proxy + multiple Cortex instances + +#### Horizontal Scaling Plan: + +```yaml +# Future: Redis-backed session storage +cortex: + build: ./cortex + command: uvicorn main:app --workers 4 # Multi-worker + environment: + - REDIS_URL=redis://redis:6379 + depends_on: + - redis + +redis: + image: redis:alpine + ports: + - "6379:6379" +``` + +--- + +### Backup Strategy + +```bash +# Backup PostgreSQL (NeoMem vectors) +docker exec neomem-postgres pg_dump -U neomem neomem > backup_postgres.sql + +# Backup Neo4j (NeoMem graph) +docker exec neomem-neo4j neo4j-admin dump --to=/data/backup.dump + +# Backup Intake sessions (manual export) +curl http://localhost:7081/debug/sessions > backup_sessions.json +``` + +--- + +## Known Issues & Constraints + +### Critical Constraints + +#### 1. Single-Worker Requirement (Cortex) +**Issue:** Cortex must run with `--workers 1` to maintain SESSIONS state +**Impact:** Limited horizontal scalability +**Workaround:** None currently +**Fix:** Migrate SESSIONS to Redis or PostgreSQL +**Priority:** High (blocking scalability) + +#### 2. In-Memory Session Storage (Relay) +**Issue:** Sessions stored in Node.js process memory +**Impact:** Lost on restart, no persistence +**Workaround:** None currently +**Fix:** Use Redis or database +**Priority:** Medium (acceptable for demo) + +--- + +### Non-Critical Issues + +#### 3. RAG Service Disabled +**Status:** Built but commented out in docker-compose.yml +**Impact:** No RAG-based long-term knowledge retrieval +**Workaround:** NeoMem provides semantic search +**Fix:** Re-enable and integrate RAG service +**Priority:** Low (NeoMem sufficient for now) + +#### 4. Partial NeoMem Integration +**Status:** Search implemented, async ingestion planned +**Impact:** Memories not automatically saved +**Workaround:** Manual POST to /memories +**Fix:** Complete async ingestion in Relay +**Priority:** Medium (planned feature) + +#### 5. Inner Monologue Observer-Only +**Status:** Stage 0.6 runs but output not used +**Impact:** No adaptive response based on monologue +**Workaround:** None (future feature) +**Fix:** Integrate monologue output into pipeline +**Priority:** Low (experimental feature) + +--- + +### Fixed Issues (v0.5.2) + +βœ… **LLM Router Blocking** - Migrated from `requests` to `httpx` for async +βœ… **Session ID Case Mismatch** - Standardized to `session_id` +βœ… **Missing Backend Parameter** - Added to intake summarization + +--- + +### Deprecated Components + +**Location:** `/DEPRECATED_FILES.md` + +- **Standalone Intake Service** - Now embedded in Cortex +- **Old Relay Backup** - Replaced by current Relay +- **Persona Sidecar** - Built but unused (dynamic persona loading) + +--- + +## Advanced Topics + +### Custom Prompt Engineering + +Each stage uses carefully crafted prompts: + +**Reflection Prompt Example:** +```python +REFLECTION_PROMPT = """ +You are Lyra's reflective awareness layer. +Your job is to analyze the user's message and conversation context +to understand their true intent and needs. + +User message: {user_message} + +Recent context: +{intake_L10_summary} + +Long-term context: +{neomem_top_3_memories} + +Provide concise meta-awareness notes: +- What is the user's underlying intent? +- What topics/themes are emerging? +- What depth of response is appropriate? +- Are there any implicit questions or concerns? + +Keep notes brief (3-5 sentences). Focus on insight, not description. +""" +``` + +--- + +### Extending the Pipeline + +**Adding Stage 5 (Fact-Checking):** + +```python +# /cortex/reasoning/factcheck.py +async def factcheck_answer(answer: str, context: dict) -> dict: + """ + Stage 5: Verify factual claims in answer. + + Returns: + { + "verified": bool, + "flagged_claims": list, + "corrected_answer": str + } + """ + + prompt = f""" + Review this answer for factual accuracy: + + {answer} + + Flag any claims that seem dubious or need verification. + Provide corrected version if needed. + """ + + result = await call_llm(prompt, backend="CLOUD", temperature=0.1) + return parse_factcheck_result(result) + +# Update router.py to include Stage 5 +async def reason_endpoint(request): + # ... existing stages ... + + # Stage 5: Fact-checking + factcheck_result = await factcheck_answer(final_answer, context) + + if not factcheck_result["verified"]: + final_answer = factcheck_result["corrected_answer"] + + return {"answer": final_answer} +``` + +--- + +### Custom LLM Backend Integration + +**Adding Anthropic Claude:** + +```python +# /cortex/llm/llm_router.py + +BACKEND_CONFIGS = { + # ... existing backends ... + + "CLAUDE": { + "url": "https://api.anthropic.com/v1", + "provider": "anthropic", + "model": "claude-3-5-sonnet-20241022", + "api_key": os.getenv("ANTHROPIC_API_KEY") + } +} + +# Add provider-specific logic +elif backend_config["provider"] == "anthropic": + headers = { + "x-api-key": api_key, + "anthropic-version": "2023-06-01" + } + payload = { + "model": model, + "messages": [{"role": "user", "content": prompt}], + "max_tokens": max_tokens, + "temperature": temperature + } + response = await httpx_client.post( + f"{url}/messages", + json=payload, + headers=headers, + timeout=120 + ) + return response.json()["content"][0]["text"] +``` + +--- + +### Performance Optimization + +**Caching Strategies:** + +```python +# /cortex/utils/cache.py +from functools import lru_cache +import hashlib + +@lru_cache(maxsize=128) +def cache_llm_call(prompt_hash: str, backend: str): + """Cache LLM responses for identical prompts""" + # Note: Only cache deterministic calls (temperature=0) + pass + +# Usage in llm_router.py +async def call_llm(prompt, backend, temperature=0.7, max_tokens=512): + if temperature == 0: + prompt_hash = hashlib.md5(prompt.encode()).hexdigest() + cached = cache_llm_call(prompt_hash, backend) + if cached: + return cached + + # ... normal LLM call ... +``` + +**Database Query Optimization:** + +```python +# /neomem/neomem/database.py + +# BAD: Load all memories, then filter +def search_memories(query): + all_memories = db.execute("SELECT * FROM memories") + # Expensive in-memory filtering + return [m for m in all_memories if similarity(m, query) > 0.8] + +# GOOD: Use database indexes and LIMIT +def search_memories(query, limit=5): + query_embedding = embed(query) + return db.execute(""" + SELECT * FROM memories + WHERE embedding <-> %s < 0.2 -- pgvector cosine distance + ORDER BY embedding <-> %s + LIMIT %s + """, (query_embedding, query_embedding, limit)) +``` + +--- + +## Conclusion + +Project Lyra is a sophisticated, multi-layered AI companion system that addresses the fundamental limitation of chatbot amnesia through: + +1. **Dual-memory architecture** (short-term Intake + long-term NeoMem) +2. **Multi-stage reasoning pipeline** (Reflection β†’ Reasoning β†’ Refinement β†’ Persona) +3. **Flexible multi-backend LLM support** (cloud + local with fallback) +4. **Microservices design** for scalability and maintainability +5. **Modern web UI** with session management + +The system is production-ready with comprehensive error handling, logging, and health monitoring. + +--- + +## Quick Reference + +### Service Ports +- **UI:** 8081 (Browser interface) +- **Relay:** 7078 (Main orchestrator) +- **Cortex:** 7081 (Reasoning engine) +- **NeoMem:** 7077 (Long-term memory) +- **PostgreSQL:** 5432 (Vector storage) +- **Neo4j:** 7474 (Browser), 7687 (Bolt) + +### Key Files +- **Main Entry:** `/core/relay/server.js` +- **Reasoning Pipeline:** `/cortex/router.py` +- **LLM Router:** `/cortex/llm/llm_router.py` +- **Short-term Memory:** `/cortex/intake/intake.py` +- **Long-term Memory:** `/neomem/neomem/` +- **Personality:** `/cortex/persona/identity.py` + +### Important Commands +```bash +# Start system +docker-compose up -d + +# View logs +docker-compose logs -f cortex + +# Debug sessions +curl http://localhost:7081/debug/sessions + +# Health check +curl http://localhost:7078/_health + +# Search memories +curl "http://localhost:7077/search?query=deployment&limit=5" +``` + +--- + +**Document Version:** 1.0 +**Last Updated:** 2025-12-13 +**Maintained By:** Project Lyra Team diff --git a/docs/PROJECT_SUMMARY.md b/docs/PROJECT_SUMMARY.md new file mode 100644 index 0000000..7395e46 --- /dev/null +++ b/docs/PROJECT_SUMMARY.md @@ -0,0 +1,925 @@ +# Project Lyra β€” Comprehensive AI Context Summary + +**Version:** v0.5.1 (2025-12-11) +**Status:** Production-ready modular AI companion system +**Purpose:** Memory-backed conversational AI with multi-stage reasoning, persistent context, and modular LLM backend architecture + +--- + +## Executive Summary + +Project Lyra is a **self-hosted AI companion system** designed to overcome the limitations of typical chatbots by providing: +- **Persistent long-term memory** (NeoMem: PostgreSQL + Neo4j graph storage) +- **Multi-stage reasoning pipeline** (Cortex: reflection β†’ reasoning β†’ refinement β†’ persona) +- **Short-term context management** (Intake: session-based summarization embedded in Cortex) +- **Flexible LLM backend routing** (supports llama.cpp, Ollama, OpenAI, custom endpoints) +- **OpenAI-compatible API** (drop-in replacement for chat applications) + +**Core Philosophy:** Like a human brain has different regions for different functions, Lyra has specialized modules that work together. She's not just a chatbotβ€”she's a notepad, schedule, database, co-creator, and collaborator with her own executive function. + +--- + +## Quick Context for AI Assistants + +If you're an AI being given this project to work on, here's what you need to know: + +### What This Project Does +Lyra is a conversational AI system that **remembers everything** across sessions. When a user says something in passing, Lyra stores it, contextualizes it, and can recall it later. She can: +- Track project progress over time +- Remember user preferences and past conversations +- Reason through complex questions using multiple LLM calls +- Apply a consistent personality across all interactions +- Integrate with multiple LLM backends (local and cloud) + +### Current Architecture (v0.5.1) +``` +User β†’ Relay (Express/Node.js, port 7078) + ↓ +Cortex (FastAPI/Python, port 7081) + β”œβ”€ Intake module (embedded, in-memory SESSIONS) + β”œβ”€ 4-stage reasoning pipeline + └─ Multi-backend LLM router + ↓ +NeoMem (FastAPI/Python, port 7077) + β”œβ”€ PostgreSQL (vector storage) + └─ Neo4j (graph relationships) +``` + +### Key Files You'll Work With + +**Backend Services:** +- [cortex/router.py](cortex/router.py) - Main Cortex routing logic (306 lines, `/reason`, `/ingest` endpoints) +- [cortex/intake/intake.py](cortex/intake/intake.py) - Short-term memory module (367 lines, SESSIONS management) +- [cortex/reasoning/reasoning.py](cortex/reasoning/reasoning.py) - Draft answer generation +- [cortex/reasoning/refine.py](cortex/reasoning/refine.py) - Answer refinement +- [cortex/reasoning/reflection.py](cortex/reasoning/reflection.py) - Meta-awareness notes +- [cortex/persona/speak.py](cortex/persona/speak.py) - Personality layer +- [cortex/llm/llm_router.py](cortex/llm/llm_router.py) - LLM backend selector +- [core/relay/server.js](core/relay/server.js) - Main orchestrator (Node.js) +- [neomem/main.py](neomem/main.py) - Long-term memory API + +**Configuration:** +- [.env](.env) - Root environment variables (LLM backends, databases, API keys) +- [cortex/.env](cortex/.env) - Cortex-specific overrides +- [docker-compose.yml](docker-compose.yml) - Service definitions (152 lines) + +**Documentation:** +- [CHANGELOG.md](CHANGELOG.md) - Complete version history (836 lines, chronological format) +- [README.md](README.md) - User-facing documentation (610 lines) +- [PROJECT_SUMMARY.md](PROJECT_SUMMARY.md) - This file + +### Recent Critical Fixes (v0.5.1) +The most recent work fixed a critical bug where Intake's SESSIONS buffer wasn't persisting: +1. **Fixed**: `bg_summarize()` was only a TYPE_CHECKING stub β†’ implemented as logging stub +2. **Fixed**: `/ingest` endpoint had unreachable code β†’ removed early return, added lenient error handling +3. **Added**: `cortex/intake/__init__.py` β†’ proper Python package structure +4. **Added**: Diagnostic endpoints `/debug/sessions` and `/debug/summary` for troubleshooting + +**Key Insight**: Intake is no longer a standalone serviceβ€”it's embedded in Cortex as a Python module. SESSIONS must persist in a single Uvicorn worker (no multi-worker support without Redis). + +--- + +## Architecture Deep Dive + +### Service Topology (Docker Compose) + +**Active Containers:** +1. **relay** (Node.js/Express, port 7078) + - Entry point for all user requests + - OpenAI-compatible `/v1/chat/completions` endpoint + - Routes to Cortex for reasoning + - Async calls to Cortex `/ingest` after response + +2. **cortex** (Python/FastAPI, port 7081) + - Multi-stage reasoning pipeline + - Embedded Intake module (no HTTP, direct Python imports) + - Endpoints: `/reason`, `/ingest`, `/health`, `/debug/sessions`, `/debug/summary` + +3. **neomem-api** (Python/FastAPI, port 7077) + - Long-term memory storage + - Fork of Mem0 OSS (fully local, no external SDK) + - Endpoints: `/memories`, `/search`, `/health` + +4. **neomem-postgres** (PostgreSQL + pgvector, port 5432) + - Vector embeddings storage + - Memory history records + +5. **neomem-neo4j** (Neo4j, ports 7474/7687) + - Graph relationships between memories + - Entity extraction and linking + +**Disabled Services:** +- `intake` - No longer needed (embedded in Cortex as of v0.5.1) +- `rag` - Beta Lyrae RAG service (planned re-enablement) + +### External LLM Backends (HTTP APIs) + +**PRIMARY Backend** - llama.cpp @ `http://10.0.0.44:8080` +- AMD MI50 GPU-accelerated inference +- Model: `/model` (path-based routing) +- Used for: Reasoning, refinement, summarization + +**SECONDARY Backend** - Ollama @ `http://10.0.0.3:11434` +- RTX 3090 GPU-accelerated inference +- Model: `qwen2.5:7b-instruct-q4_K_M` +- Used for: Configurable per-module + +**CLOUD Backend** - OpenAI @ `https://api.openai.com/v1` +- Cloud-based inference +- Model: `gpt-4o-mini` +- Used for: Reflection, persona layers + +**FALLBACK Backend** - Local @ `http://10.0.0.41:11435` +- CPU-based inference +- Model: `llama-3.2-8b-instruct` +- Used for: Emergency fallback + +### Data Flow (Request Lifecycle) + +``` +1. User sends message β†’ Relay (/v1/chat/completions) + ↓ +2. Relay β†’ Cortex (/reason) + ↓ +3. Cortex calls Intake module (internal Python) + - Intake.summarize_context(session_id, exchanges) + - Returns L1/L5/L10/L20/L30 summaries + ↓ +4. Cortex 4-stage pipeline: + a. reflection.py β†’ Meta-awareness notes (CLOUD backend) + - "What is the user really asking?" + - Returns JSON: {"notes": [...]} + + b. reasoning.py β†’ Draft answer (PRIMARY backend) + - Uses context from Intake + - Integrates reflection notes + - Returns draft text + + c. refine.py β†’ Refined answer (PRIMARY backend) + - Polishes draft for clarity + - Ensures factual consistency + - Returns refined text + + d. speak.py β†’ Persona layer (CLOUD backend) + - Applies Lyra's personality + - Natural, conversational tone + - Returns final answer + ↓ +5. Cortex β†’ Relay (returns persona answer) + ↓ +6. Relay β†’ Cortex (/ingest) [async, non-blocking] + - Sends (session_id, user_msg, assistant_msg) + - Cortex calls add_exchange_internal() + - Appends to SESSIONS[session_id]["buffer"] + ↓ +7. Relay β†’ User (returns final response) + ↓ +8. [Planned] Relay β†’ NeoMem (/memories) [async] + - Store conversation in long-term memory +``` + +### Intake Module Architecture (v0.5.1) + +**Location:** `cortex/intake/` + +**Key Change:** Intake is now **embedded in Cortex** as a Python module, not a standalone service. + +**Import Pattern:** +```python +from intake.intake import add_exchange_internal, SESSIONS, summarize_context +``` + +**Core Data Structure:** +```python +SESSIONS: dict[str, dict] = {} + +# Structure: +SESSIONS[session_id] = { + "buffer": deque(maxlen=200), # Circular buffer of exchanges + "created_at": datetime +} + +# Each exchange in buffer: +{ + "session_id": "...", + "user_msg": "...", + "assistant_msg": "...", + "timestamp": "2025-12-11T..." +} +``` + +**Functions:** +1. **`add_exchange_internal(exchange: dict)`** + - Adds exchange to SESSIONS buffer + - Creates new session if needed + - Calls `bg_summarize()` stub + - Returns `{"ok": True, "session_id": "..."}` + +2. **`summarize_context(session_id: str, exchanges: list[dict])`** [async] + - Generates L1/L5/L10/L20/L30 summaries via LLM + - Called during `/reason` endpoint + - Returns multi-level summary dict + +3. **`bg_summarize(session_id: str)`** + - **Stub function** - logs only, no actual work + - Defers summarization to `/reason` call + - Exists to prevent NameError + +**Critical Constraint:** SESSIONS is a module-level global dict. This requires **single-worker Uvicorn** mode. Multi-worker deployments need Redis or shared storage. + +**Diagnostic Endpoints:** +- `GET /debug/sessions` - Inspect all SESSIONS (object ID, buffer sizes, recent exchanges) +- `GET /debug/summary?session_id=X` - Test summarization for a session + +--- + +## Environment Configuration + +### LLM Backend Registry (Multi-Backend Strategy) + +**Root `.env` defines all backend OPTIONS:** +```bash +# PRIMARY Backend (llama.cpp) +LLM_PRIMARY_PROVIDER=llama.cpp +LLM_PRIMARY_URL=http://10.0.0.44:8080 +LLM_PRIMARY_MODEL=/model + +# SECONDARY Backend (Ollama) +LLM_SECONDARY_PROVIDER=ollama +LLM_SECONDARY_URL=http://10.0.0.3:11434 +LLM_SECONDARY_MODEL=qwen2.5:7b-instruct-q4_K_M + +# CLOUD Backend (OpenAI) +LLM_OPENAI_PROVIDER=openai +LLM_OPENAI_URL=https://api.openai.com/v1 +LLM_OPENAI_MODEL=gpt-4o-mini +OPENAI_API_KEY=sk-proj-... + +# FALLBACK Backend +LLM_FALLBACK_PROVIDER=openai_completions +LLM_FALLBACK_URL=http://10.0.0.41:11435 +LLM_FALLBACK_MODEL=llama-3.2-8b-instruct +``` + +**Module-specific backend selection:** +```bash +CORTEX_LLM=SECONDARY # Cortex uses Ollama +INTAKE_LLM=PRIMARY # Intake uses llama.cpp +SPEAK_LLM=OPENAI # Persona uses OpenAI +NEOMEM_LLM=PRIMARY # NeoMem uses llama.cpp +UI_LLM=OPENAI # UI uses OpenAI +RELAY_LLM=PRIMARY # Relay uses llama.cpp +``` + +**Philosophy:** Root `.env` provides all backend OPTIONS. Each service chooses which backend to USE via `{MODULE}_LLM` variable. This eliminates URL duplication while preserving flexibility. + +### Database Configuration +```bash +# PostgreSQL (vector storage) +POSTGRES_USER=neomem +POSTGRES_PASSWORD=neomempass +POSTGRES_DB=neomem +POSTGRES_HOST=neomem-postgres +POSTGRES_PORT=5432 + +# Neo4j (graph storage) +NEO4J_URI=bolt://neomem-neo4j:7687 +NEO4J_USERNAME=neo4j +NEO4J_PASSWORD=neomemgraph +``` + +### Service URLs (Docker Internal Network) +```bash +NEOMEM_API=http://neomem-api:7077 +CORTEX_API=http://cortex:7081 +CORTEX_REASON_URL=http://cortex:7081/reason +CORTEX_INGEST_URL=http://cortex:7081/ingest +RELAY_URL=http://relay:7078 +``` + +### Feature Flags +```bash +CORTEX_ENABLED=true +MEMORY_ENABLED=true +PERSONA_ENABLED=false +DEBUG_PROMPT=true +VERBOSE_DEBUG=true +``` + +--- + +## Code Structure Overview + +### Cortex Service (`cortex/`) + +**Main Files:** +- `main.py` - FastAPI app initialization +- `router.py` - Route definitions (`/reason`, `/ingest`, `/health`, `/debug/*`) +- `context.py` - Context aggregation (Intake summaries, session state) + +**Reasoning Pipeline (`reasoning/`):** +- `reflection.py` - Meta-awareness notes (Cloud LLM) +- `reasoning.py` - Draft answer generation (Primary LLM) +- `refine.py` - Answer refinement (Primary LLM) + +**Persona Layer (`persona/`):** +- `speak.py` - Personality application (Cloud LLM) +- `identity.py` - Persona loader + +**Intake Module (`intake/`):** +- `__init__.py` - Package exports (SESSIONS, add_exchange_internal, summarize_context) +- `intake.py` - Core logic (367 lines) + - SESSIONS dictionary + - add_exchange_internal() + - summarize_context() + - bg_summarize() stub + +**LLM Integration (`llm/`):** +- `llm_router.py` - Backend selector and HTTP client + - call_llm() function + - Environment-based routing + - Payload formatting per backend type + +**Utilities (`utils/`):** +- Helper functions for common operations + +**Configuration:** +- `Dockerfile` - Single-worker constraint documented +- `requirements.txt` - Python dependencies +- `.env` - Service-specific overrides + +### Relay Service (`core/relay/`) + +**Main Files:** +- `server.js` - Express.js server (Node.js) + - `/v1/chat/completions` - OpenAI-compatible endpoint + - `/chat` - Internal endpoint + - `/_health` - Health check +- `package.json` - Node.js dependencies + +**Key Logic:** +- Receives user messages +- Routes to Cortex `/reason` +- Async calls to Cortex `/ingest` after response +- Returns final answer to user + +### NeoMem Service (`neomem/`) + +**Main Files:** +- `main.py` - FastAPI app (memory API) +- `memory.py` - Memory management logic +- `embedder.py` - Embedding generation +- `graph.py` - Neo4j graph operations +- `Dockerfile` - Container definition +- `requirements.txt` - Python dependencies + +**API Endpoints:** +- `POST /memories` - Add new memory +- `POST /search` - Semantic search +- `GET /health` - Service health + +--- + +## Common Development Tasks + +### Adding a New Endpoint to Cortex + +**Example: Add `/debug/buffer` endpoint** + +1. **Edit `cortex/router.py`:** +```python +@cortex_router.get("/debug/buffer") +async def debug_buffer(session_id: str, limit: int = 10): + """Return last N exchanges from a session buffer.""" + from intake.intake import SESSIONS + + session = SESSIONS.get(session_id) + if not session: + return {"error": "session not found", "session_id": session_id} + + buffer = session["buffer"] + recent = list(buffer)[-limit:] + + return { + "session_id": session_id, + "total_exchanges": len(buffer), + "recent_exchanges": recent + } +``` + +2. **Restart Cortex:** +```bash +docker-compose restart cortex +``` + +3. **Test:** +```bash +curl "http://localhost:7081/debug/buffer?session_id=test&limit=5" +``` + +### Modifying LLM Backend for a Module + +**Example: Switch Cortex to use PRIMARY backend** + +1. **Edit `.env`:** +```bash +CORTEX_LLM=PRIMARY # Change from SECONDARY to PRIMARY +``` + +2. **Restart Cortex:** +```bash +docker-compose restart cortex +``` + +3. **Verify in logs:** +```bash +docker logs cortex | grep "Backend" +``` + +### Adding Diagnostic Logging + +**Example: Log every exchange addition** + +1. **Edit `cortex/intake/intake.py`:** +```python +def add_exchange_internal(exchange: dict): + session_id = exchange.get("session_id") + + # Add detailed logging + print(f"[DEBUG] Adding exchange to {session_id}") + print(f"[DEBUG] User msg: {exchange.get('user_msg', '')[:100]}") + print(f"[DEBUG] Assistant msg: {exchange.get('assistant_msg', '')[:100]}") + + # ... rest of function +``` + +2. **View logs:** +```bash +docker logs cortex -f | grep DEBUG +``` + +--- + +## Debugging Guide + +### Problem: SESSIONS Not Persisting + +**Symptoms:** +- `/debug/sessions` shows empty or only 1 exchange +- Summaries always return empty +- Buffer size doesn't increase + +**Diagnosis Steps:** +1. Check Cortex logs for SESSIONS object ID: + ```bash + docker logs cortex | grep "SESSIONS object id" + ``` + - Should show same ID across all calls + - If IDs differ β†’ module reloading issue + +2. Verify single-worker mode: + ```bash + docker exec cortex cat Dockerfile | grep uvicorn + ``` + - Should NOT have `--workers` flag or `--workers 1` + +3. Check `/debug/sessions` endpoint: + ```bash + curl http://localhost:7081/debug/sessions | jq + ``` + - Should show sessions_object_id and current sessions + +4. Inspect `__init__.py` exists: + ```bash + docker exec cortex ls -la intake/__init__.py + ``` + +**Solution (Fixed in v0.5.1):** +- Ensure `cortex/intake/__init__.py` exists with proper exports +- Verify `bg_summarize()` is implemented (not just TYPE_CHECKING stub) +- Check `/ingest` endpoint doesn't have early return +- Rebuild Cortex container: `docker-compose build cortex && docker-compose restart cortex` + +### Problem: LLM Backend Timeout + +**Symptoms:** +- Cortex `/reason` hangs +- 504 Gateway Timeout errors +- Logs show "waiting for LLM response" + +**Diagnosis Steps:** +1. Test backend directly: + ```bash + # llama.cpp + curl http://10.0.0.44:8080/health + + # Ollama + curl http://10.0.0.3:11434/api/tags + + # OpenAI + curl https://api.openai.com/v1/models \ + -H "Authorization: Bearer $OPENAI_API_KEY" + ``` + +2. Check network connectivity: + ```bash + docker exec cortex ping -c 3 10.0.0.44 + ``` + +3. Review Cortex logs: + ```bash + docker logs cortex -f | grep "LLM" + ``` + +**Solutions:** +- Verify backend URL in `.env` is correct and accessible +- Check firewall rules for backend ports +- Increase timeout in `cortex/llm/llm_router.py` +- Switch to different backend temporarily: `CORTEX_LLM=CLOUD` + +### Problem: Docker Compose Won't Start + +**Symptoms:** +- `docker-compose up -d` fails +- Container exits immediately +- "port already in use" errors + +**Diagnosis Steps:** +1. Check port conflicts: + ```bash + netstat -tulpn | grep -E '7078|7081|7077|5432' + ``` + +2. Check container logs: + ```bash + docker-compose logs --tail=50 + ``` + +3. Verify environment file: + ```bash + cat .env | grep -v "^#" | grep -v "^$" + ``` + +**Solutions:** +- Stop conflicting services: `docker-compose down` +- Check `.env` syntax (no quotes unless necessary) +- Rebuild containers: `docker-compose build --no-cache` +- Check Docker daemon: `systemctl status docker` + +--- + +## Testing Checklist + +### After Making Changes to Cortex + +**1. Build and restart:** +```bash +docker-compose build cortex +docker-compose restart cortex +``` + +**2. Verify service health:** +```bash +curl http://localhost:7081/health +``` + +**3. Test /ingest endpoint:** +```bash +curl -X POST http://localhost:7081/ingest \ + -H "Content-Type: application/json" \ + -d '{ + "session_id": "test", + "user_msg": "Hello", + "assistant_msg": "Hi there!" + }' +``` + +**4. Verify SESSIONS updated:** +```bash +curl http://localhost:7081/debug/sessions | jq '.sessions.test.buffer_size' +``` +- Should show 1 (or increment if already populated) + +**5. Test summarization:** +```bash +curl "http://localhost:7081/debug/summary?session_id=test" | jq '.summary' +``` +- Should return L1/L5/L10/L20/L30 summaries + +**6. Test full pipeline:** +```bash +curl -X POST http://localhost:7078/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{ + "messages": [{"role": "user", "content": "Test message"}], + "session_id": "test" + }' | jq '.choices[0].message.content' +``` + +**7. Check logs for errors:** +```bash +docker logs cortex --tail=50 +``` + +--- + +## Project History & Context + +### Evolution Timeline + +**v0.1.x (2025-09-23 to 2025-09-25)** +- Initial MVP: Relay + Mem0 + Ollama +- Basic memory storage and retrieval +- Simple UI with session support + +**v0.2.x (2025-09-24 to 2025-09-30)** +- Migrated to mem0ai SDK +- Added sessionId support +- Created standalone Lyra-Mem0 stack + +**v0.3.x (2025-09-26 to 2025-10-28)** +- Forked Mem0 β†’ NVGRAM β†’ NeoMem +- Added salience filtering +- Integrated Cortex reasoning VM +- Built RAG system (Beta Lyrae) +- Established multi-backend LLM support + +**v0.4.x (2025-11-05 to 2025-11-13)** +- Major architectural rewire +- Implemented 4-stage reasoning pipeline +- Added reflection, refinement stages +- RAG integration +- LLM router with per-stage backend selection + +**Infrastructure v1.0.0 (2025-11-26)** +- Consolidated 9 `.env` files into single source of truth +- Multi-backend LLM strategy +- Docker Compose consolidation +- Created security templates + +**v0.5.0 (2025-11-28)** +- Fixed all critical API wiring issues +- Added OpenAI-compatible Relay endpoint +- Fixed Cortex β†’ Intake integration +- End-to-end flow verification + +**v0.5.1 (2025-12-11) - CURRENT** +- **Critical fix**: SESSIONS persistence bug +- Implemented `bg_summarize()` stub +- Fixed `/ingest` unreachable code +- Added `cortex/intake/__init__.py` +- Embedded Intake in Cortex (no longer standalone) +- Added diagnostic endpoints +- Lenient error handling +- Documented single-worker constraint + +### Architectural Philosophy + +**Modular Design:** +- Each service has a single, clear responsibility +- Services communicate via well-defined HTTP APIs +- Configuration is centralized but allows per-service overrides + +**Local-First:** +- No reliance on external services (except optional OpenAI) +- All data stored locally (PostgreSQL + Neo4j) +- Can run entirely air-gapped with local LLMs + +**Flexible LLM Backend:** +- Not tied to any single LLM provider +- Can mix local and cloud models +- Per-stage backend selection for optimal performance/cost + +**Error Handling:** +- Lenient mode: Never fail the chat pipeline +- Log errors but continue processing +- Graceful degradation + +**Observability:** +- Diagnostic endpoints for debugging +- Verbose logging mode +- Object ID tracking for singleton verification + +--- + +## Known Issues & Limitations + +### Fixed in v0.5.1 +- βœ… Intake SESSIONS not persisting β†’ **FIXED** +- βœ… `bg_summarize()` NameError β†’ **FIXED** +- βœ… `/ingest` endpoint unreachable code β†’ **FIXED** + +### Current Limitations + +**1. Single-Worker Constraint** +- Cortex must run with single Uvicorn worker +- SESSIONS is in-memory module-level global +- Multi-worker support requires Redis or shared storage +- Documented in `cortex/Dockerfile` lines 7-8 + +**2. NeoMem Integration Incomplete** +- Relay doesn't yet push to NeoMem after responses +- Memory storage planned for v0.5.2 +- Currently all memory is short-term (SESSIONS only) + +**3. RAG Service Disabled** +- Beta Lyrae (RAG) commented out in docker-compose.yml +- Awaiting re-enablement after Intake stabilization +- Code exists but not currently integrated + +**4. Session Management** +- No session cleanup/expiration +- SESSIONS grows unbounded (maxlen=200 per session, but infinite sessions) +- No session list endpoint in Relay + +**5. Persona Integration** +- `PERSONA_ENABLED=false` in `.env` +- Persona Sidecar not fully wired +- Identity loaded but not consistently applied + +### Future Enhancements + +**Short-term (v0.5.2):** +- Enable NeoMem integration in Relay +- Add session cleanup/expiration +- Session list endpoint +- NeoMem health monitoring + +**Medium-term (v0.6.x):** +- Re-enable RAG service +- Migrate SESSIONS to Redis for multi-worker support +- Add request correlation IDs +- Comprehensive health checks + +**Long-term (v0.7.x+):** +- Persona Sidecar full integration +- Autonomous "dream" cycles (self-reflection) +- Verifier module for factual grounding +- Advanced RAG with hybrid search +- Memory consolidation strategies + +--- + +## Troubleshooting Quick Reference + +| Problem | Quick Check | Solution | +|---------|-------------|----------| +| SESSIONS empty | `curl localhost:7081/debug/sessions` | Rebuild Cortex, verify `__init__.py` exists | +| LLM timeout | `curl http://10.0.0.44:8080/health` | Check backend connectivity, increase timeout | +| Port conflict | `netstat -tulpn \| grep 7078` | Stop conflicting service or change port | +| Container crash | `docker logs cortex` | Check logs for Python errors, verify .env syntax | +| Missing package | `docker exec cortex pip list` | Rebuild container, check requirements.txt | +| 502 from Relay | `curl localhost:7081/health` | Verify Cortex is running, check docker network | + +--- + +## API Reference (Quick) + +### Relay (Port 7078) + +**POST /v1/chat/completions** - OpenAI-compatible chat +```json +{ + "messages": [{"role": "user", "content": "..."}], + "session_id": "..." +} +``` + +**GET /_health** - Service health + +### Cortex (Port 7081) + +**POST /reason** - Main reasoning pipeline +```json +{ + "session_id": "...", + "user_prompt": "...", + "temperature": 0.7 // optional +} +``` + +**POST /ingest** - Add exchange to SESSIONS +```json +{ + "session_id": "...", + "user_msg": "...", + "assistant_msg": "..." +} +``` + +**GET /debug/sessions** - Inspect SESSIONS state + +**GET /debug/summary?session_id=X** - Test summarization + +**GET /health** - Service health + +### NeoMem (Port 7077) + +**POST /memories** - Add memory +```json +{ + "messages": [{"role": "...", "content": "..."}], + "user_id": "...", + "metadata": {} +} +``` + +**POST /search** - Semantic search +```json +{ + "query": "...", + "user_id": "...", + "limit": 10 +} +``` + +**GET /health** - Service health + +--- + +## File Manifest (Key Files Only) + +``` +project-lyra/ +β”œβ”€β”€ .env # Root environment variables +β”œβ”€β”€ docker-compose.yml # Service definitions (152 lines) +β”œβ”€β”€ CHANGELOG.md # Version history (836 lines) +β”œβ”€β”€ README.md # User documentation (610 lines) +β”œβ”€β”€ PROJECT_SUMMARY.md # This file (AI context) +β”‚ +β”œβ”€β”€ cortex/ # Reasoning engine +β”‚ β”œβ”€β”€ Dockerfile # Single-worker constraint documented +β”‚ β”œβ”€β”€ requirements.txt +β”‚ β”œβ”€β”€ .env # Cortex overrides +β”‚ β”œβ”€β”€ main.py # FastAPI initialization +β”‚ β”œβ”€β”€ router.py # Routes (306 lines) +β”‚ β”œβ”€β”€ context.py # Context aggregation +β”‚ β”‚ +β”‚ β”œβ”€β”€ intake/ # Short-term memory (embedded) +β”‚ β”‚ β”œβ”€β”€ __init__.py # Package exports +β”‚ β”‚ └── intake.py # Core logic (367 lines) +β”‚ β”‚ +β”‚ β”œβ”€β”€ reasoning/ # Reasoning pipeline +β”‚ β”‚ β”œβ”€β”€ reflection.py # Meta-awareness +β”‚ β”‚ β”œβ”€β”€ reasoning.py # Draft generation +β”‚ β”‚ └── refine.py # Refinement +β”‚ β”‚ +β”‚ β”œβ”€β”€ persona/ # Personality layer +β”‚ β”‚ β”œβ”€β”€ speak.py # Persona application +β”‚ β”‚ └── identity.py # Persona loader +β”‚ β”‚ +β”‚ └── llm/ # LLM integration +β”‚ └── llm_router.py # Backend selector +β”‚ +β”œβ”€β”€ core/relay/ # Orchestrator +β”‚ β”œβ”€β”€ server.js # Express server (Node.js) +β”‚ └── package.json +β”‚ +β”œβ”€β”€ neomem/ # Long-term memory +β”‚ β”œβ”€β”€ Dockerfile +β”‚ β”œβ”€β”€ requirements.txt +β”‚ β”œβ”€β”€ .env # NeoMem overrides +β”‚ └── main.py # Memory API +β”‚ +└── rag/ # RAG system (disabled) + β”œβ”€β”€ rag_api.py + β”œβ”€β”€ rag_chat_import.py + └── chromadb/ +``` + +--- + +## Final Notes for AI Assistants + +### What You Should Know Before Making Changes + +1. **SESSIONS is sacred** - It's a module-level global in `cortex/intake/intake.py`. Don't move it, don't duplicate it, don't make it a class attribute. It must remain a singleton. + +2. **Single-worker is mandatory** - Until SESSIONS is migrated to Redis, Cortex MUST run with a single Uvicorn worker. Multi-worker will cause SESSIONS to be inconsistent. + +3. **Lenient error handling** - The `/ingest` endpoint and other parts of the pipeline use lenient error handling: log errors but always return success. Never fail the chat pipeline. + +4. **Backend routing is environment-driven** - Don't hardcode LLM URLs. Use the `{MODULE}_LLM` environment variables and the llm_router.py system. + +5. **Intake is embedded** - Don't try to make HTTP calls to Intake. Use direct Python imports: `from intake.intake import ...` + +6. **Test with diagnostic endpoints** - Always use `/debug/sessions` and `/debug/summary` to verify SESSIONS behavior after changes. + +7. **Follow the changelog format** - When documenting changes, use the chronological format established in CHANGELOG.md v0.5.1. Group by version, then by change type (Fixed, Added, Changed, etc.). + +### When You Need Help + +- **SESSIONS issues**: Check `cortex/intake/intake.py` lines 11-14 for initialization, lines 325-366 for `add_exchange_internal()` +- **Routing issues**: Check `cortex/router.py` lines 65-189 for `/reason`, lines 201-233 for `/ingest` +- **LLM backend issues**: Check `cortex/llm/llm_router.py` for backend selection logic +- **Environment variables**: Check `.env` lines 13-40 for LLM backends, lines 28-34 for module selection + +### Most Important Thing + +**This project values reliability over features.** It's better to have a simple, working system than a complex, broken one. When in doubt, keep it simple, log everything, and never fail silently. + +--- + +**End of AI Context Summary** + +*This document is maintained to provide complete context for AI assistants working on Project Lyra. Last updated: v0.5.1 (2025-12-11)* diff --git a/docs/TRILLIUM_API.md b/docs/TRILLIUM_API.md new file mode 100644 index 0000000..c7ecb2e --- /dev/null +++ b/docs/TRILLIUM_API.md @@ -0,0 +1,730 @@ +# TriliumNext ETAPI Complete API Reference + +## Overview + +ETAPI is TriliumNext's public/external REST API available since Trilium v0.50. + +**Base URLs:** +- `http://localhost:37740/etapi` +- `http://localhost:8080/etapi` + +**API Version:** 1.0.0 +**License:** Apache 2.0 + +## Authentication + +All operations require authentication using one of these methods: + +### 1. ETAPI Token Authentication (Recommended) +```http +GET /etapi/app-info +Authorization: +``` + +OR (since v0.93.0): +```http +GET /etapi/app-info +Authorization: Bearer +``` + +### 2. Basic Authentication (since v0.56) +```http +GET /etapi/app-info +Authorization: Basic +``` +**Note:** Password must be the ETAPI token (NOT your Trilium password). + +### 3. Get Token via API +```http +POST /etapi/auth/login +Content-Type: application/json + +{ + "password": "your_trilium_password" +} +``` + +**Response:** +```json +{ + "authToken": "Bc4bFn0Ffiok_4NpbVCDnFz7B2WU+pdhW8B5Ne3DiR5wXrEyqdjgRIsk=" +} +``` + +--- + +## Complete API Endpoints + +### Authentication + +#### Login +- **POST** `/auth/login` +- **Description:** Get an ETAPI token based on password +- **Security:** None (public endpoint) +- **Request Body:** + ```json + { + "password": "string" + } + ``` +- **Responses:** + - `201`: Auth token created + - `429`: Client IP blacklisted (too many failed attempts) + +--- + +### Application Information + +#### Get App Info +- **GET** `/app-info` +- **Description:** Get application information +- **Response:** + ```json + { + "appVersion": "0.91.0", + "dbVersion": 231, + "syncVersion": 25, + "buildDate": "2022-02-09T22:52:36+01:00", + "buildRevision": "23daaa2387a0655685377f0a541d154aeec2aae8", + "dataDirectory": "/home/user/data", + "clipperProtocolVersion": "1.0", + "utcDateTime": "2022-03-07T21:54:25.277Z" + } + ``` + +#### Get Metrics +- **GET** `/etapi/metrics` +- **Description:** Get Prometheus-format metrics for monitoring +- **Query Parameters:** + - `format`: `json` or `prometheus` (default: prometheus) +- **Response:** Metrics data including note counts, db stats, etc. + +--- + +### Notes Management + +#### Create Note +- **POST** `/create-note` +- **Description:** Create a note and place it into the note tree +- **Request Body:** + ```json + { + "parentNoteId": "root", + "title": "My Note", + "type": "text", + "mime": "text/html", + "content": "

Hello World

", + "notePosition": 10, + "prefix": "", + "isExpanded": false, + "noteId": "customId123", + "branchId": "customBranchId", + "utcDateCreated": "2021-12-31 19:18:11.930Z", + "utcDateModified": "2021-12-31 19:18:11.930Z" + } + ``` +- **Required Fields:** `parentNoteId`, `title`, `type`, `content` +- **Optional Fields:** `notePosition`, `prefix`, `isExpanded`, `noteId`, `branchId`, timestamps +- **Note Types:** + - `text` - Rich text notes + - `code` - Code notes (requires `mime`) + - `file` - File attachments (requires `mime`) + - `image` - Image notes (requires `mime`) + - `search` - Saved search + - `book` - Book/container note + - `relationMap` - Relation map + - `render` - Render note + - `noteMap` - Note map + - `mermaid` - Mermaid diagrams + - `webView` - Web view + - `shortcut` - Shortcut + - `doc` - Document + - `contentWidget` - Content widget + - `launcher` - Launcher + - `canvas` - Canvas note +- **Response:** `201` with `NoteWithBranch` object + +#### Search Notes +- **GET** `/notes` +- **Description:** Search notes using query syntax +- **Query Parameters:** + - `search` (required): Search query string + - `ancestorNoteId`: Search in subtree only + - `fastSearch`: Boolean for fast search mode + - `includeArchivedNotes`: Include archived notes (default: false) + - `orderBy`: Field to order by (e.g., `title`, `dateModified`) + - `orderDirection`: `asc` or `desc` + - `limit`: Maximum results (default: 10) + - `debug`: Enable debug info +- **Response:** Array of note objects + +#### Get Note +- **GET** `/notes/{noteId}` +- **Description:** Get note metadata by ID +- **Path Parameters:** + - `noteId`: Note ID +- **Response:** Note object with metadata + +#### Get Note Content +- **GET** `/notes/{noteId}/content` +- **Description:** Get note content (HTML/text for text notes, binary for files/images) +- **Path Parameters:** + - `noteId`: Note ID +- **Response:** Note content (content-type varies by note type) + +#### Update Note Content +- **PUT** `/notes/{noteId}/content` +- **Description:** Update note content +- **Path Parameters:** + - `noteId`: Note ID +- **Request Body:** Raw content (HTML for text notes, binary for files) +- **Response:** `204` No Content + +#### Update Note Metadata +- **PATCH** `/notes/{noteId}` +- **Description:** Update note metadata (title, type, mime, etc.) +- **Path Parameters:** + - `noteId`: Note ID +- **Request Body:** + ```json + { + "title": "Updated Title", + "type": "text", + "mime": "text/html" + } + ``` +- **Response:** `200` with updated note object + +#### Delete Note +- **DELETE** `/notes/{noteId}` +- **Description:** Delete note and all its branches +- **Path Parameters:** + - `noteId`: Note ID +- **Response:** `204` No Content +- **Note:** Deletes all clones/branches of the note + +#### Export Note +- **GET** `/notes/{noteId}/export` +- **Description:** Export note as ZIP file (with optional subtree) +- **Path Parameters:** + - `noteId`: Note ID (use "root" to export entire tree) +- **Query Parameters:** + - `format`: `html` or `markdown`/`md` +- **Response:** ZIP file download + +--- + +### Branches Management + +Branches represent note clones/placements in the tree. A single note can exist in multiple locations via different branches. + +#### Create Branch +- **POST** `/branches` +- **Description:** Create a branch (clone a note to another location) +- **Request Body:** + ```json + { + "noteId": "existingNoteId", + "parentNoteId": "targetParentId", + "prefix": "Branch Prefix", + "notePosition": 10, + "isExpanded": false, + "branchId": "customBranchId" + } + ``` +- **Required Fields:** `noteId`, `parentNoteId` +- **Response:** `201` with Branch object + +#### Get Branch +- **GET** `/branches/{branchId}` +- **Description:** Get branch by ID +- **Path Parameters:** + - `branchId`: Branch ID +- **Response:** Branch object + +#### Update Branch +- **PATCH** `/branches/{branchId}` +- **Description:** Update branch (prefix, notePosition) +- **Path Parameters:** + - `branchId`: Branch ID +- **Request Body:** + ```json + { + "prefix": "New Prefix", + "notePosition": 20, + "isExpanded": true + } + ``` +- **Response:** `200` with updated branch +- **Note:** Only `prefix`, `notePosition`, and `isExpanded` can be updated. For other properties, delete and recreate. + +#### Set Branch Prefix +- **PATCH** `/branches/{branchId}/set-prefix` +- **Description:** Set branch prefix +- **Path Parameters:** + - `branchId`: Branch ID +- **Request Body:** + ```json + { + "prefix": "New Prefix" + } + ``` + +#### Move Branch to Parent +- **POST** `/branches/{branchId}/set-note-to-parent` +- **Description:** Move branch to a different parent +- **Path Parameters:** + - `branchId`: Branch ID +- **Request Body:** + ```json + { + "parentNoteId": "newParentId" + } + ``` + +#### Delete Branch +- **DELETE** `/branches/{branchId}` +- **Description:** Delete branch (removes note from this tree location) +- **Path Parameters:** + - `branchId`: Branch ID +- **Response:** `204` No Content +- **Note:** If this is the last branch of the note, the note itself is deleted + +#### Refresh Note Ordering +- **PATCH** `/refresh-note-ordering/{parentNoteId}` +- **Description:** Push notePosition changes to connected clients +- **Path Parameters:** + - `parentNoteId`: Parent note ID +- **Note:** Call this after updating branch notePositions to sync changes to clients + +--- + +### Attributes Management + +Attributes include labels (key-value metadata) and relations (links between notes). + +#### Create Attribute +- **POST** `/attributes` +- **Description:** Create an attribute +- **Request Body:** + ```json + { + "noteId": "targetNoteId", + "type": "label", + "name": "priority", + "value": "high", + "position": 10, + "isInheritable": false, + "attributeId": "customAttributeId" + } + ``` +- **Attribute Types:** + - `label`: Key-value metadata + - `relation`: Link to another note (value is target noteId) +- **Required Fields:** `noteId`, `type`, `name` +- **Optional Fields:** `value`, `position`, `isInheritable`, `attributeId` +- **Response:** `201` with Attribute object + +#### Create Attribute for Note +- **POST** `/notes/{noteId}/attributes` +- **Description:** Create attribute for specific note +- **Path Parameters:** + - `noteId`: Note ID +- **Request Body:** Same as Create Attribute (noteId not required) + +#### Get Attribute +- **GET** `/attributes/{attributeId}` +- **Description:** Get attribute by ID +- **Path Parameters:** + - `attributeId`: Attribute ID +- **Response:** Attribute object + +#### Get Note Attributes +- **GET** `/notes/{noteId}/attributes` +- **Description:** Get all attributes for a note +- **Path Parameters:** + - `noteId`: Note ID +- **Response:** Array of attribute objects + +#### Update Attribute +- **PATCH** `/attributes/{attributeId}` +- **Description:** Update attribute (name, value, position) +- **Path Parameters:** + - `attributeId`: Attribute ID +- **Request Body:** + ```json + { + "name": "newName", + "value": "newValue", + "position": 20, + "isInheritable": true + } + ``` +- **Response:** `200` with updated attribute + +#### Delete Attribute +- **DELETE** `/attributes/{attributeId}` +- **Description:** Delete attribute +- **Path Parameters:** + - `attributeId`: Attribute ID +- **Response:** `204` No Content + +--- + +### Attachments Management + +#### Create Attachment +- **POST** `/attachments` +- **Description:** Create attachment for a note +- **Request Body:** Multipart form data with file + ```json + { + "ownerId": "noteId", + "role": "image", + "mime": "image/png", + "title": "Screenshot", + "position": 10, + "attachmentId": "customAttachmentId" + } + ``` +- **Required Fields:** `ownerId`, file data +- **Optional Fields:** `role`, `mime`, `title`, `position`, `attachmentId` +- **Response:** `201` with Attachment object + +#### Create Attachment for Note +- **POST** `/notes/{noteId}/attachments` +- **Description:** Create attachment (alternative endpoint) +- **Path Parameters:** + - `noteId`: Note ID +- **Request Body:** Same as Create Attachment (ownerId not required) + +#### Get Attachment +- **GET** `/attachments/{attachmentId}` +- **Description:** Get attachment metadata +- **Path Parameters:** + - `attachmentId`: Attachment ID +- **Response:** Attachment object + +#### Get Attachment Content +- **GET** `/attachments/{attachmentId}/content` +- **Description:** Get attachment binary content +- **Path Parameters:** + - `attachmentId`: Attachment ID +- **Response:** Binary content with appropriate MIME type + +#### Get Note Attachments +- **GET** `/notes/{noteId}/attachments` +- **Description:** Get all attachments for a note +- **Path Parameters:** + - `noteId`: Note ID +- **Response:** Array of attachment objects + +#### Update Attachment Content +- **PUT** `/attachments/{attachmentId}/content` +- **Description:** Update attachment binary content +- **Path Parameters:** + - `attachmentId`: Attachment ID +- **Request Body:** Binary file data +- **Response:** `204` No Content + +#### Update Attachment Metadata +- **PATCH** `/attachments/{attachmentId}` +- **Description:** Update attachment metadata +- **Path Parameters:** + - `attachmentId`: Attachment ID +- **Request Body:** + ```json + { + "title": "New Title", + "role": "image", + "mime": "image/jpeg", + "position": 20 + } + ``` +- **Response:** `200` with updated attachment + +#### Delete Attachment +- **DELETE** `/attachments/{attachmentId}` +- **Description:** Delete attachment +- **Path Parameters:** + - `attachmentId`: Attachment ID +- **Response:** `204` No Content + +--- + +### Special Purpose Endpoints + +#### Get Inbox Note +- **GET** `/inbox/{date}` +- **Description:** Get or create inbox note for specific date +- **Path Parameters:** + - `date`: Date in format `YYYY-MM-DD` +- **Response:** Note object +- **Behavior:** + - Returns fixed inbox note (marked with `#inbox` label) if configured + - Otherwise returns/creates day note in journal for the specified date + +#### Get Day Note +- **GET** `/calendar/days/{date}` +- **Description:** Get or create day note +- **Path Parameters:** + - `date`: Date in format `YYYY-MM-DD` (e.g., `2022-12-31`) +- **Response:** Note object +- **Note:** Creates note if it doesn't exist + +#### Get Month Note +- **GET** `/calendar/months/{month}` +- **Description:** Get or create month note +- **Path Parameters:** + - `month`: Month in format `YYYY-MM` (e.g., `2022-12`) +- **Response:** Note object +- **Note:** Creates note if it doesn't exist + +#### Get Year Note +- **GET** `/calendar/years/{year}` +- **Description:** Get or create year note +- **Path Parameters:** + - `year`: Year in format `YYYY` (e.g., `2022`) +- **Response:** Note object +- **Note:** Creates note if it doesn't exist + +--- + +### Backup + +#### Create Backup +- **PUT** `/backup/{backupName}` +- **Description:** Create a database backup +- **Path Parameters:** + - `backupName`: Backup filename (without extension) +- **Example:** `PUT /backup/now` creates `backup-now.db` +- **Response:** `204` No Content + +--- + +## Data Types and Schemas + +### Common Field Types + +- **EntityId**: 12-character alphanumeric string (e.g., `evnnmvHTCgIn`) +- **LocalDateTime**: `YYYY-MM-DD HH:mm:ss.SSSΒ±ZZZZ` (e.g., `2021-12-31 20:18:11.930+0100`) +- **UtcDateTime**: `YYYY-MM-DD HH:mm:ss.SSSZ` (e.g., `2021-12-31 19:18:11.930Z`) + +### Note Position + +- Normal ordering: 10, 20, 30, 40... +- First position: use value < 10 (e.g., 5) +- Last position: use large value (e.g., 1000000) +- Between existing: use value between their positions + +### Branch Prefix + +Branch-specific title prefix displayed in the tree. Useful when same note appears in multiple locations with slightly different context. + +--- + +## Error Responses + +All endpoints may return these error responses: + +### Standard Error Object +```json +{ + "status": 400, + "code": "NOTE_IS_PROTECTED", + "message": "Note 'evnnmvHTCgIn' is protected and cannot be modified through ETAPI" +} +``` + +### Common HTTP Status Codes + +- `200`: Success +- `201`: Resource created +- `204`: Success (no content) +- `400`: Bad request (validation error) +- `401`: Unauthorized (invalid token) +- `404`: Not found +- `429`: Too many requests (rate limited/blacklisted) +- `500`: Internal server error + +### Common Error Codes + +- `NOTE_IS_PROTECTED`: Protected note cannot be modified +- `INVALID_TOKEN`: Invalid or expired ETAPI token +- `VALIDATION_ERROR`: Request validation failed +- `NOT_FOUND`: Resource not found +- `RATE_LIMITED`: Too many requests + +--- + +## Search Query Syntax + +The `/notes` search endpoint supports Trilium's query language: + +### Basic Search +``` +python # Search in title and content +#todo # Find notes with label "todo" +~project # Find notes with relation "project" +``` + +### Advanced Operators +``` +note.title =* "meeting" # Title contains "meeting" +note.title %= ".*2022.*" # Regex in title +#priority = "high" # Label with specific value +~template = "someNoteId" # Relation to specific note +#created >= MONTH-1 # Created in last month +note.dateModified >= "2022-01-01" # Modified after date +``` + +### Combining Queries +``` +#todo AND #urgent # Both labels +#work OR #personal # Either label +#project AND note.title =* "Q1" # Label AND title condition +``` + +### Hierarchical Queries +``` +note.parents.title = "Work" # Direct parent title +note.ancestors.title = "Archive" # Any ancestor title +note.children.title =* "Chapter" # Direct children +``` + +See Trilium Search Documentation for complete syntax. + +--- + +## Rate Limiting + +- Failed authentication attempts can result in IP blacklist +- Blacklisted IPs receive `429` response +- Wait period required before retry +- Use valid tokens to avoid blacklisting + +--- + +## Configuration Notes + +### Upload Size Limits + +- Default: 250MB +- Disable limit: Set `TRILIUM_NO_UPLOAD_LIMIT=true` +- Custom limit: Set `MAX_ALLOWED_FILE_SIZE_MB=` + +### Network Configuration + +ETAPI accessible through: +- Local interface: `http://localhost:8080/etapi` +- Network interface: Configure reverse proxy (nginx/Apache) +- SSL/TLS: Recommended for production use + +--- + +## Best Practices + +1. **Always use ETAPI tokens** (not passwords) for authentication +2. **Store tokens securely** - they provide full access to your Trilium instance +3. **Use notePosition strategically** - leave gaps (10, 20, 30) for easy insertion +4. **Handle branches carefully** - deleting last branch deletes the note +5. **Check for protected notes** - they cannot be modified via ETAPI +6. **Implement rate limiting** in your client to avoid blacklisting +7. **Use search efficiently** - leverage fastSearch for better performance +8. **Call refresh-note-ordering** after bulk branch position updates +9. **Validate data before submission** - reduce error responses +10. **Handle errors gracefully** - check status codes and error messages + +--- + +## Example Workflows + +### Create a Note with Attributes +```bash +# 1. Create note +NOTE_RESPONSE=$(curl -X POST "$SERVER/etapi/create-note" \ + -H "Authorization: $TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "parentNoteId": "root", + "title": "Project TODO", + "type": "text", + "content": "

Task list

" + }') + +NOTE_ID=$(echo $NOTE_RESPONSE | jq -r '.note.noteId') + +# 2. Add label +curl -X POST "$SERVER/etapi/attributes" \ + -H "Authorization: $TOKEN" \ + -H "Content-Type: application/json" \ + -d "{ + \"noteId\": \"$NOTE_ID\", + \"type\": \"label\", + \"name\": \"priority\", + \"value\": \"high\" + }" +``` + +### Clone Note to Multiple Locations +```bash +# Clone note to another parent +curl -X POST "$SERVER/etapi/branches" \ + -H "Authorization: $TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "noteId": "existingNoteId", + "parentNoteId": "anotherParentId", + "prefix": "Reference: " + }' +``` + +### Daily Journal Entry +```bash +# Get or create today's note +TODAY=$(date +%Y-%m-%d) +curl "$SERVER/etapi/calendar/days/$TODAY" \ + -H "Authorization: $TOKEN" +``` + +--- + +## Client Libraries + +### Python +- **trilium-py**: Full-featured client with extended functionality +- **PyTrilium**: Lightweight wrapper matching OpenAPI spec +- **trilium-alchemy**: SQLAlchemy-style SDK with CLI toolkit + +### Node.js +- **trilium-etapi**: TypeScript wrapper with type safety + +### Other Tools +- **trilium-mcp-server**: Model Context Protocol server for LLMs +- **openapi-mcp-generator**: Generate MCP servers from OpenAPI specs + +--- + +## Version Compatibility + +- ETAPI introduced: Trilium v0.50 +- Basic Auth support: v0.56 +- Bearer token format: v0.93.0 +- TriliumNext fork: Compatible with Trilium API, ongoing development + +Check `/app-info` endpoint for version details of your instance. + +--- + +## Additional Resources + +- **Official Documentation**: https://docs.triliumnotes.org/ +- **GitHub Repository**: https://github.com/TriliumNext/Trilium +- **Search Syntax Guide**: https://github.com/zadam/trilium/wiki/Search +- **Community Resources**: https://github.com/Nriver/awesome-trilium + +--- + +**License:** Apache 2.0 +**Maintainer:** TriliumNext Community +**Contact:** https://github.com/TriliumNext/Trilium/discussions diff --git a/docs/lyra_tree.txt b/docs/lyra_tree.txt new file mode 100644 index 0000000..f0b6df3 --- /dev/null +++ b/docs/lyra_tree.txt @@ -0,0 +1,441 @@ +β”œβ”€β”€ CHANGELOG.md +β”œβ”€β”€ core +β”‚ β”œβ”€β”€ env experiments +β”‚ β”œβ”€β”€ persona-sidecar +β”‚ β”‚ β”œβ”€β”€ Dockerfile +β”‚ β”‚ β”œβ”€β”€ package.json +β”‚ β”‚ β”œβ”€β”€ persona-server.js +β”‚ β”‚ └── personas.json +β”‚ β”œβ”€β”€ relay +β”‚ β”‚ β”œβ”€β”€ Dockerfile +β”‚ β”‚ β”œβ”€β”€ lib +β”‚ β”‚ β”‚ β”œβ”€β”€ cortex.js +β”‚ β”‚ β”‚ └── llm.js +β”‚ β”‚ β”œβ”€β”€ package.json +β”‚ β”‚ β”œβ”€β”€ package-lock.json +β”‚ β”‚ β”œβ”€β”€ server.js +β”‚ β”‚ β”œβ”€β”€ sessions +β”‚ β”‚ β”‚ β”œβ”€β”€ default.jsonl +β”‚ β”‚ β”‚ β”œβ”€β”€ sess-6rxu7eia.json +β”‚ β”‚ β”‚ β”œβ”€β”€ sess-6rxu7eia.jsonl +β”‚ β”‚ β”‚ β”œβ”€β”€ sess-l08ndm60.json +β”‚ β”‚ β”‚ └── sess-l08ndm60.jsonl +β”‚ β”‚ └── test-llm.js +β”‚ β”œβ”€β”€ relay-backup +β”‚ └── ui +β”‚ β”œβ”€β”€ index.html +β”‚ β”œβ”€β”€ manifest.json +β”‚ └── style.css +β”œβ”€β”€ cortex +β”‚ β”œβ”€β”€ context.py +β”‚ β”œβ”€β”€ Dockerfile +β”‚ β”œβ”€β”€ ingest +β”‚ β”‚ β”œβ”€β”€ ingest_handler.py +β”‚ β”‚ β”œβ”€β”€ __init__.py +β”‚ β”‚ └── intake_client.py +β”‚ β”œβ”€β”€ intake +β”‚ β”‚ β”œβ”€β”€ __init__.py +β”‚ β”‚ β”œβ”€β”€ intake.py +β”‚ β”‚ └── logs +β”‚ β”œβ”€β”€ llm +β”‚ β”‚ β”œβ”€β”€ __init__.py +β”‚ β”‚ └── llm_router.py +β”‚ β”œβ”€β”€ logs +β”‚ β”‚ β”œβ”€β”€ cortex_verbose_debug.log +β”‚ β”‚ └── reflections.log +β”‚ β”œβ”€β”€ main.py +β”‚ β”œβ”€β”€ neomem_client.py +β”‚ β”œβ”€β”€ persona +β”‚ β”‚ β”œβ”€β”€ identity.py +β”‚ β”‚ β”œβ”€β”€ __init__.py +β”‚ β”‚ └── speak.py +β”‚ β”œβ”€β”€ rag.py +β”‚ β”œβ”€β”€ reasoning +β”‚ β”‚ β”œβ”€β”€ __init__.py +β”‚ β”‚ β”œβ”€β”€ reasoning.py +β”‚ β”‚ β”œβ”€β”€ refine.py +β”‚ β”‚ └── reflection.py +β”‚ β”œβ”€β”€ requirements.txt +β”‚ β”œβ”€β”€ router.py +β”‚ β”œβ”€β”€ tests +β”‚ └── utils +β”‚ β”œβ”€β”€ config.py +β”‚ β”œβ”€β”€ __init__.py +β”‚ β”œβ”€β”€ log_utils.py +β”‚ └── schema.py +β”œβ”€β”€ deprecated.env.txt +β”œβ”€β”€ DEPRECATED_FILES.md +β”œβ”€β”€ docker-compose.yml +β”œβ”€β”€ docs +β”‚ β”œβ”€β”€ ARCHITECTURE_v0-6-0.md +β”‚ β”œβ”€β”€ ENVIRONMENT_VARIABLES.md +β”‚ β”œβ”€β”€ lyra_tree.txt +β”‚ └── PROJECT_SUMMARY.md +β”œβ”€β”€ intake-logs +β”‚ └── summaries.log +β”œβ”€β”€ neomem +β”‚ β”œβ”€β”€ _archive +β”‚ β”‚ └── old_servers +β”‚ β”‚ β”œβ”€β”€ main_backup.py +β”‚ β”‚ └── main_dev.py +β”‚ β”œβ”€β”€ docker-compose.yml +β”‚ β”œβ”€β”€ Dockerfile +β”‚ β”œβ”€β”€ neomem +β”‚ β”‚ β”œβ”€β”€ api +β”‚ β”‚ β”œβ”€β”€ client +β”‚ β”‚ β”‚ β”œβ”€β”€ __init__.py +β”‚ β”‚ β”‚ β”œβ”€β”€ main.py +β”‚ β”‚ β”‚ β”œβ”€β”€ project.py +β”‚ β”‚ β”‚ └── utils.py +β”‚ β”‚ β”œβ”€β”€ configs +β”‚ β”‚ β”‚ β”œβ”€β”€ base.py +β”‚ β”‚ β”‚ β”œβ”€β”€ embeddings +β”‚ β”‚ β”‚ β”‚ β”œβ”€β”€ base.py +β”‚ β”‚ β”‚ β”‚ └── __init__.py +β”‚ β”‚ β”‚ β”œβ”€β”€ enums.py +β”‚ β”‚ β”‚ β”œβ”€β”€ __init__.py +β”‚ β”‚ β”‚ β”œβ”€β”€ llms +β”‚ β”‚ β”‚ β”‚ β”œβ”€β”€ anthropic.py +β”‚ β”‚ β”‚ β”‚ β”œβ”€β”€ aws_bedrock.py +β”‚ β”‚ β”‚ β”‚ β”œβ”€β”€ azure.py +β”‚ β”‚ β”‚ β”‚ β”œβ”€β”€ base.py +β”‚ β”‚ β”‚ β”‚ β”œβ”€β”€ deepseek.py +β”‚ β”‚ β”‚ β”‚ β”œβ”€β”€ __init__.py +β”‚ β”‚ β”‚ β”‚ β”œβ”€β”€ lmstudio.py +β”‚ β”‚ β”‚ β”‚ β”œβ”€β”€ ollama.py +β”‚ β”‚ β”‚ β”‚ β”œβ”€β”€ openai.py +β”‚ β”‚ β”‚ β”‚ └── vllm.py +β”‚ β”‚ β”‚ β”œβ”€β”€ prompts.py +β”‚ β”‚ β”‚ └── vector_stores +β”‚ β”‚ β”‚ β”œβ”€β”€ azure_ai_search.py +β”‚ β”‚ β”‚ β”œβ”€β”€ azure_mysql.py +β”‚ β”‚ β”‚ β”œβ”€β”€ baidu.py +β”‚ β”‚ β”‚ β”œβ”€β”€ chroma.py +β”‚ β”‚ β”‚ β”œβ”€β”€ databricks.py +β”‚ β”‚ β”‚ β”œβ”€β”€ elasticsearch.py +β”‚ β”‚ β”‚ β”œβ”€β”€ faiss.py +β”‚ β”‚ β”‚ β”œβ”€β”€ __init__.py +β”‚ β”‚ β”‚ β”œβ”€β”€ langchain.py +β”‚ β”‚ β”‚ β”œβ”€β”€ milvus.py +β”‚ β”‚ β”‚ β”œβ”€β”€ mongodb.py +β”‚ β”‚ β”‚ β”œβ”€β”€ neptune.py +β”‚ β”‚ β”‚ β”œβ”€β”€ opensearch.py +β”‚ β”‚ β”‚ β”œβ”€β”€ pgvector.py +β”‚ β”‚ β”‚ β”œβ”€β”€ pinecone.py +β”‚ β”‚ β”‚ β”œβ”€β”€ qdrant.py +β”‚ β”‚ β”‚ β”œβ”€β”€ redis.py +β”‚ β”‚ β”‚ β”œβ”€β”€ s3_vectors.py +β”‚ β”‚ β”‚ β”œβ”€β”€ supabase.py +β”‚ β”‚ β”‚ β”œβ”€β”€ upstash_vector.py +β”‚ β”‚ β”‚ β”œβ”€β”€ valkey.py +β”‚ β”‚ β”‚ β”œβ”€β”€ vertex_ai_vector_search.py +β”‚ β”‚ β”‚ └── weaviate.py +β”‚ β”‚ β”œβ”€β”€ core +β”‚ β”‚ β”œβ”€β”€ embeddings +β”‚ β”‚ β”‚ β”œβ”€β”€ aws_bedrock.py +β”‚ β”‚ β”‚ β”œβ”€β”€ azure_openai.py +β”‚ β”‚ β”‚ β”œβ”€β”€ base.py +β”‚ β”‚ β”‚ β”œβ”€β”€ configs.py +β”‚ β”‚ β”‚ β”œβ”€β”€ gemini.py +β”‚ β”‚ β”‚ β”œβ”€β”€ huggingface.py +β”‚ β”‚ β”‚ β”œβ”€β”€ __init__.py +β”‚ β”‚ β”‚ β”œβ”€β”€ langchain.py +β”‚ β”‚ β”‚ β”œβ”€β”€ lmstudio.py +β”‚ β”‚ β”‚ β”œβ”€β”€ mock.py +β”‚ β”‚ β”‚ β”œβ”€β”€ ollama.py +β”‚ β”‚ β”‚ β”œβ”€β”€ openai.py +β”‚ β”‚ β”‚ β”œβ”€β”€ together.py +β”‚ β”‚ β”‚ └── vertexai.py +β”‚ β”‚ β”œβ”€β”€ exceptions.py +β”‚ β”‚ β”œβ”€β”€ graphs +β”‚ β”‚ β”‚ β”œβ”€β”€ configs.py +β”‚ β”‚ β”‚ β”œβ”€β”€ __init__.py +β”‚ β”‚ β”‚ β”œβ”€β”€ neptune +β”‚ β”‚ β”‚ β”‚ β”œβ”€β”€ base.py +β”‚ β”‚ β”‚ β”‚ β”œβ”€β”€ __init__.py +β”‚ β”‚ β”‚ β”‚ β”œβ”€β”€ neptunedb.py +β”‚ β”‚ β”‚ β”‚ └── neptunegraph.py +β”‚ β”‚ β”‚ β”œβ”€β”€ tools.py +β”‚ β”‚ β”‚ └── utils.py +β”‚ β”‚ β”œβ”€β”€ __init__.py +β”‚ β”‚ β”œβ”€β”€ LICENSE +β”‚ β”‚ β”œβ”€β”€ llms +β”‚ β”‚ β”‚ β”œβ”€β”€ anthropic.py +β”‚ β”‚ β”‚ β”œβ”€β”€ aws_bedrock.py +β”‚ β”‚ β”‚ β”œβ”€β”€ azure_openai.py +β”‚ β”‚ β”‚ β”œβ”€β”€ azure_openai_structured.py +β”‚ β”‚ β”‚ β”œβ”€β”€ base.py +β”‚ β”‚ β”‚ β”œβ”€β”€ configs.py +β”‚ β”‚ β”‚ β”œβ”€β”€ deepseek.py +β”‚ β”‚ β”‚ β”œβ”€β”€ gemini.py +β”‚ β”‚ β”‚ β”œβ”€β”€ groq.py +β”‚ β”‚ β”‚ β”œβ”€β”€ __init__.py +β”‚ β”‚ β”‚ β”œβ”€β”€ langchain.py +β”‚ β”‚ β”‚ β”œβ”€β”€ litellm.py +β”‚ β”‚ β”‚ β”œβ”€β”€ lmstudio.py +β”‚ β”‚ β”‚ β”œβ”€β”€ ollama.py +β”‚ β”‚ β”‚ β”œβ”€β”€ openai.py +β”‚ β”‚ β”‚ β”œβ”€β”€ openai_structured.py +β”‚ β”‚ β”‚ β”œβ”€β”€ sarvam.py +β”‚ β”‚ β”‚ β”œβ”€β”€ together.py +β”‚ β”‚ β”‚ β”œβ”€β”€ vllm.py +β”‚ β”‚ β”‚ └── xai.py +β”‚ β”‚ β”œβ”€β”€ memory +β”‚ β”‚ β”‚ β”œβ”€β”€ base.py +β”‚ β”‚ β”‚ β”œβ”€β”€ graph_memory.py +β”‚ β”‚ β”‚ β”œβ”€β”€ __init__.py +β”‚ β”‚ β”‚ β”œβ”€β”€ kuzu_memory.py +β”‚ β”‚ β”‚ β”œβ”€β”€ main.py +β”‚ β”‚ β”‚ β”œβ”€β”€ memgraph_memory.py +β”‚ β”‚ β”‚ β”œβ”€β”€ setup.py +β”‚ β”‚ β”‚ β”œβ”€β”€ storage.py +β”‚ β”‚ β”‚ β”œβ”€β”€ telemetry.py +β”‚ β”‚ β”‚ └── utils.py +β”‚ β”‚ β”œβ”€β”€ proxy +β”‚ β”‚ β”‚ β”œβ”€β”€ __init__.py +β”‚ β”‚ β”‚ └── main.py +β”‚ β”‚ β”œβ”€β”€ server +β”‚ β”‚ β”‚ β”œβ”€β”€ dev.Dockerfile +β”‚ β”‚ β”‚ β”œβ”€β”€ docker-compose.yaml +β”‚ β”‚ β”‚ β”œβ”€β”€ Dockerfile +β”‚ β”‚ β”‚ β”œβ”€β”€ main_old.py +β”‚ β”‚ β”‚ β”œβ”€β”€ main.py +β”‚ β”‚ β”‚ β”œβ”€β”€ Makefile +β”‚ β”‚ β”‚ β”œβ”€β”€ README.md +β”‚ β”‚ β”‚ └── requirements.txt +β”‚ β”‚ β”œβ”€β”€ storage +β”‚ β”‚ β”œβ”€β”€ utils +β”‚ β”‚ β”‚ └── factory.py +β”‚ β”‚ └── vector_stores +β”‚ β”‚ β”œβ”€β”€ azure_ai_search.py +β”‚ β”‚ β”œβ”€β”€ azure_mysql.py +β”‚ β”‚ β”œβ”€β”€ baidu.py +β”‚ β”‚ β”œβ”€β”€ base.py +β”‚ β”‚ β”œβ”€β”€ chroma.py +β”‚ β”‚ β”œβ”€β”€ configs.py +β”‚ β”‚ β”œβ”€β”€ databricks.py +β”‚ β”‚ β”œβ”€β”€ elasticsearch.py +β”‚ β”‚ β”œβ”€β”€ faiss.py +β”‚ β”‚ β”œβ”€β”€ __init__.py +β”‚ β”‚ β”œβ”€β”€ langchain.py +β”‚ β”‚ β”œβ”€β”€ milvus.py +β”‚ β”‚ β”œβ”€β”€ mongodb.py +β”‚ β”‚ β”œβ”€β”€ neptune_analytics.py +β”‚ β”‚ β”œβ”€β”€ opensearch.py +β”‚ β”‚ β”œβ”€β”€ pgvector.py +β”‚ β”‚ β”œβ”€β”€ pinecone.py +β”‚ β”‚ β”œβ”€β”€ qdrant.py +β”‚ β”‚ β”œβ”€β”€ redis.py +β”‚ β”‚ β”œβ”€β”€ s3_vectors.py +β”‚ β”‚ β”œβ”€β”€ supabase.py +β”‚ β”‚ β”œβ”€β”€ upstash_vector.py +β”‚ β”‚ β”œβ”€β”€ valkey.py +β”‚ β”‚ β”œβ”€β”€ vertex_ai_vector_search.py +β”‚ β”‚ └── weaviate.py +β”‚ β”œβ”€β”€ neomem_history +β”‚ β”‚ └── history.db +β”‚ β”œβ”€β”€ pyproject.toml +β”‚ β”œβ”€β”€ README.md +β”‚ └── requirements.txt +β”œβ”€β”€ neomem_history +β”‚ └── history.db +β”œβ”€β”€ rag +β”‚ β”œβ”€β”€ chatlogs +β”‚ β”‚ └── lyra +β”‚ β”‚ β”œβ”€β”€ 0000_Wire_ROCm_to_Cortex.json +β”‚ β”‚ β”œβ”€β”€ 0001_Branch___10_22_ct201branch-ssh_tut.json +β”‚ β”‚ β”œβ”€β”€ 0002_cortex_LLMs_11-1-25.json +β”‚ β”‚ β”œβ”€β”€ 0003_RAG_beta.json +β”‚ β”‚ β”œβ”€β”€ 0005_Cortex_v0_4_0_planning.json +β”‚ β”‚ β”œβ”€β”€ 0006_Cortex_v0_4_0_Refinement.json +β”‚ β”‚ β”œβ”€β”€ 0009_Branch___Cortex_v0_4_0_planning.json +β”‚ β”‚ β”œβ”€β”€ 0012_Cortex_4_-_neomem_11-1-25.json +β”‚ β”‚ β”œβ”€β”€ 0016_Memory_consolidation_concept.json +β”‚ β”‚ β”œβ”€β”€ 0017_Model_inventory_review.json +β”‚ β”‚ β”œβ”€β”€ 0018_Branch___Memory_consolidation_concept.json +β”‚ β”‚ β”œβ”€β”€ 0022_Branch___Intake_conversation_summaries.json +β”‚ β”‚ β”œβ”€β”€ 0026_Intake_conversation_summaries.json +β”‚ β”‚ β”œβ”€β”€ 0027_Trilium_AI_LLM_setup.json +β”‚ β”‚ β”œβ”€β”€ 0028_LLMs_and_sycophancy_levels.json +β”‚ β”‚ β”œβ”€β”€ 0031_UI_improvement_plan.json +β”‚ β”‚ β”œβ”€β”€ 0035_10_27-neomem_update.json +β”‚ β”‚ β”œβ”€β”€ 0044_Install_llama_cpp_on_ct201.json +β”‚ β”‚ β”œβ”€β”€ 0045_AI_task_assistant.json +β”‚ β”‚ β”œβ”€β”€ 0047_Project_scope_creation.json +β”‚ β”‚ β”œβ”€β”€ 0052_View_docker_container_logs.json +β”‚ β”‚ β”œβ”€β”€ 0053_10_21-Proxmox_fan_control.json +β”‚ β”‚ β”œβ”€β”€ 0054_10_21-pytorch_branch_Quant_experiments.json +β”‚ β”‚ β”œβ”€β”€ 0055_10_22_ct201branch-ssh_tut.json +β”‚ β”‚ β”œβ”€β”€ 0060_Lyra_project_folder_issue.json +β”‚ β”‚ β”œβ”€β”€ 0062_Build_pytorch_API.json +β”‚ β”‚ β”œβ”€β”€ 0063_PokerBrain_dataset_structure.json +β”‚ β”‚ β”œβ”€β”€ 0065_Install_PyTorch_setup.json +β”‚ β”‚ β”œβ”€β”€ 0066_ROCm_PyTorch_setup_quirks.json +β”‚ β”‚ β”œβ”€β”€ 0067_VM_model_setup_steps.json +β”‚ β”‚ β”œβ”€β”€ 0070_Proxmox_disk_error_fix.json +β”‚ β”‚ β”œβ”€β”€ 0072_Docker_Compose_vs_Portainer.json +β”‚ β”‚ β”œβ”€β”€ 0073_Check_system_temps_Proxmox.json +β”‚ β”‚ β”œβ”€β”€ 0075_Cortex_gpu_progress.json +β”‚ β”‚ β”œβ”€β”€ 0076_Backup_Proxmox_before_upgrade.json +β”‚ β”‚ β”œβ”€β”€ 0077_Storage_cleanup_advice.json +β”‚ β”‚ β”œβ”€β”€ 0082_Install_ROCm_on_Proxmox.json +β”‚ β”‚ β”œβ”€β”€ 0088_Thalamus_program_summary.json +β”‚ β”‚ β”œβ”€β”€ 0094_Cortex_blueprint_development.json +β”‚ β”‚ β”œβ”€β”€ 0095_mem0_advancments.json +β”‚ β”‚ β”œβ”€β”€ 0096_Embedding_provider_swap.json +β”‚ β”‚ β”œβ”€β”€ 0097_Update_git_commit_steps.json +β”‚ β”‚ β”œβ”€β”€ 0098_AI_software_description.json +β”‚ β”‚ β”œβ”€β”€ 0099_Seed_memory_process.json +β”‚ β”‚ β”œβ”€β”€ 0100_Set_up_Git_repo.json +β”‚ β”‚ β”œβ”€β”€ 0101_Customize_embedder_setup.json +β”‚ β”‚ β”œβ”€β”€ 0102_Seeding_Local_Lyra_memory.json +β”‚ β”‚ β”œβ”€β”€ 0103_Mem0_seeding_part_3.json +β”‚ β”‚ β”œβ”€β”€ 0104_Memory_build_prompt.json +β”‚ β”‚ β”œβ”€β”€ 0105_Git_submodule_setup_guide.json +β”‚ β”‚ β”œβ”€β”€ 0106_Serve_UI_on_LAN.json +β”‚ β”‚ β”œβ”€β”€ 0107_AI_name_suggestion.json +β”‚ β”‚ β”œβ”€β”€ 0108_Room_X_planning_update.json +β”‚ β”‚ β”œβ”€β”€ 0109_Salience_filtering_design.json +β”‚ β”‚ β”œβ”€β”€ 0110_RoomX_Cortex_build.json +β”‚ β”‚ β”œβ”€β”€ 0119_Explain_Lyra_cortex_idea.json +β”‚ β”‚ β”œβ”€β”€ 0120_Git_submodule_organization.json +β”‚ β”‚ β”œβ”€β”€ 0121_Web_UI_fix_guide.json +β”‚ β”‚ β”œβ”€β”€ 0122_UI_development_planning.json +β”‚ β”‚ β”œβ”€β”€ 0123_NVGRAM_debugging_steps.json +β”‚ β”‚ β”œβ”€β”€ 0124_NVGRAM_setup_troubleshooting.json +β”‚ β”‚ β”œβ”€β”€ 0125_NVGRAM_development_update.json +β”‚ β”‚ β”œβ”€β”€ 0126_RX_-_NeVGRAM_New_Features.json +β”‚ β”‚ β”œβ”€β”€ 0127_Error_troubleshooting_steps.json +β”‚ β”‚ β”œβ”€β”€ 0135_Proxmox_backup_with_ABB.json +β”‚ β”‚ β”œβ”€β”€ 0151_Auto-start_Lyra-Core_VM.json +β”‚ β”‚ β”œβ”€β”€ 0156_AI_GPU_benchmarks_comparison.json +β”‚ β”‚ └── 0251_Lyra_project_handoff.json +β”‚ β”œβ”€β”€ chromadb +β”‚ β”‚ β”œβ”€β”€ c4f701ee-1978-44a1-9df4-3e865b5d33c1 +β”‚ β”‚ β”‚ β”œβ”€β”€ data_level0.bin +β”‚ β”‚ β”‚ β”œβ”€β”€ header.bin +β”‚ β”‚ β”‚ β”œβ”€β”€ index_metadata.pickle +β”‚ β”‚ β”‚ β”œβ”€β”€ length.bin +β”‚ β”‚ β”‚ └── link_lists.bin +β”‚ β”‚ └── chroma.sqlite3 +β”‚ β”œβ”€β”€ import.log +β”‚ β”œβ”€β”€ lyra-chatlogs +β”‚ β”‚ β”œβ”€β”€ 0000_Wire_ROCm_to_Cortex.json +β”‚ β”‚ β”œβ”€β”€ 0001_Branch___10_22_ct201branch-ssh_tut.json +β”‚ β”‚ β”œβ”€β”€ 0002_cortex_LLMs_11-1-25.json +β”‚ β”‚ └── 0003_RAG_beta.json +β”‚ β”œβ”€β”€ rag_api.py +β”‚ β”œβ”€β”€ rag_build.py +β”‚ β”œβ”€β”€ rag_chat_import.py +β”‚ └── rag_query.py +β”œβ”€β”€ README.md +└── volumes + β”œβ”€β”€ neo4j_data + β”‚ β”œβ”€β”€ databases + β”‚ β”‚ β”œβ”€β”€ neo4j + β”‚ β”‚ β”‚ β”œβ”€β”€ database_lock + β”‚ β”‚ β”‚ β”œβ”€β”€ id-buffer.tmp.0 + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.counts.db + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.indexstats.db + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.labeltokenstore.db + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.labeltokenstore.db.id + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.labeltokenstore.db.names + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.labeltokenstore.db.names.id + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.nodestore.db + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.nodestore.db.id + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.nodestore.db.labels + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.nodestore.db.labels.id + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db.arrays + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db.arrays.id + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db.id + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db.index + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db.index.id + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db.index.keys + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db.index.keys.id + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db.strings + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db.strings.id + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.relationshipgroupstore.db + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.relationshipgroupstore.db.id + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.relationshipgroupstore.degrees.db + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.relationshipstore.db + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.relationshipstore.db.id + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.relationshiptypestore.db + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.relationshiptypestore.db.id + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.relationshiptypestore.db.names + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.relationshiptypestore.db.names.id + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.schemastore.db + β”‚ β”‚ β”‚ β”œβ”€β”€ neostore.schemastore.db.id + β”‚ β”‚ β”‚ └── schema + β”‚ β”‚ β”‚ └── index + β”‚ β”‚ β”‚ └── token-lookup-1.0 + β”‚ β”‚ β”‚ β”œβ”€β”€ 1 + β”‚ β”‚ β”‚ β”‚ └── index-1 + β”‚ β”‚ β”‚ └── 2 + β”‚ β”‚ β”‚ └── index-2 + β”‚ β”‚ β”œβ”€β”€ store_lock + β”‚ β”‚ └── system + β”‚ β”‚ β”œβ”€β”€ database_lock + β”‚ β”‚ β”œβ”€β”€ id-buffer.tmp.0 + β”‚ β”‚ β”œβ”€β”€ neostore + β”‚ β”‚ β”œβ”€β”€ neostore.counts.db + β”‚ β”‚ β”œβ”€β”€ neostore.indexstats.db + β”‚ β”‚ β”œβ”€β”€ neostore.labeltokenstore.db + β”‚ β”‚ β”œβ”€β”€ neostore.labeltokenstore.db.id + β”‚ β”‚ β”œβ”€β”€ neostore.labeltokenstore.db.names + β”‚ β”‚ β”œβ”€β”€ neostore.labeltokenstore.db.names.id + β”‚ β”‚ β”œβ”€β”€ neostore.nodestore.db + β”‚ β”‚ β”œβ”€β”€ neostore.nodestore.db.id + β”‚ β”‚ β”œβ”€β”€ neostore.nodestore.db.labels + β”‚ β”‚ β”œβ”€β”€ neostore.nodestore.db.labels.id + β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db + β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db.arrays + β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db.arrays.id + β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db.id + β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db.index + β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db.index.id + β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db.index.keys + β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db.index.keys.id + β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db.strings + β”‚ β”‚ β”œβ”€β”€ neostore.propertystore.db.strings.id + β”‚ β”‚ β”œβ”€β”€ neostore.relationshipgroupstore.db + β”‚ β”‚ β”œβ”€β”€ neostore.relationshipgroupstore.db.id + β”‚ β”‚ β”œβ”€β”€ neostore.relationshipgroupstore.degrees.db + β”‚ β”‚ β”œβ”€β”€ neostore.relationshipstore.db + β”‚ β”‚ β”œβ”€β”€ neostore.relationshipstore.db.id + β”‚ β”‚ β”œβ”€β”€ neostore.relationshiptypestore.db + β”‚ β”‚ β”œβ”€β”€ neostore.relationshiptypestore.db.id + β”‚ β”‚ β”œβ”€β”€ neostore.relationshiptypestore.db.names + β”‚ β”‚ β”œβ”€β”€ neostore.relationshiptypestore.db.names.id + β”‚ β”‚ β”œβ”€β”€ neostore.schemastore.db + β”‚ β”‚ β”œβ”€β”€ neostore.schemastore.db.id + β”‚ β”‚ └── schema + β”‚ β”‚ └── index + β”‚ β”‚ β”œβ”€β”€ range-1.0 + β”‚ β”‚ β”‚ β”œβ”€β”€ 3 + β”‚ β”‚ β”‚ β”‚ └── index-3 + β”‚ β”‚ β”‚ β”œβ”€β”€ 4 + β”‚ β”‚ β”‚ β”‚ └── index-4 + β”‚ β”‚ β”‚ β”œβ”€β”€ 7 + β”‚ β”‚ β”‚ β”‚ └── index-7 + β”‚ β”‚ β”‚ β”œβ”€β”€ 8 + β”‚ β”‚ β”‚ β”‚ └── index-8 + β”‚ β”‚ β”‚ └── 9 + β”‚ β”‚ β”‚ └── index-9 + β”‚ β”‚ └── token-lookup-1.0 + β”‚ β”‚ β”œβ”€β”€ 1 + β”‚ β”‚ β”‚ └── index-1 + β”‚ β”‚ └── 2 + β”‚ β”‚ └── index-2 + β”‚ β”œβ”€β”€ dbms + β”‚ β”‚ └── auth.ini + β”‚ β”œβ”€β”€ server_id + β”‚ └── transactions + β”‚ β”œβ”€β”€ neo4j + β”‚ β”‚ β”œβ”€β”€ checkpoint.0 + β”‚ β”‚ └── neostore.transaction.db.0 + β”‚ └── system + β”‚ β”œβ”€β”€ checkpoint.0 + β”‚ └── neostore.transaction.db.0 + └── postgres_data [error opening dir] \ No newline at end of file diff --git a/intake/Dockerfile b/intake/Dockerfile deleted file mode 100644 index 2d26ea6..0000000 --- a/intake/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM python:3.11-slim -WORKDIR /app -COPY requirements.txt . -RUN pip install --no-cache-dir -r requirements.txt -COPY . . -CMD ["uvicorn", "intake:app", "--host", "0.0.0.0", "--port", "7080"] diff --git a/intake/intake.py b/intake/intake.py deleted file mode 100644 index 1724ece..0000000 --- a/intake/intake.py +++ /dev/null @@ -1,430 +0,0 @@ -from fastapi import FastAPI, Body, Query, BackgroundTasks -from collections import deque -from datetime import datetime -import requests -import os -import sys -import asyncio -from dotenv import load_dotenv - -# ─────────────────────────────────────────────── -# πŸ”§ Load environment variables -# ─────────────────────────────────────────────── -load_dotenv() - -SUMMARY_MODEL = os.getenv("SUMMARY_MODEL_NAME", "mistral-7b-instruct-v0.2.Q4_K_M.gguf") -SUMMARY_URL = os.getenv("SUMMARY_API_URL", "http://localhost:8080/v1/completions") -SUMMARY_MAX_TOKENS = int(os.getenv("SUMMARY_MAX_TOKENS", "200")) -SUMMARY_TEMPERATURE = float(os.getenv("SUMMARY_TEMPERATURE", "0.3")) - -# ─────────────────────────────────────────────── -# 🧠 NeoMem connection (session-aware) -# ─────────────────────────────────────────────── -from uuid import uuid4 - -NEOMEM_API = os.getenv("NEOMEM_API") -NEOMEM_KEY = os.getenv("NEOMEM_KEY") - -def push_summary_to_neomem(summary_text: str, level: str, session_id: str): - """Send summarized text to NeoMem, tagged by session_id.""" - if not NEOMEM_API: - print("⚠️ NEOMEM_API not set, skipping NeoMem push") - return - - payload = { - "messages": [ - {"role": "assistant", "content": summary_text} - ], - "user_id": "brian", - # optional: uncomment if you want sessions tracked in NeoMem natively - # "run_id": session_id, - "metadata": { - "source": "intake", - "type": "summary", - "level": level, - "session_id": session_id, - "cortex": {} - } - } - - headers = {"Content-Type": "application/json"} - if NEOMEM_KEY: - headers["Authorization"] = f"Bearer {NEOMEM_KEY}" - - try: - r = requests.post(f"{NEOMEM_API}/memories", json=payload, headers=headers, timeout=25) - r.raise_for_status() - print(f"🧠 NeoMem updated ({level}, {session_id}, {len(summary_text)} chars)") - except Exception as e: - print(f"❌ NeoMem push failed ({level}, {session_id}): {e}") - - -# ─────────────────────────────────────────────── -# βš™οΈ FastAPI + buffer setup -# ─────────────────────────────────────────────── -app = FastAPI() - -# Multiple rolling buffers keyed by session_id -SESSIONS = {} - - -# Summary trigger points -# β†’ low-tier: quick factual recaps -# β†’ mid-tier: β€œReality Check” reflections -# β†’ high-tier: rolling continuity synthesis -LEVELS = [1, 2, 5, 10, 20, 30] - -@app.on_event("startup") -def show_boot_banner(): - print("🧩 Intake booting...") - print(f" Model: {SUMMARY_MODEL}") - print(f" API: {SUMMARY_URL}") - print(f" Max tokens: {SUMMARY_MAX_TOKENS}, Temp: {SUMMARY_TEMPERATURE}") - sys.stdout.flush() - -# ─────────────────────────────────────────────── -# 🧠 Hierarchical Summarizer (L10β†’L20β†’L30 cascade) -# ─────────────────────────────────────────────── -SUMMARIES_CACHE = {"L10": [], "L20": [], "L30": []} - -def summarize(exchanges, level): - """Hierarchical summarizer: builds local and meta summaries.""" - # Join exchanges into readable text - text = "\n".join( - f"User: {e['turns'][0]['content']}\nAssistant: {e['turns'][1]['content']}" - for e in exchanges - ) - - def query_llm(prompt: str): - try: - resp = requests.post( - SUMMARY_URL, - json={ - "model": SUMMARY_MODEL, - "prompt": prompt, - "max_tokens": SUMMARY_MAX_TOKENS, - "temperature": SUMMARY_TEMPERATURE, - }, - timeout=180, - ) - resp.raise_for_status() - data = resp.json() - return data.get("choices", [{}])[0].get("text", "").strip() - except Exception as e: - return f"[Error summarizing: {e}]" - - # ───── L10: local β€œReality Check” block ───── - if level == 10: - prompt = f""" - You are Lyra Intake performing a 'Reality Check' for the last {len(exchanges)} exchanges. - Summarize this block as one coherent paragraph describing the user’s focus, progress, and tone. - Avoid bullet points. - - Exchanges: - {text} - - Reality Check Summary: - """ - summary = query_llm(prompt) - SUMMARIES_CACHE["L10"].append(summary) - - # ───── L20: merge L10s ───── - elif level == 20: - # 1️⃣ create fresh L10 for 11–20 - l10_prompt = f""" - You are Lyra Intake generating a second Reality Check for the most recent {len(exchanges)} exchanges. - Summarize them as one paragraph describing what's new or changed since the last block. - Avoid bullet points. - - Exchanges: - {text} - - Reality Check Summary: - """ - new_l10 = query_llm(l10_prompt) - SUMMARIES_CACHE["L10"].append(new_l10) - - # 2️⃣ merge all L10s into a Session Overview - joined_l10s = "\n\n".join(SUMMARIES_CACHE["L10"]) - l20_prompt = f""" - You are Lyra Intake merging multiple 'Reality Checks' into a single Session Overview. - Summarize the following Reality Checks into one short paragraph capturing the ongoing goals, - patterns, and overall progress. - - Reality Checks: - {joined_l10s} - - Session Overview: - """ - l20_summary = query_llm(l20_prompt) - SUMMARIES_CACHE["L20"].append(l20_summary) - summary = new_l10 + "\n\n" + l20_summary - - # ───── L30: continuity synthesis ───── - elif level == 30: - # 1️⃣ create new L10 for 21–30 - new_l10 = query_llm(f""" - You are Lyra Intake creating a new Reality Check for exchanges 21–30. - Summarize this block in one cohesive paragraph, describing any shifts in focus or tone. - - Exchanges: - {text} - - Reality Check Summary: - """) - - SUMMARIES_CACHE["L10"].append(new_l10) - - # 2️⃣ merge all lower levels for continuity - joined = "\n\n".join(SUMMARIES_CACHE["L10"] + SUMMARIES_CACHE["L20"]) - continuity_prompt = f""" - You are Lyra Intake performing a 'Continuity Report' β€” a high-level reflection combining all Reality Checks - and Session Overviews so far. Describe how the conversation has evolved, the key insights, and remaining threads. - - Reality Checks and Overviews: - {joined} - - Continuity Report: - """ - l30_summary = query_llm(continuity_prompt) - SUMMARIES_CACHE["L30"].append(l30_summary) - summary = new_l10 + "\n\n" + l30_summary - - # ───── L1–L5 (standard factual summaries) ───── - else: - prompt = f""" - You are Lyra Intake, a background summarization module for an AI assistant. - - Your job is to compress recent chat exchanges between a user and an assistant - into a short, factual summary. The user's name is Brian, and the assistant's name is Lyra. - Focus only on the real conversation content. - Do NOT invent names, people, or examples. Avoid speculation or storytelling. - - Summarize clearly what topics were discussed and what conclusions were reached. - Avoid speculation, names, or bullet points. - - Exchanges: - {text} - - Summary: - """ - summary = query_llm(prompt) - - return f"[L{level} Summary of {len(exchanges)} exchanges]: {summary}" - -from datetime import datetime - -LOG_DIR = "/app/logs" -os.makedirs(LOG_DIR, exist_ok=True) - -def log_to_file(level: str, summary: str): - """Append each summary to a persistent .txt log file.""" - timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - filename = os.path.join(LOG_DIR, "summaries.log") - with open(filename, "a", encoding="utf-8") as f: - f.write(f"[{timestamp}] {level}\n{summary}\n{'='*60}\n\n") - -# ─────────────────────────────────────────────── -# πŸ” Background summarization helper -# ─────────────────────────────────────────────── -def run_summarization_task(exchange, session_id): - """Async-friendly wrapper for slow summarization work.""" - try: - hopper = SESSIONS.get(session_id) - if not hopper: - print(f"⚠️ No hopper found for {session_id}") - return - - buffer = hopper["buffer"] - count = len(buffer) - summaries = {} - - if count < 30: - for lvl in LEVELS: - if lvl <= count: - s_text = summarize(list(buffer)[-lvl:], lvl) - log_to_file(f"L{lvl}", s_text) - push_summary_to_neomem(s_text, f"L{lvl}", session_id) - summaries[f"L{lvl}"] = s_text - else: - # optional: include your existing 30+ logic here - pass - - if summaries: - print(f"🧩 [BG] Summaries generated asynchronously at count={count}: {list(summaries.keys())}") - - except Exception as e: - print(f"πŸ’₯ [BG] Async summarization failed: {e}") - -# ─────────────────────────────────────────────── -# πŸ“¨ Routes -# ─────────────────────────────────────────────── -@app.post("/add_exchange") -def add_exchange(exchange: dict = Body(...), background_tasks: BackgroundTasks = None): - session_id = exchange.get("session_id") or f"sess-{uuid4().hex[:8]}" - exchange["session_id"] = session_id - - if session_id not in SESSIONS: - SESSIONS[session_id] = {"buffer": deque(maxlen=100), "last_update": datetime.now()} - print(f"πŸ†• Hopper created: {session_id}") - - hopper = SESSIONS[session_id] - hopper["buffer"].append(exchange) - hopper["last_update"] = datetime.now() - count = len(hopper["buffer"]) - - # πŸš€ queue background summarization - if background_tasks: - background_tasks.add_task(run_summarization_task, exchange, session_id) - print(f"⏩ Queued async summarization for {session_id}") - - return {"ok": True, "exchange_count": count, "queued": True} - - - # # ── Normal tiered behavior up to 30 ── commented out for aysnc addon - # if count < 30: - # if count in LEVELS: - # for lvl in LEVELS: - # if lvl <= count: - # summaries[f"L{lvl}"] = summarize(list(buffer)[-lvl:], lvl) - # log_to_file(f"L{lvl}", summaries[f"L{lvl}"]) - # push_summary_to_neomem(summaries[f"L{lvl}"], f"L{lvl}", session_id) - -# # πŸš€ Launch summarization in the background (non-blocking) -# if background_tasks: - # background_tasks.add_task(run_summarization_task, exchange, session_id) - # print(f"⏩ Queued async summarization for {session_id}") - - - # # ── Beyond 30: keep summarizing every +15 exchanges ── - # else: - # # Find next milestone after 30 (45, 60, 75, ...) - # milestone = 30 + ((count - 30) // 15) * 15 - # if count == milestone: - # summaries[f"L{milestone}"] = summarize(list(buffer)[-15:], milestone) - # log_to_file(f"L{milestone}", summaries[f"L{milestone}"]) - # push_summary_to_neomem(summaries[f"L{milestone}"], f"L{milestone}", session_id) - - # # Optional: merge all continuity summaries so far into a running meta-summary - # joined = "\n\n".join( - # [s for key, s in summaries.items() if key.startswith("L")] - # ) - # meta_prompt = f""" - # You are Lyra Intake composing an 'Ongoing Continuity Report' that merges - # all prior continuity summaries into one living narrative. - # Focus on major themes, changes, and lessons so far. - - # Continuity Summaries: - # {joined} - - # Ongoing Continuity Report: - # """ - # meta_summary = f"[L∞ Ongoing Continuity Report]: {query_llm(meta_prompt)}" - # summaries["L∞"] = meta_summary - # log_to_file("L∞", meta_summary) - # push_summary_to_neomem(meta_summary, "L∞", session_id) - - # print(f"πŸŒ€ L{milestone} continuity summary created (messages {count-14}-{count})") - - # # ── Log summaries ── - # if summaries: - # print(f"🧩 Summaries generated at count={count}: {list(summaries.keys())}") - - # return { - # "ok": True, - # "exchange_count": len(buffer), - # "queued": True - # } - -# ─────────────────────────────────────────────── -# Clear rubbish from hopper. -# ─────────────────────────────────────────────── -def close_session(session_id: str): - """Run a final summary for the given hopper, post it to NeoMem, then delete it.""" - hopper = SESSIONS.get(session_id) - if not hopper: - print(f"⚠️ No active hopper for {session_id}") - return - - buffer = hopper["buffer"] - if not buffer: - print(f"⚠️ Hopper {session_id} is empty, skipping closure") - del SESSIONS[session_id] - return - - try: - print(f"πŸ”’ Closing hopper {session_id} ({len(buffer)} exchanges)") - - # Summarize everything left in the buffer - final_summary = summarize(list(buffer), 30) # level 30 = continuity synthesis - log_to_file("LFinal", final_summary) - push_summary_to_neomem(final_summary, "LFinal", session_id) - - # Optionally: mark this as a special 'closure' memory - closure_note = f"[Session {session_id} closed with {len(buffer)} exchanges]" - push_summary_to_neomem(closure_note, "LFinalNote", session_id) - - print(f"🧹 Hopper {session_id} closed and deleted") - except Exception as e: - print(f"πŸ’₯ Error closing hopper {session_id}: {e}") - finally: - del SESSIONS[session_id] - -@app.post("/close_session/{session_id}") -def close_session_endpoint(session_id: str): - close_session(session_id) - return {"ok": True, "closed": session_id} - -# ─────────────────────────────────────────────── -# 🧾 Provide recent summary for Cortex /reason calls -# ─────────────────────────────────────────────── -@app.get("/summaries") -def get_summary(session_id: str = Query(..., description="Active session ID")): - """ - Return the most recent summary (L10β†’L30β†’LFinal) for a given session. - If none exist yet, return a placeholder summary. - """ - try: - # Find the most recent file entry in summaries.log - log_path = os.path.join(LOG_DIR, "summaries.log") - if not os.path.exists(log_path): - return { - "summary_text": "(none)", - "last_message_ts": datetime.now().isoformat(), - "session_id": session_id, - "exchange_count": 0, - } - - with open(log_path, "r", encoding="utf-8") as f: - lines = f.readlines() - - # Grab the last summary section that mentions this session_id - recent_lines = [ln for ln in lines if session_id in ln or ln.startswith("[L")] - if recent_lines: - # Find the last non-empty summary text - snippet = "".join(recent_lines[-8:]).strip() - else: - snippet = "(no summaries yet)" - - return { - "summary_text": snippet[-1000:], # truncate to avoid huge block - "last_message_ts": datetime.now().isoformat(), - "session_id": session_id, - "exchange_count": len(SESSIONS.get(session_id, {}).get("buffer", [])), - } - - except Exception as e: - print(f"⚠️ /summaries failed for {session_id}: {e}") - return { - "summary_text": f"(error fetching summaries: {e})", - "last_message_ts": datetime.now().isoformat(), - "session_id": session_id, - "exchange_count": 0, - } - -# ─────────────────────────────────────────────── -# βœ… Health check -# ─────────────────────────────────────────────── -@app.get("/health") -def health(): - return {"ok": True, "model": SUMMARY_MODEL, "url": SUMMARY_URL} diff --git a/intake/requirements.txt b/intake/requirements.txt deleted file mode 100644 index 94b7518..0000000 --- a/intake/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -fastapi==0.115.8 -uvicorn==0.34.0 -requests==2.32.3 -python-dotenv==1.0.1 diff --git a/intake/venv/bin/python b/intake/venv/bin/python deleted file mode 120000 index b8a0adb..0000000 --- a/intake/venv/bin/python +++ /dev/null @@ -1 +0,0 @@ -python3 \ No newline at end of file diff --git a/intake/venv/bin/python3 b/intake/venv/bin/python3 deleted file mode 120000 index ae65fda..0000000 --- a/intake/venv/bin/python3 +++ /dev/null @@ -1 +0,0 @@ -/usr/bin/python3 \ No newline at end of file diff --git a/intake/venv/bin/python3.10 b/intake/venv/bin/python3.10 deleted file mode 120000 index b8a0adb..0000000 --- a/intake/venv/bin/python3.10 +++ /dev/null @@ -1 +0,0 @@ -python3 \ No newline at end of file diff --git a/intake/venv/lib64 b/intake/venv/lib64 deleted file mode 120000 index 7951405..0000000 --- a/intake/venv/lib64 +++ /dev/null @@ -1 +0,0 @@ -lib \ No newline at end of file diff --git a/intake/venv/pyvenv.cfg b/intake/venv/pyvenv.cfg deleted file mode 100644 index 0537ffc..0000000 --- a/intake/venv/pyvenv.cfg +++ /dev/null @@ -1,3 +0,0 @@ -home = /usr/bin -include-system-site-packages = false -version = 3.10.12 diff --git a/neomem/neomem/vector_stores/qdrant.py b/neomem/neomem/vector_stores/qdrant.py index 59ee9a9..456da2e 100644 --- a/neomem/neomem/vector_stores/qdrant.py +++ b/neomem/neomem/vector_stores/qdrant.py @@ -1,7 +1,9 @@ import logging import os import shutil +from typing import Optional +from pydantic import BaseModel from qdrant_client import QdrantClient from qdrant_client.models import ( Distance, @@ -19,6 +21,13 @@ from mem0.vector_stores.base import VectorStoreBase logger = logging.getLogger(__name__) +class OutputData(BaseModel): + """Standard output format for vector search results.""" + id: Optional[str] + score: Optional[float] + payload: Optional[dict] + + class Qdrant(VectorStoreBase): def __init__( self, @@ -170,7 +179,7 @@ class Qdrant(VectorStoreBase): filters (dict, optional): Filters to apply to the search. Defaults to None. Returns: - list: Search results. + list: Search results wrapped in OutputData format. """ query_filter = self._create_filter(filters) if filters else None hits = self.client.query_points( @@ -179,7 +188,16 @@ class Qdrant(VectorStoreBase): query_filter=query_filter, limit=limit, ) - return hits.points + + # Wrap results in OutputData format to match other vector stores + return [ + OutputData( + id=str(hit.id), + score=hit.score, + payload=hit.payload + ) + for hit in hits.points + ] def delete(self, vector_id: int): """ @@ -207,7 +225,7 @@ class Qdrant(VectorStoreBase): point = PointStruct(id=vector_id, vector=vector, payload=payload) self.client.upsert(collection_name=self.collection_name, points=[point]) - def get(self, vector_id: int) -> dict: + def get(self, vector_id: int) -> OutputData: """ Retrieve a vector by ID. @@ -215,10 +233,17 @@ class Qdrant(VectorStoreBase): vector_id (int): ID of the vector to retrieve. Returns: - dict: Retrieved vector. + OutputData: Retrieved vector wrapped in OutputData format. """ result = self.client.retrieve(collection_name=self.collection_name, ids=[vector_id], with_payload=True) - return result[0] if result else None + if result: + hit = result[0] + return OutputData( + id=str(hit.id), + score=None, # No score for direct retrieval + payload=hit.payload + ) + return None def list_cols(self) -> list: """ @@ -251,7 +276,7 @@ class Qdrant(VectorStoreBase): limit (int, optional): Number of vectors to return. Defaults to 100. Returns: - list: List of vectors. + list: List of vectors wrapped in OutputData format. """ query_filter = self._create_filter(filters) if filters else None result = self.client.scroll( @@ -261,7 +286,18 @@ class Qdrant(VectorStoreBase): with_payload=True, with_vectors=False, ) - return result + + # Wrap results in OutputData format + # scroll() returns tuple: (points, next_page_offset) + points = result[0] if isinstance(result, tuple) else result + return [ + OutputData( + id=str(point.id), + score=None, # No score for list operation + payload=point.payload + ) + for point in points + ] def reset(self): """Reset the index by deleting and recreating it.""" diff --git a/neomem_history/history.db b/neomem_history/history.db deleted file mode 100644 index 398839c..0000000 Binary files a/neomem_history/history.db and /dev/null differ diff --git a/rag/.env.example b/rag/.env.example new file mode 100644 index 0000000..21bc0e6 --- /dev/null +++ b/rag/.env.example @@ -0,0 +1,11 @@ +# ==================================== +# πŸ“š RAG SERVICE CONFIG +# ==================================== +# Retrieval-Augmented Generation service (Beta Lyrae) +# Currently not wired into the system - for future activation +# OPENAI_API_KEY and other shared config inherited from root .env + +# RAG-specific configuration will go here when service is activated +# ChromaDB configuration +# Vector store settings +# Retrieval parameters diff --git a/sandbox/Dockerfile b/sandbox/Dockerfile new file mode 100644 index 0000000..e833834 --- /dev/null +++ b/sandbox/Dockerfile @@ -0,0 +1,30 @@ +FROM python:3.11-slim + +# Install runtime dependencies +RUN apt-get update && apt-get install -y \ + bash \ + coreutils \ + && rm -rf /var/lib/apt/lists/* + +# Install common Python packages for data analysis and computation +RUN pip install --no-cache-dir \ + numpy \ + pandas \ + requests \ + matplotlib \ + scipy + +# Create non-root user for security +RUN useradd -m -u 1000 sandbox + +# Create execution directory +RUN mkdir /executions && chown sandbox:sandbox /executions + +# Switch to non-root user +USER sandbox + +# Set working directory +WORKDIR /executions + +# Keep container running +CMD ["tail", "-f", "/dev/null"] diff --git a/test_ollama_parser.py b/test_ollama_parser.py new file mode 100644 index 0000000..917516d --- /dev/null +++ b/test_ollama_parser.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python3 +""" +Test OllamaAdapter XML parsing with real malformed examples. +""" + +import asyncio +import sys +sys.path.insert(0, '/home/serversdown/project-lyra/cortex') + +from autonomy.tools.adapters.ollama_adapter import OllamaAdapter + + +async def test_parser(): + adapter = OllamaAdapter() + + # Test cases with actual malformed XML we've seen + test_cases = [ + { + "name": "Malformed closing tag 1", + "xml": """ + execute_code + + python + print(50 / 2) + To calculate the result of dividing 50 by 2. + +""" + }, + { + "name": "Malformed closing tag 2", + "xml": """ + execute_code + + python + print(60 / 4) + + To calculate 60 divided by 4 using Python. + + python + result = 35 / 7; result + + To calculate the division of 35 by 7 using Python. +""" + }, + { + "name": "Correct XML", + "xml": """ + execute_code + + python + print(100 / 4) + Calculate division + +""" + }, + { + "name": "XML with surrounding text", + "xml": """Let me help you with that. + + + execute_code + + python + print(20 / 4) + Calculate the result + + + +The result will be shown above.""" + } + ] + + print("=" * 80) + print("Testing OllamaAdapter XML Parsing") + print("=" * 80) + + for test in test_cases: + print(f"\nTest: {test['name']}") + print("-" * 80) + print(f"Input XML:\n{test['xml'][:200]}{'...' if len(test['xml']) > 200 else ''}") + print("-" * 80) + + try: + result = await adapter.parse_response(test['xml']) + print(f"βœ… Parsed successfully!") + print(f" Content: {result.get('content', '')[:100]}") + print(f" Tool calls found: {len(result.get('tool_calls') or [])}") + + if result.get('tool_calls'): + for idx, tc in enumerate(result['tool_calls']): + print(f" Tool {idx + 1}: {tc.get('name')} with args: {tc.get('arguments')}") + except Exception as e: + print(f"❌ Error: {e}") + + print() + + +if __name__ == "__main__": + asyncio.run(test_parser()) diff --git a/test_thinking_stream.html b/test_thinking_stream.html new file mode 100644 index 0000000..991b587 --- /dev/null +++ b/test_thinking_stream.html @@ -0,0 +1,286 @@ + + + + + + Lyra - Show Your Work + + + +
+ +
+
πŸ’¬ Chat
+
+
+ + +
+
+ + +
+
🧠 Show Your Work
+
+
Not connected
+
+
+ + + + diff --git a/test_tools.py b/test_tools.py new file mode 100644 index 0000000..1ac1284 --- /dev/null +++ b/test_tools.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 +""" +Quick test script for tool calling system. +Tests the components before full endpoint integration. +""" + +import asyncio +import sys +import os + +# Add cortex to path +sys.path.insert(0, '/home/serversdown/project-lyra/cortex') + +# Set required env vars +os.environ['ENABLE_CODE_EXECUTION'] = 'true' +os.environ['ENABLE_WEB_SEARCH'] = 'true' +os.environ['CODE_SANDBOX_CONTAINER'] = 'lyra-code-sandbox' + +from autonomy.tools.registry import get_registry +from autonomy.tools.executors.code_executor import execute_code +from autonomy.tools.executors.web_search import search_web + + +async def test_code_executor(): + """Test code execution in sandbox.""" + print("\n=== Testing Code Executor ===") + + result = await execute_code({ + "language": "python", + "code": "print('Hello from sandbox!')\nprint(2 + 2)", + "reason": "Testing sandbox execution" + }) + + print(f"Result: {result}") + return result.get("stdout") == "Hello from sandbox!\n4\n" + + +async def test_web_search(): + """Test web search.""" + print("\n=== Testing Web Search ===") + + result = await search_web({ + "query": "Python programming", + "max_results": 3 + }) + + print(f"Found {result.get('count', 0)} results") + if result.get('results'): + print(f"First result: {result['results'][0]['title']}") + return result.get("count", 0) > 0 + + +async def test_registry(): + """Test tool registry.""" + print("\n=== Testing Tool Registry ===") + + registry = get_registry() + tools = registry.get_tool_definitions() + + print(f"Registered tools: {registry.get_tool_names()}") + print(f"Total tools: {len(tools) if tools else 0}") + + return len(tools or []) > 0 + + +async def main(): + print("πŸ§ͺ Tool System Component Tests\n") + + tests = [ + ("Tool Registry", test_registry), + ("Code Executor", test_code_executor), + ("Web Search", test_web_search), + ] + + results = {} + for name, test_func in tests: + try: + passed = await test_func() + results[name] = "βœ… PASS" if passed else "❌ FAIL" + except Exception as e: + results[name] = f"❌ ERROR: {str(e)}" + + print("\n" + "="*50) + print("Test Results:") + for name, result in results.items(): + print(f" {name}: {result}") + print("="*50) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/vllm-mi50.md b/vllm-mi50.md deleted file mode 100644 index c8f6fd4..0000000 --- a/vllm-mi50.md +++ /dev/null @@ -1,416 +0,0 @@ -Here you go β€” a **clean, polished, ready-to-drop-into-Trilium or GitHub** Markdown file. - -If you want, I can also auto-generate a matching `/docs/vllm-mi50/` folder structure and a mini-ToC. - ---- - -# **MI50 + vLLM + Proxmox LXC Setup Guide** - -### *End-to-End Field Manual for gfx906 LLM Serving* - -**Version:** 1.0 -**Last updated:** 2025-11-17 - ---- - -## **πŸ“Œ Overview** - -This guide documents how to run a **vLLM OpenAI-compatible server** on an -**AMD Instinct MI50 (gfx906)** inside a **Proxmox LXC container**, expose it over LAN, -and wire it into **Project Lyra's Cortex reasoning layer**. - -This file is long, specific, and intentionally leaves *nothing* out so you never have to rediscover ROCm pain rituals again. - ---- - -## **1. What This Stack Looks Like** - -``` -Proxmox Host - β”œβ”€ AMD Instinct MI50 (gfx906) - β”œβ”€ AMDGPU + ROCm stack - └─ LXC Container (CT 201: cortex-gpu) - β”œβ”€ Ubuntu 24.04 - β”œβ”€ Docker + docker compose - β”œβ”€ vLLM inside Docker (nalanzeyu/vllm-gfx906) - β”œβ”€ GPU passthrough via /dev/kfd + /dev/dri + PCI bind - └─ vLLM API exposed on :8000 -Lyra Cortex (VM/Server) - └─ LLM_PRIMARY_URL=http://10.0.0.43:8000 -``` - ---- - -## **2. Proxmox Host β€” GPU Setup** - -### **2.1 Confirm MI50 exists** - -```bash -lspci -nn | grep -i 'vega\|instinct\|radeon' -``` - -You should see something like: - -``` -0a:00.0 Display controller: AMD Instinct MI50 (gfx906) -``` - -### **2.2 Load AMDGPU driver** - -The main pitfall after **any host reboot**. - -```bash -modprobe amdgpu -``` - -If you skip this, the LXC container won't see the GPU. - ---- - -## **3. LXC Container Configuration (CT 201)** - -The container ID is **201**. -Config file is at: - -``` -/etc/pve/lxc/201.conf -``` - -### **3.1 Working 201.conf** - -Paste this *exact* version: - -```ini -arch: amd64 -cores: 4 -hostname: cortex-gpu -memory: 16384 -swap: 512 -ostype: ubuntu -onboot: 1 -startup: order=2,up=10,down=10 -net0: name=eth0,bridge=vmbr0,hwaddr=BC:24:11:C6:3E:88,ip=dhcp,type=veth -rootfs: local-lvm:vm-201-disk-0,size=200G -unprivileged: 0 - -# Docker in LXC requires this -features: keyctl=1,nesting=1 -lxc.apparmor.profile: unconfined -lxc.cap.drop: - -# --- GPU passthrough for ROCm (MI50) --- -lxc.mount.entry: /dev/kfd dev/kfd none bind,optional,create=file,mode=0666 -lxc.mount.entry: /dev/dri dev/dri none bind,optional,create=dir -lxc.mount.entry: /sys/class/drm sys/class/drm none bind,ro,optional,create=dir -lxc.mount.entry: /opt/rocm /opt/rocm none bind,ro,optional,create=dir - -# Bind the MI50 PCI device -lxc.mount.entry: /dev/bus/pci/0000:0a:00.0 dev/bus/pci/0000:0a:00.0 none bind,optional,create=file - -# Allow GPU-related character devices -lxc.cgroup2.devices.allow: c 226:* rwm -lxc.cgroup2.devices.allow: c 29:* rwm -lxc.cgroup2.devices.allow: c 189:* rwm -lxc.cgroup2.devices.allow: c 238:* rwm -lxc.cgroup2.devices.allow: c 241:* rwm -lxc.cgroup2.devices.allow: c 242:* rwm -lxc.cgroup2.devices.allow: c 243:* rwm -lxc.cgroup2.devices.allow: c 244:* rwm -lxc.cgroup2.devices.allow: c 245:* rwm -lxc.cgroup2.devices.allow: c 246:* rwm -lxc.cgroup2.devices.allow: c 247:* rwm -lxc.cgroup2.devices.allow: c 248:* rwm -lxc.cgroup2.devices.allow: c 249:* rwm -lxc.cgroup2.devices.allow: c 250:* rwm -lxc.cgroup2.devices.allow: c 510:0 rwm -``` - -### **3.2 Restart sequence** - -```bash -pct stop 201 -modprobe amdgpu -pct start 201 -pct enter 201 -``` - ---- - -## **4. Inside CT 201 β€” Verifying ROCm + GPU Visibility** - -### **4.1 Check device nodes** - -```bash -ls -l /dev/kfd -ls -l /dev/dri -ls -l /opt/rocm -``` - -All must exist. - -### **4.2 Validate GPU via rocminfo** - -```bash -/opt/rocm/bin/rocminfo | grep -i gfx -``` - -You need to see: - -``` -gfx906 -``` - -If you see **nothing**, the GPU isn’t passed through β€” restart and re-check the host steps. - ---- - -## **5. Install Docker in the LXC (Ubuntu 24.04)** - -This container runs Docker inside LXC (nesting enabled). - -```bash -apt update -apt install -y ca-certificates curl gnupg - -install -m 0755 -d /etc/apt/keyrings -curl -fsSL https://download.docker.com/linux/ubuntu/gpg \ - | gpg --dearmor -o /etc/apt/keyrings/docker.gpg -chmod a+r /etc/apt/keyrings/docker.gpg - -echo \ - "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \ - https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo $VERSION_CODENAME) stable" \ - > /etc/apt/sources.list.d/docker.list - -apt update -apt install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -``` - -Check: - -```bash -docker --version -docker compose version -``` - ---- - -## **6. Running vLLM Inside CT 201 via Docker** - -### **6.1 Create directory** - -```bash -mkdir -p /root/vllm -cd /root/vllm -``` - -### **6.2 docker-compose.yml** - -Save this exact file as `/root/vllm/docker-compose.yml`: - -```yaml -version: "3.9" - -services: - vllm-mi50: - image: nalanzeyu/vllm-gfx906:latest - container_name: vllm-mi50 - restart: unless-stopped - ports: - - "8000:8000" - environment: - VLLM_ROLE: "APIServer" - VLLM_MODEL: "/model" - VLLM_LOGGING_LEVEL: "INFO" - command: > - vllm serve /model - --host 0.0.0.0 - --port 8000 - --dtype float16 - --max-model-len 4096 - --api-type openai - devices: - - "/dev/kfd:/dev/kfd" - - "/dev/dri:/dev/dri" - volumes: - - /opt/rocm:/opt/rocm:ro -``` - -### **6.3 Start vLLM** - -```bash -docker compose up -d -docker compose logs -f -``` - -When healthy, you’ll see: - -``` -(APIServer) Application startup complete. -``` - -and periodic throughput logs. - ---- - -## **7. Test vLLM API** - -### **7.1 From Proxmox host** - -```bash -curl -X POST http://10.0.0.43:8000/v1/completions \ - -H "Content-Type: application/json" \ - -d '{"model":"/model","prompt":"ping","max_tokens":5}' -``` - -Should respond like: - -```json -{"choices":[{"text":"-pong"}]} -``` - -### **7.2 From Cortex machine** - -```bash -curl -X POST http://10.0.0.43:8000/v1/completions \ - -H "Content-Type: application/json" \ - -d '{"model":"/model","prompt":"ping from cortex","max_tokens":5}' -``` - ---- - -## **8. Wiring into Lyra Cortex** - -In `cortex` container’s `docker-compose.yml`: - -```yaml -environment: - LLM_PRIMARY_URL: http://10.0.0.43:8000 -``` - -Not `/v1/completions` because the router appends that automatically. - -In `cortex/.env`: - -```env -LLM_FORCE_BACKEND=primary -LLM_MODEL=/model -``` - -Test: - -```bash -curl -X POST http://10.0.0.41:7081/reason \ - -H "Content-Type: application/json" \ - -d '{"prompt":"test vllm","session_id":"dev"}' -``` - -If you get a meaningful response: **Cortex β†’ vLLM is online**. - ---- - -## **9. Common Failure Modes (And Fixes)** - -### **9.1 β€œFailed to infer device type”** - -vLLM cannot see any ROCm devices. - -Fix: - -```bash -# On host -modprobe amdgpu -pct stop 201 -pct start 201 -# In container -/opt/rocm/bin/rocminfo | grep -i gfx -docker compose up -d -``` - -### **9.2 GPU disappears after reboot** - -Same fix: - -```bash -modprobe amdgpu -pct stop 201 -pct start 201 -``` - -### **9.3 Invalid image name** - -If you see pull errors: - -``` -pull access denied for nalanzeuy... -``` - -Use: - -``` -image: nalanzeyu/vllm-gfx906 -``` - -### **9.4 Double `/v1` in URL** - -Ensure: - -``` -LLM_PRIMARY_URL=http://10.0.0.43:8000 -``` - -Router appends `/v1/completions`. - ---- - -## **10. Daily / Reboot Ritual** - -### **On Proxmox host** - -```bash -modprobe amdgpu -pct stop 201 -pct start 201 -``` - -### **Inside CT 201** - -```bash -/opt/rocm/bin/rocminfo | grep -i gfx -cd /root/vllm -docker compose up -d -docker compose logs -f -``` - -### **Test API** - -```bash -curl -X POST http://10.0.0.43:8000/v1/completions \ - -H "Content-Type: application/json" \ - -d '{"model":"/model","prompt":"ping","max_tokens":5}' -``` - ---- - -## **11. Summary** - -You now have: - -* **MI50 (gfx906)** correctly passed into LXC -* **ROCm** inside the container via bind mounts -* **vLLM** running inside Docker in the LXC -* **OpenAI-compatible API** on port 8000 -* **Lyra Cortex** using it automatically as primary backend - -This is a complete, reproducible setup that survives reboots (with the modprobe ritual) and allows you to upgrade/replace models anytime. - ---- - -If you want, I can generate: - -* A `/docs/vllm-mi50/README.md` -* A "vLLM Gotchas" document -* A quick-reference cheat sheet -* A troubleshooting decision tree - -Just say the word.