diff --git a/backend/main.py b/backend/main.py index 963eca1..1c95686 100644 --- a/backend/main.py +++ b/backend/main.py @@ -9,7 +9,7 @@ from typing import List, Dict from pydantic import BaseModel from backend.database import engine, Base, get_db -from backend.routers import roster, units, photos, roster_edit, dashboard, dashboard_tabs +from backend.routers import roster, units, photos, roster_edit, dashboard, dashboard_tabs, activity from backend.services.snapshot import emit_status_snapshot from backend.models import IgnoredUnit @@ -67,6 +67,7 @@ app.include_router(photos.router) app.include_router(roster_edit.router) app.include_router(dashboard.router) app.include_router(dashboard_tabs.router) +app.include_router(activity.router) from backend.routers import settings app.include_router(settings.router) diff --git a/backend/routers/activity.py b/backend/routers/activity.py new file mode 100644 index 0000000..b881a8e --- /dev/null +++ b/backend/routers/activity.py @@ -0,0 +1,146 @@ +from fastapi import APIRouter, Depends +from sqlalchemy.orm import Session +from sqlalchemy import desc +from pathlib import Path +from datetime import datetime, timedelta, timezone +from typing import List, Dict, Any +from backend.database import get_db +from backend.models import UnitHistory, Emitter, RosterUnit + +router = APIRouter(prefix="/api", tags=["activity"]) + +PHOTOS_BASE_DIR = Path("data/photos") + + +@router.get("/recent-activity") +def get_recent_activity(limit: int = 20, db: Session = Depends(get_db)): + """ + Get recent activity feed combining unit history changes and photo uploads. + Returns a unified timeline of events sorted by timestamp (newest first). + """ + activities = [] + + # Get recent history entries + history_entries = db.query(UnitHistory)\ + .order_by(desc(UnitHistory.changed_at))\ + .limit(limit * 2)\ + .all() # Get more than needed to mix with photos + + for entry in history_entries: + activity = { + "type": "history", + "timestamp": entry.changed_at.isoformat(), + "timestamp_unix": entry.changed_at.timestamp(), + "unit_id": entry.unit_id, + "change_type": entry.change_type, + "field_name": entry.field_name, + "old_value": entry.old_value, + "new_value": entry.new_value, + "source": entry.source, + "notes": entry.notes + } + activities.append(activity) + + # Get recent photos + if PHOTOS_BASE_DIR.exists(): + image_extensions = {".jpg", ".jpeg", ".png", ".gif", ".webp"} + photo_activities = [] + + for unit_dir in PHOTOS_BASE_DIR.iterdir(): + if not unit_dir.is_dir(): + continue + + unit_id = unit_dir.name + + for file_path in unit_dir.iterdir(): + if file_path.is_file() and file_path.suffix.lower() in image_extensions: + modified_time = file_path.stat().st_mtime + photo_activities.append({ + "type": "photo", + "timestamp": datetime.fromtimestamp(modified_time).isoformat(), + "timestamp_unix": modified_time, + "unit_id": unit_id, + "filename": file_path.name, + "photo_url": f"/api/unit/{unit_id}/photo/{file_path.name}" + }) + + activities.extend(photo_activities) + + # Sort all activities by timestamp (newest first) + activities.sort(key=lambda x: x["timestamp_unix"], reverse=True) + + # Limit to requested number + activities = activities[:limit] + + return { + "activities": activities, + "total": len(activities) + } + + +@router.get("/recent-callins") +def get_recent_callins(hours: int = 6, limit: int = None, db: Session = Depends(get_db)): + """ + Get recent unit call-ins (units that have reported recently). + Returns units sorted by most recent last_seen timestamp. 
+ + Args: + hours: Look back this many hours (default: 6) + limit: Maximum number of results (default: None = all) + """ + # Calculate the time threshold + time_threshold = datetime.now(timezone.utc) - timedelta(hours=hours) + + # Query emitters with recent activity, joined with roster info + recent_emitters = db.query(Emitter)\ + .filter(Emitter.last_seen >= time_threshold)\ + .order_by(desc(Emitter.last_seen))\ + .all() + + # Get roster info for all units + roster_dict = {r.id: r for r in db.query(RosterUnit).all()} + + call_ins = [] + for emitter in recent_emitters: + roster_unit = roster_dict.get(emitter.id) + + # Calculate time since last seen + last_seen_utc = emitter.last_seen.replace(tzinfo=timezone.utc) if emitter.last_seen.tzinfo is None else emitter.last_seen + time_diff = datetime.now(timezone.utc) - last_seen_utc + + # Format time ago + if time_diff.total_seconds() < 60: + time_ago = "just now" + elif time_diff.total_seconds() < 3600: + minutes = int(time_diff.total_seconds() / 60) + time_ago = f"{minutes}m ago" + else: + hours_ago = time_diff.total_seconds() / 3600 + if hours_ago < 24: + time_ago = f"{int(hours_ago)}h {int((hours_ago % 1) * 60)}m ago" + else: + days = int(hours_ago / 24) + time_ago = f"{days}d ago" + + call_in = { + "unit_id": emitter.id, + "last_seen": emitter.last_seen.isoformat(), + "time_ago": time_ago, + "status": emitter.status, + "device_type": roster_unit.device_type if roster_unit else "seismograph", + "deployed": roster_unit.deployed if roster_unit else False, + "note": roster_unit.note if roster_unit and roster_unit.note else "", + "location": roster_unit.address if roster_unit and roster_unit.address else (roster_unit.location if roster_unit else "") + } + call_ins.append(call_in) + + # Apply limit if specified + if limit: + call_ins = call_ins[:limit] + + return { + "call_ins": call_ins, + "total": len(call_ins), + "hours": hours, + "time_threshold": time_threshold.isoformat() + } diff --git a/backend/routers/settings.py b/backend/routers/settings.py index 1af0547..4cd0fb0 100644 --- a/backend/routers/settings.py +++ b/backend/routers/settings.py @@ -1,14 +1,17 @@ from fastapi import APIRouter, Depends, HTTPException, UploadFile, File -from fastapi.responses import StreamingResponse +from fastapi.responses import StreamingResponse, FileResponse from sqlalchemy.orm import Session from datetime import datetime, date from pydantic import BaseModel from typing import Optional import csv import io +import shutil +from pathlib import Path from backend.database import get_db from backend.models import RosterUnit, Emitter, IgnoredUnit, UserPreferences +from backend.services.database_backup import DatabaseBackupService router = APIRouter(prefix="/api/settings", tags=["settings"]) @@ -325,3 +328,144 @@ def update_preferences( "status_pending_threshold_hours": prefs.status_pending_threshold_hours, "updated_at": prefs.updated_at.isoformat() if prefs.updated_at else None } + + +# Database Management Endpoints + +backup_service = DatabaseBackupService() + + +@router.get("/database/stats") +def get_database_stats(): + """Get current database statistics""" + try: + stats = backup_service.get_database_stats() + return stats + except Exception as e: + raise HTTPException(status_code=500, detail=f"Failed to get database stats: {str(e)}") + + +@router.post("/database/snapshot") +def create_database_snapshot(description: Optional[str] = None): + """Create a full database snapshot""" + try: + snapshot = backup_service.create_snapshot(description=description) + return 
{ + "message": "Snapshot created successfully", + "snapshot": snapshot + } + except Exception as e: + raise HTTPException(status_code=500, detail=f"Snapshot creation failed: {str(e)}") + + +@router.get("/database/snapshots") +def list_database_snapshots(): + """List all available database snapshots""" + try: + snapshots = backup_service.list_snapshots() + return { + "snapshots": snapshots, + "count": len(snapshots) + } + except Exception as e: + raise HTTPException(status_code=500, detail=f"Failed to list snapshots: {str(e)}") + + +@router.get("/database/snapshot/{filename}") +def download_snapshot(filename: str): + """Download a specific snapshot file""" + try: + snapshot_path = backup_service.download_snapshot(filename) + return FileResponse( + path=str(snapshot_path), + filename=filename, + media_type="application/x-sqlite3" + ) + except FileNotFoundError: + raise HTTPException(status_code=404, detail=f"Snapshot {filename} not found") + except Exception as e: + raise HTTPException(status_code=500, detail=f"Download failed: {str(e)}") + + +@router.delete("/database/snapshot/{filename}") +def delete_database_snapshot(filename: str): + """Delete a specific snapshot""" + try: + backup_service.delete_snapshot(filename) + return { + "message": f"Snapshot {filename} deleted successfully", + "filename": filename + } + except FileNotFoundError: + raise HTTPException(status_code=404, detail=f"Snapshot {filename} not found") + except Exception as e: + raise HTTPException(status_code=500, detail=f"Delete failed: {str(e)}") + + +class RestoreRequest(BaseModel): + """Schema for restore request""" + filename: str + create_backup: bool = True + + +@router.post("/database/restore") +def restore_database(request: RestoreRequest, db: Session = Depends(get_db)): + """Restore database from a snapshot""" + try: + # Close the database connection before restoring + db.close() + + result = backup_service.restore_snapshot( + filename=request.filename, + create_backup_before_restore=request.create_backup + ) + + return result + except FileNotFoundError: + raise HTTPException(status_code=404, detail=f"Snapshot {request.filename} not found") + except Exception as e: + raise HTTPException(status_code=500, detail=f"Restore failed: {str(e)}") + + +@router.post("/database/upload-snapshot") +async def upload_snapshot(file: UploadFile = File(...)): + """Upload a snapshot file to the backups directory""" + if not file.filename.endswith('.db'): + raise HTTPException(status_code=400, detail="File must be a .db file") + + try: + # Save uploaded file to backups directory + backups_dir = Path("./data/backups") + backups_dir.mkdir(parents=True, exist_ok=True) + + timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S") + uploaded_filename = f"snapshot_uploaded_{timestamp}.db" + file_path = backups_dir / uploaded_filename + + # Save file + with open(file_path, "wb") as buffer: + shutil.copyfileobj(file.file, buffer) + + # Create metadata + metadata = { + "filename": uploaded_filename, + "created_at": timestamp, + "created_at_iso": datetime.utcnow().isoformat(), + "description": f"Uploaded: {file.filename}", + "size_bytes": file_path.stat().st_size, + "size_mb": round(file_path.stat().st_size / (1024 * 1024), 2), + "type": "uploaded" + } + + metadata_path = backups_dir / f"{uploaded_filename}.meta.json" + import json + with open(metadata_path, 'w') as f: + json.dump(metadata, f, indent=2) + + return { + "message": "Snapshot uploaded successfully", + "snapshot": metadata + } + + except Exception as e: + raise 
HTTPException(status_code=500, detail=f"Upload failed: {str(e)}") diff --git a/backend/services/backup_scheduler.py b/backend/services/backup_scheduler.py new file mode 100644 index 0000000..15168cc --- /dev/null +++ b/backend/services/backup_scheduler.py @@ -0,0 +1,145 @@ +""" +Automatic Database Backup Scheduler +Handles scheduled automatic backups of the database +""" + +import schedule +import time +import threading +from datetime import datetime +from typing import Optional +import logging + +from backend.services.database_backup import DatabaseBackupService + +logger = logging.getLogger(__name__) + + +class BackupScheduler: + """Manages automatic database backups on a schedule""" + + def __init__(self, db_path: str = "./data/seismo_fleet.db", backups_dir: str = "./data/backups"): + self.backup_service = DatabaseBackupService(db_path=db_path, backups_dir=backups_dir) + self.scheduler_thread: Optional[threading.Thread] = None + self.is_running = False + + # Default settings + self.backup_interval_hours = 24 # Daily backups + self.keep_count = 10 # Keep last 10 backups + self.enabled = False + + def configure(self, interval_hours: int = 24, keep_count: int = 10, enabled: bool = True): + """ + Configure backup scheduler settings + + Args: + interval_hours: Hours between automatic backups + keep_count: Number of backups to retain + enabled: Whether automatic backups are enabled + """ + self.backup_interval_hours = interval_hours + self.keep_count = keep_count + self.enabled = enabled + + logger.info(f"Backup scheduler configured: interval={interval_hours}h, keep={keep_count}, enabled={enabled}") + + def create_automatic_backup(self): + """Create an automatic backup and cleanup old ones""" + if not self.enabled: + logger.info("Automatic backups are disabled, skipping") + return + + try: + timestamp = datetime.utcnow().strftime("%Y-%m-%d %H:%M UTC") + description = f"Automatic backup - {timestamp}" + + logger.info("Creating automatic backup...") + snapshot = self.backup_service.create_snapshot(description=description) + + logger.info(f"Automatic backup created: {snapshot['filename']} ({snapshot['size_mb']} MB)") + + # Cleanup old backups + cleanup_result = self.backup_service.cleanup_old_snapshots(keep_count=self.keep_count) + if cleanup_result['deleted'] > 0: + logger.info(f"Cleaned up {cleanup_result['deleted']} old snapshots") + + return snapshot + + except Exception as e: + logger.error(f"Automatic backup failed: {str(e)}") + return None + + def start(self): + """Start the backup scheduler in a background thread""" + if self.is_running: + logger.warning("Backup scheduler is already running") + return + + if not self.enabled: + logger.info("Backup scheduler is disabled, not starting") + return + + logger.info(f"Starting backup scheduler (every {self.backup_interval_hours} hours)") + + # Clear any existing scheduled jobs + schedule.clear() + + # Schedule the backup job + schedule.every(self.backup_interval_hours).hours.do(self.create_automatic_backup) + + # Also run immediately on startup + self.create_automatic_backup() + + # Start the scheduler thread + self.is_running = True + self.scheduler_thread = threading.Thread(target=self._run_scheduler, daemon=True) + self.scheduler_thread.start() + + logger.info("Backup scheduler started successfully") + + def _run_scheduler(self): + """Internal method to run the scheduler loop""" + while self.is_running: + schedule.run_pending() + time.sleep(60) # Check every minute + + def stop(self): + """Stop the backup scheduler""" + if not 
self.is_running: + logger.warning("Backup scheduler is not running") + return + + logger.info("Stopping backup scheduler...") + self.is_running = False + schedule.clear() + + if self.scheduler_thread: + self.scheduler_thread.join(timeout=5) + + logger.info("Backup scheduler stopped") + + def get_status(self) -> dict: + """Get current scheduler status""" + next_run = None + if self.is_running and schedule.jobs: + next_run = schedule.jobs[0].next_run.isoformat() if schedule.jobs[0].next_run else None + + return { + "enabled": self.enabled, + "running": self.is_running, + "interval_hours": self.backup_interval_hours, + "keep_count": self.keep_count, + "next_run": next_run + } + + +# Global scheduler instance +_scheduler_instance: Optional[BackupScheduler] = None + + +def get_backup_scheduler() -> BackupScheduler: + """Get or create the global backup scheduler instance""" + global _scheduler_instance + if _scheduler_instance is None: + _scheduler_instance = BackupScheduler() + return _scheduler_instance diff --git a/backend/services/database_backup.py b/backend/services/database_backup.py new file mode 100644 index 0000000..2858fd2 --- /dev/null +++ b/backend/services/database_backup.py @@ -0,0 +1,192 @@ +""" +Database Backup and Restore Service +Handles full database snapshots, restoration, and remote synchronization +""" + +import os +import shutil +import sqlite3 +from datetime import datetime +from pathlib import Path +from typing import List, Dict, Optional +import json + + +class DatabaseBackupService: + """Manages database backup operations""" + + def __init__(self, db_path: str = "./data/seismo_fleet.db", backups_dir: str = "./data/backups"): + self.db_path = Path(db_path) + self.backups_dir = Path(backups_dir) + self.backups_dir.mkdir(parents=True, exist_ok=True) + + def create_snapshot(self, description: Optional[str] = None) -> Dict: + """ + Create a full database snapshot using SQLite backup API + Returns snapshot metadata + """ + if not self.db_path.exists(): + raise FileNotFoundError(f"Database not found at {self.db_path}") + + # Generate snapshot filename with timestamp + timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S") + snapshot_name = f"snapshot_{timestamp}.db" + snapshot_path = self.backups_dir / snapshot_name + + # Get database size before backup + db_size = self.db_path.stat().st_size + + try: + # Use SQLite backup API for safe backup (handles concurrent access) + source_conn = sqlite3.connect(str(self.db_path)) + dest_conn = sqlite3.connect(str(snapshot_path)) + + # Perform the backup + with dest_conn: + source_conn.backup(dest_conn) + + source_conn.close() + dest_conn.close() + + # Create metadata + metadata = { + "filename": snapshot_name, + "created_at": timestamp, + "created_at_iso": datetime.utcnow().isoformat(), + "description": description or "Manual snapshot", + "size_bytes": snapshot_path.stat().st_size, + "size_mb": round(snapshot_path.stat().st_size / (1024 * 1024), 2), + "original_db_size_bytes": db_size, + "type": "manual" + } + + # Save metadata as JSON sidecar file + metadata_path = self.backups_dir / f"{snapshot_name}.meta.json" + with open(metadata_path, 'w') as f: + json.dump(metadata, f, indent=2) + + return metadata + + except Exception as e: + # Clean up partial snapshot if it exists + if snapshot_path.exists(): + snapshot_path.unlink() + raise Exception(f"Snapshot creation failed: {str(e)}") + + def list_snapshots(self) -> List[Dict]: + """ + List all available snapshots with metadata + Returns list sorted by creation date (newest first) + """ 
+ snapshots = [] + + for db_file in sorted(self.backups_dir.glob("snapshot_*.db"), reverse=True): + metadata_file = self.backups_dir / f"{db_file.name}.meta.json" + + if metadata_file.exists(): + with open(metadata_file, 'r') as f: + metadata = json.load(f) + else: + # Fallback for legacy snapshots without metadata + stat_info = db_file.stat() + metadata = { + "filename": db_file.name, + "created_at": datetime.fromtimestamp(stat_info.st_mtime).strftime("%Y%m%d_%H%M%S"), + "created_at_iso": datetime.fromtimestamp(stat_info.st_mtime).isoformat(), + "description": "Legacy snapshot", + "size_bytes": stat_info.st_size, + "size_mb": round(stat_info.st_size / (1024 * 1024), 2), + "type": "manual" + } + + snapshots.append(metadata) + + return snapshots + + def delete_snapshot(self, filename: str) -> bool: + """Delete a snapshot and its metadata""" + snapshot_path = self.backups_dir / filename + metadata_path = self.backups_dir / f"{filename}.meta.json" + + if not snapshot_path.exists(): + raise FileNotFoundError(f"Snapshot {filename} not found") + + snapshot_path.unlink() + if metadata_path.exists(): + metadata_path.unlink() + + return True + + def restore_snapshot(self, filename: str, create_backup_before_restore: bool = True) -> Dict: + """ + Restore database from a snapshot + Creates a safety backup before restoring if requested + """ + snapshot_path = self.backups_dir / filename + + if not snapshot_path.exists(): + raise FileNotFoundError(f"Snapshot {filename} not found") + + if not self.db_path.exists(): + raise FileNotFoundError(f"Database not found at {self.db_path}") + + backup_info = None + + # Create safety backup before restore + if create_backup_before_restore: + backup_info = self.create_snapshot(description="Auto-backup before restore") + + try: + # Replace database file + shutil.copy2(str(snapshot_path), str(self.db_path)) + + return { + "message": "Database restored successfully", + "restored_from": filename, + "restored_at": datetime.utcnow().isoformat(), + "backup_created": backup_info["filename"] if backup_info else None + } + + except Exception as e: + raise Exception(f"Restore failed: {str(e)}") + + def get_database_stats(self) -> Dict: + """Get statistics about the current database""" + if not self.db_path.exists(): + return {"error": "Database not found"} + + conn = sqlite3.connect(str(self.db_path)) + cursor = conn.cursor() + + # Get table counts + cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%'") + tables = cursor.fetchall() + + table_stats = {} + total_rows = 0 + + for (table_name,) in tables: + cursor.execute(f"SELECT COUNT(*) FROM {table_name}") + count = cursor.fetchone()[0] + table_stats[table_name] = count + total_rows += count + + conn.close() + + db_size = self.db_path.stat().st_size + + return { + "database_path": str(self.db_path), + "size_bytes": db_size, + "size_mb": round(db_size / (1024 * 1024), 2), + "total_rows": total_rows, + "tables": table_stats, + "last_modified": datetime.fromtimestamp(self.db_path.stat().st_mtime).isoformat() + } + + def download_snapshot(self, filename: str) -> Path: + """Get the file path for downloading a snapshot""" + snapshot_path = self.backups_dir / filename + if not snapshot_path.exists(): + raise FileNotFoundError(f"Snapshot {filename} not found") + return snapshot_path diff --git a/docs/DATABASE_MANAGEMENT.md b/docs/DATABASE_MANAGEMENT.md new file mode 100644 index 0000000..73e3246 --- /dev/null +++ b/docs/DATABASE_MANAGEMENT.md @@ -0,0 +1,477 @@ +# Database Management Guide + 
+This guide covers the comprehensive database management features available in the Seismo Fleet Manager, including manual snapshots, restoration, remote cloning, and automatic backups. + +## Table of Contents + +1. [Manual Database Snapshots](#manual-database-snapshots) +2. [Restore from Snapshot](#restore-from-snapshot) +3. [Download and Upload Snapshots](#download-and-upload-snapshots) +4. [Clone Database to Dev Server](#clone-database-to-dev-server) +5. [Automatic Backup Service](#automatic-backup-service) +6. [API Reference](#api-reference) + +--- + +## Manual Database Snapshots + +### Creating a Snapshot via UI + +1. Navigate to **Settings** → **Danger Zone** tab +2. Scroll to the **Database Management** section +3. Click **"Create Snapshot"** +4. Optionally enter a description +5. The snapshot will be created and appear in the "Available Snapshots" list + +### Creating a Snapshot via API + +```bash +curl -X POST http://localhost:8000/api/settings/database/snapshot \ + -H "Content-Type: application/json" \ + -d '{"description": "Pre-deployment backup"}' +``` + +### What Happens + +- A full copy of the SQLite database is created using the SQLite backup API +- The snapshot is stored in `./data/backups/` directory +- A metadata JSON file is created alongside the snapshot +- No downtime or interruption to the running application + +### Snapshot Files + +Snapshots are stored as: +- **Database file**: `snapshot_YYYYMMDD_HHMMSS.db` +- **Metadata file**: `snapshot_YYYYMMDD_HHMMSS.db.meta.json` + +Example: +``` +data/backups/ +├── snapshot_20250101_143022.db +├── snapshot_20250101_143022.db.meta.json +├── snapshot_20250102_080000.db +└── snapshot_20250102_080000.db.meta.json +``` + +--- + +## Restore from Snapshot + +### Restoring via UI + +1. Navigate to **Settings** → **Danger Zone** tab +2. In the **Available Snapshots** section, find the snapshot you want to restore +3. Click the **restore icon** (circular arrow) next to the snapshot +4. Confirm the restoration warning +5. A safety backup of the current database is automatically created +6. The database is replaced with the snapshot +7. The page reloads automatically + +### Restoring via API + +```bash +curl -X POST http://localhost:8000/api/settings/database/restore \ + -H "Content-Type: application/json" \ + -d '{ + "filename": "snapshot_20250101_143022.db", + "create_backup": true + }' +``` + +### Important Notes + +- **Always creates a safety backup** before restoring (unless explicitly disabled) +- **Application reload required** - Users should refresh their browsers +- **Atomic operation** - The entire database is replaced at once +- **Cannot be undone** - But you'll have the safety backup + +--- + +## Download and Upload Snapshots + +### Download a Snapshot + +**Via UI**: Click the download icon next to any snapshot in the list + +**Via Browser**: +``` +http://localhost:8000/api/settings/database/snapshot/snapshot_20250101_143022.db +``` + +**Via Command Line**: +```bash +curl -o backup.db http://localhost:8000/api/settings/database/snapshot/snapshot_20250101_143022.db +``` + +### Upload a Snapshot + +**Via UI**: +1. Navigate to **Settings** → **Danger Zone** tab +2. Find the **Upload Snapshot** section +3. Click **"Choose File"** and select a `.db` file +4. 
Click **"Upload Snapshot"** + +**Via Command Line**: +```bash +curl -X POST http://localhost:8000/api/settings/database/upload-snapshot \ + -F "file=@/path/to/your/backup.db" +``` + +--- + +## Clone Database to Dev Server + +The clone tool allows you to copy the production database to a remote development server over the network. + +### Prerequisites + +- Remote dev server must have the same Seismo Fleet Manager installation +- Network connectivity between production and dev servers +- Python 3 and `requests` library installed + +### Basic Usage + +```bash +# Clone current database to dev server +python3 scripts/clone_db_to_dev.py --url https://dev.example.com + +# Clone using existing snapshot +python3 scripts/clone_db_to_dev.py \ + --url https://dev.example.com \ + --snapshot snapshot_20250101_143022.db + +# Clone with authentication token +python3 scripts/clone_db_to_dev.py \ + --url https://dev.example.com \ + --token YOUR_AUTH_TOKEN +``` + +### What Happens + +1. Creates a snapshot of the production database (or uses existing one) +2. Uploads the snapshot to the remote dev server +3. Automatically restores the snapshot on the dev server +4. Creates a safety backup on the dev server before restoring + +### Remote Server Setup + +The remote dev server needs no special setup - it just needs to be running the same Seismo Fleet Manager application with the database management endpoints enabled. + +### Use Cases + +- **Testing**: Test changes against production data in a dev environment +- **Debugging**: Investigate production issues with real data safely +- **Training**: Provide realistic data for user training +- **Development**: Build new features with realistic data + +--- + +## Automatic Backup Service + +The automatic backup service runs scheduled backups in the background and manages backup retention. + +### Configuration + +The backup scheduler can be configured programmatically or via environment variables. 
+ +**Programmatic Configuration**: + +```python +from backend.services.backup_scheduler import get_backup_scheduler + +scheduler = get_backup_scheduler() +scheduler.configure( + interval_hours=24, # Backup every 24 hours + keep_count=10, # Keep last 10 backups + enabled=True # Enable automatic backups +) +scheduler.start() +``` + +**Environment Variables** (add to your `.env` or deployment config): + +```bash +AUTO_BACKUP_ENABLED=true +AUTO_BACKUP_INTERVAL_HOURS=24 +AUTO_BACKUP_KEEP_COUNT=10 +``` + +### Integration with Application Startup + +Add to `backend/main.py`: + +```python +from backend.services.backup_scheduler import get_backup_scheduler + +@app.on_event("startup") +async def startup_event(): + # Start automatic backup scheduler + scheduler = get_backup_scheduler() + scheduler.configure( + interval_hours=24, # Daily backups + keep_count=10, # Keep 10 most recent + enabled=True + ) + scheduler.start() + +@app.on_event("shutdown") +async def shutdown_event(): + # Stop backup scheduler gracefully + scheduler = get_backup_scheduler() + scheduler.stop() +``` + +### Manual Control + +```python +from backend.services.backup_scheduler import get_backup_scheduler + +scheduler = get_backup_scheduler() + +# Get current status +status = scheduler.get_status() +print(status) +# {'enabled': True, 'running': True, 'interval_hours': 24, 'keep_count': 10, 'next_run': '2025-01-02T14:00:00'} + +# Create backup immediately +scheduler.create_automatic_backup() + +# Stop scheduler +scheduler.stop() + +# Start scheduler +scheduler.start() +``` + +### Backup Retention + +The scheduler automatically deletes old backups based on the `keep_count` setting. For example, if `keep_count=10`, only the 10 most recent backups are kept, and older ones are automatically deleted. + +--- + +## API Reference + +### Database Statistics + +```http +GET /api/settings/database/stats +``` + +Returns database size, row counts, and last modified time. + +**Response**: +```json +{ + "database_path": "./data/seismo_fleet.db", + "size_bytes": 1048576, + "size_mb": 1.0, + "total_rows": 1250, + "tables": { + "roster": 450, + "emitters": 600, + "ignored_units": 50, + "unit_history": 150 + }, + "last_modified": "2025-01-01T14:30:22" +} +``` + +### Create Snapshot + +```http +POST /api/settings/database/snapshot +Content-Type: application/json + +{ + "description": "Optional description" +} +``` + +**Response**: +```json +{ + "message": "Snapshot created successfully", + "snapshot": { + "filename": "snapshot_20250101_143022.db", + "created_at": "20250101_143022", + "created_at_iso": "2025-01-01T14:30:22", + "description": "Optional description", + "size_bytes": 1048576, + "size_mb": 1.0, + "type": "manual" + } +} +``` + +### List Snapshots + +```http +GET /api/settings/database/snapshots +``` + +**Response**: +```json +{ + "snapshots": [ + { + "filename": "snapshot_20250101_143022.db", + "created_at": "20250101_143022", + "created_at_iso": "2025-01-01T14:30:22", + "description": "Manual backup", + "size_mb": 1.0, + "type": "manual" + } + ], + "count": 1 +} +``` + +### Download Snapshot + +```http +GET /api/settings/database/snapshot/{filename} +``` + +Returns the snapshot file as a download. 
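After downloading, the file can be opened directly with `sqlite3` and checked with `PRAGMA integrity_check` before relying on it. A minimal sketch, assuming a local deployment at `localhost:8000` and the example snapshot filename used elsewhere in this guide:

```python
import sqlite3

import requests

BASE_URL = "http://localhost:8000"            # adjust to your deployment
FILENAME = "snapshot_20250101_143022.db"      # example snapshot name

# Stream the snapshot to disk via the download endpoint.
with requests.get(f"{BASE_URL}/api/settings/database/snapshot/{FILENAME}", stream=True, timeout=300) as resp:
    resp.raise_for_status()
    with open(FILENAME, "wb") as fh:
        for chunk in resp.iter_content(chunk_size=1 << 20):
            fh.write(chunk)

# Confirm the downloaded file is a readable SQLite database.
conn = sqlite3.connect(FILENAME)
result = conn.execute("PRAGMA integrity_check").fetchone()[0]
conn.close()
print("integrity_check:", result)  # expect "ok"
```
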
+ +### Delete Snapshot + +```http +DELETE /api/settings/database/snapshot/{filename} +``` + +### Restore Database + +```http +POST /api/settings/database/restore +Content-Type: application/json + +{ + "filename": "snapshot_20250101_143022.db", + "create_backup": true +} +``` + +**Response**: +```json +{ + "message": "Database restored successfully", + "restored_from": "snapshot_20250101_143022.db", + "restored_at": "2025-01-01T15:00:00", + "backup_created": "snapshot_20250101_150000.db" +} +``` + +### Upload Snapshot + +```http +POST /api/settings/database/upload-snapshot +Content-Type: multipart/form-data + +file: +``` + +--- + +## Best Practices + +### 1. Regular Backups + +- **Enable automatic backups** with a 24-hour interval +- **Keep at least 7-10 backups** for historical coverage +- **Create manual snapshots** before major changes + +### 2. Before Major Operations + +Always create a snapshot before: +- Software upgrades +- Bulk data imports +- Database schema changes +- Testing destructive operations + +### 3. Testing Restores + +Periodically test your restore process: +1. Download a snapshot +2. Test restoration on a dev environment +3. Verify data integrity + +### 4. Off-Site Backups + +For production systems: +- **Download snapshots** to external storage regularly +- Use the clone tool to **sync to remote servers** +- Store backups in **multiple geographic locations** + +### 5. Snapshot Management + +- Delete old snapshots when no longer needed +- Use descriptive names/descriptions for manual snapshots +- Keep pre-deployment snapshots separate + +--- + +## Troubleshooting + +### Snapshot Creation Fails + +**Problem**: "Database is locked" error + +**Solution**: The database is being written to. Wait a moment and try again. The SQLite backup API handles most locking automatically. + +### Restore Doesn't Complete + +**Problem**: Restore appears to hang + +**Solution**: +- Check server logs for errors +- Ensure sufficient disk space +- Verify the snapshot file isn't corrupted + +### Upload Fails on Dev Server + +**Problem**: "Permission denied" or "File too large" + +**Solutions**: +- Check file upload size limits in your web server config (nginx/apache) +- Verify write permissions on `./data/backups/` directory +- Ensure sufficient disk space + +### Automatic Backups Not Running + +**Problem**: No automatic backups being created + +**Solutions**: +1. Check if scheduler is enabled: `scheduler.get_status()` +2. Check application logs for scheduler errors +3. Ensure `schedule` library is installed: `pip install schedule` +4. Verify scheduler was started in application startup + +--- + +## Security Considerations + +1. **Access Control**: Restrict access to the Settings → Danger Zone to administrators only +2. **Backup Storage**: Store backups in a secure location with proper permissions +3. **Remote Cloning**: Use authentication tokens when cloning to remote servers +4. **Data Sensitivity**: Remember that snapshots contain all database data - treat them with the same security as the live database + +--- + +## File Locations + +- **Database**: `./data/seismo_fleet.db` +- **Backups Directory**: `./data/backups/` +- **Clone Script**: `./scripts/clone_db_to_dev.py` +- **Backup Service**: `./backend/services/database_backup.py` +- **Scheduler Service**: `./backend/services/backup_scheduler.py` + +--- + +## Support + +For issues or questions: +1. Check application logs in `./logs/` +2. Review this documentation +3. Test with a small database first +4. 
Contact your system administrator diff --git a/scripts/clone_db_to_dev.py b/scripts/clone_db_to_dev.py new file mode 100755 index 0000000..e9394d8 --- /dev/null +++ b/scripts/clone_db_to_dev.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python3 +""" +Clone Production Database to Dev Server +Helper script to clone the production database to a remote development server +""" + +import argparse +import requests +from pathlib import Path +import sys + +# Add parent directory to path for imports +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from backend.services.database_backup import DatabaseBackupService + + +def clone_to_dev(remote_url: str, snapshot_filename: str = None, auth_token: str = None): + """Clone database to remote dev server""" + + backup_service = DatabaseBackupService() + + print(f"🔄 Cloning database to {remote_url}...") + + try: + # If no snapshot specified, create a new one + if snapshot_filename: + print(f"📦 Using existing snapshot: {snapshot_filename}") + snapshot_path = backup_service.backups_dir / snapshot_filename + if not snapshot_path.exists(): + print(f"❌ Error: Snapshot {snapshot_filename} not found") + return False + else: + print("📸 Creating new snapshot...") + snapshot_info = backup_service.create_snapshot(description="Clone to dev server") + snapshot_filename = snapshot_info["filename"] + snapshot_path = backup_service.backups_dir / snapshot_filename + print(f"✅ Snapshot created: {snapshot_filename} ({snapshot_info['size_mb']} MB)") + + # Upload to remote server + print(f"📤 Uploading to {remote_url}...") + + headers = {} + if auth_token: + headers["Authorization"] = f"Bearer {auth_token}" + + with open(snapshot_path, 'rb') as f: + files = {'file': (snapshot_filename, f, 'application/x-sqlite3')} + + response = requests.post( + f"{remote_url.rstrip('/')}/api/settings/database/upload-snapshot", + files=files, + headers=headers, + timeout=300 + ) + + response.raise_for_status() + result = response.json() + + print(f"✅ Upload successful!") + print(f" Remote filename: {result['snapshot']['filename']}") + print(f" Size: {result['snapshot']['size_mb']} MB") + + # Now restore on remote server + print("🔄 Restoring on remote server...") + restore_response = requests.post( + f"{remote_url.rstrip('/')}/api/settings/database/restore", + json={ + "filename": result['snapshot']['filename'], + "create_backup": True + }, + headers=headers, + timeout=60 + ) + + restore_response.raise_for_status() + restore_result = restore_response.json() + + print(f"✅ Database cloned successfully!") + print(f" Restored from: {restore_result['restored_from']}") + print(f" Remote backup created: {restore_result.get('backup_created', 'N/A')}") + + return True + + except requests.exceptions.RequestException as e: + print(f"❌ Network error: {str(e)}") + return False + except Exception as e: + print(f"❌ Error: {str(e)}") + return False + + +def main(): + parser = argparse.ArgumentParser( + description="Clone production database to development server", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Clone current database to dev server + python clone_db_to_dev.py --url https://dev.example.com + + # Clone using existing snapshot + python clone_db_to_dev.py --url https://dev.example.com --snapshot snapshot_20250101_120000.db + + # Clone with authentication + python clone_db_to_dev.py --url https://dev.example.com --token YOUR_TOKEN + """ + ) + + parser.add_argument( + '--url', + required=True, + help='Remote dev server URL (e.g., https://dev.example.com)' + ) + + 
parser.add_argument( + '--snapshot', + help='Use existing snapshot instead of creating new one' + ) + + parser.add_argument( + '--token', + help='Authentication token for remote server' + ) + + args = parser.parse_args() + + print("=" * 60) + print(" Database Cloning Tool - Production to Dev") + print("=" * 60) + print() + + success = clone_to_dev( + remote_url=args.url, + snapshot_filename=args.snapshot, + auth_token=args.token + ) + + print() + if success: + print("🎉 Cloning completed successfully!") + sys.exit(0) + else: + print("💥 Cloning failed") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/templates/dashboard.html b/templates/dashboard.html index 5bb86ec..6fdb27d 100644 --- a/templates/dashboard.html +++ b/templates/dashboard.html @@ -116,28 +116,28 @@ - -
-     Recent Photos
+     Recent Call-Ins
+         d="M12 8v4l3 3m6-3a9 9 0 11-18 0 9 9 0 0118 0z">
-     No recent photos
+     Loading recent call-ins...
@@ -295,7 +295,7 @@ function toggleCard(cardName) { // Restore card states from localStorage on page load function restoreCardStates() { const cardStates = JSON.parse(localStorage.getItem('dashboardCardStates') || '{}'); - const cardNames = ['fleet-summary', 'recent-alerts', 'recent-photos', 'fleet-map', 'fleet-status']; + const cardNames = ['fleet-summary', 'recent-alerts', 'recent-callins', 'fleet-map', 'fleet-status']; cardNames.forEach(cardName => { const content = document.getElementById(`${cardName}-content`); @@ -531,6 +531,90 @@ async function loadRecentPhotos() { // Load recent photos on page load and refresh every 30 seconds loadRecentPhotos(); setInterval(loadRecentPhotos, 30000); + +// Load and display recent call-ins +let showingAllCallins = false; +const DEFAULT_CALLINS_DISPLAY = 5; + +async function loadRecentCallins() { + try { + const response = await fetch('/api/recent-callins?hours=6'); + if (!response.ok) { + throw new Error('Failed to load recent call-ins'); + } + + const data = await response.json(); + const callinsList = document.getElementById('recent-callins-list'); + const showAllButton = document.getElementById('show-all-callins'); + + if (data.call_ins && data.call_ins.length > 0) { + // Determine how many to show + const displayCount = showingAllCallins ? data.call_ins.length : Math.min(DEFAULT_CALLINS_DISPLAY, data.call_ins.length); + const callinsToDisplay = data.call_ins.slice(0, displayCount); + + // Build HTML for call-ins list + let html = ''; + callinsToDisplay.forEach(callin => { + // Status color + const statusColor = callin.status === 'OK' ? 'green' : callin.status === 'Pending' ? 'yellow' : 'red'; + const statusClass = callin.status === 'OK' ? 'bg-green-500' : callin.status === 'Pending' ? 'bg-yellow-500' : 'bg-red-500'; + + // Build location/note line + let subtitle = ''; + if (callin.location) { + subtitle = callin.location; + } else if (callin.note) { + subtitle = callin.note; + } + + html += ` +
+
+ +
+ + ${callin.unit_id} + + ${subtitle ? `

${subtitle}

` : ''} +
+
+ ${callin.time_ago} +
`; + }); + + callinsList.innerHTML = html; + + // Show/hide the "Show all" button + if (data.call_ins.length > DEFAULT_CALLINS_DISPLAY) { + showAllButton.classList.remove('hidden'); + showAllButton.textContent = showingAllCallins + ? `Show fewer (${DEFAULT_CALLINS_DISPLAY})` + : `Show all (${data.call_ins.length})`; + } else { + showAllButton.classList.add('hidden'); + } + } else { + callinsList.innerHTML = '

No units have called in within the past 6 hours

'; + showAllButton.classList.add('hidden'); + } + } catch (error) { + console.error('Error loading recent call-ins:', error); + document.getElementById('recent-callins-list').innerHTML = '

Failed to load recent call-ins

'; + } +} + +// Toggle show all/show fewer +document.addEventListener('DOMContentLoaded', function() { + const showAllButton = document.getElementById('show-all-callins'); + showAllButton.addEventListener('click', function() { + showingAllCallins = !showingAllCallins; + loadRecentCallins(); + }); +}); + +// Load recent call-ins on page load and refresh every 30 seconds +loadRecentCallins(); +setInterval(loadRecentCallins, 30000); {% endblock %} diff --git a/templates/settings.html b/templates/settings.html index 8602236..407f6c3 100644 --- a/templates/settings.html +++ b/templates/settings.html @@ -401,6 +401,99 @@
+     Database Management
+     Create snapshots, restore backups, and manage database files
+     Database Statistics
+     Create Database Snapshot
+     Create a full backup of the current database state
+     Available Snapshots
+     Upload Snapshot
+     Upload a database snapshot file from another server
@@ -1004,5 +1097,263 @@ async function confirmClearIgnored() { alert('❌ Error: ' + error.message); } } + +// ========== DATABASE MANAGEMENT ========== + +async function loadDatabaseStats() { + const loading = document.getElementById('dbStatsLoading'); + const content = document.getElementById('dbStatsContent'); + + try { + loading.classList.remove('hidden'); + content.classList.add('hidden'); + + const response = await fetch('/api/settings/database/stats'); + const stats = await response.json(); + + // Update stats display + document.getElementById('dbSize').textContent = stats.size_mb + ' MB'; + document.getElementById('dbRows').textContent = stats.total_rows.toLocaleString(); + + const lastMod = new Date(stats.last_modified); + document.getElementById('dbModified').textContent = lastMod.toLocaleDateString(); + + // Load snapshot count + const snapshotsResp = await fetch('/api/settings/database/snapshots'); + const snapshotsData = await snapshotsResp.json(); + document.getElementById('dbSnapshotCount').textContent = snapshotsData.count; + + loading.classList.add('hidden'); + content.classList.remove('hidden'); + } catch (error) { + loading.classList.add('hidden'); + alert('Error loading database stats: ' + error.message); + } +} + +async function createSnapshot() { + const description = prompt('Enter a description for this snapshot (optional):'); + + try { + const response = await fetch('/api/settings/database/snapshot', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ description: description || null }) + }); + + const result = await response.json(); + + if (response.ok) { + alert(`✅ Snapshot created successfully!\n\nFilename: ${result.snapshot.filename}\nSize: ${result.snapshot.size_mb} MB`); + loadSnapshots(); + loadDatabaseStats(); + } else { + alert('❌ Error: ' + (result.detail || 'Unknown error')); + } + } catch (error) { + alert('❌ Error: ' + error.message); + } +} + +async function loadSnapshots() { + const loading = document.getElementById('snapshotsLoading'); + const list = document.getElementById('snapshotsList'); + const empty = document.getElementById('snapshotsEmpty'); + + try { + loading.classList.remove('hidden'); + list.classList.add('hidden'); + empty.classList.add('hidden'); + + const response = await fetch('/api/settings/database/snapshots'); + const data = await response.json(); + + if (data.snapshots.length === 0) { + loading.classList.add('hidden'); + empty.classList.remove('hidden'); + return; + } + + list.innerHTML = data.snapshots.map(snapshot => createSnapshotCard(snapshot)).join(''); + + loading.classList.add('hidden'); + list.classList.remove('hidden'); + } catch (error) { + loading.classList.add('hidden'); + alert('Error loading snapshots: ' + error.message); + } +} + +function createSnapshotCard(snapshot) { + const createdDate = new Date(snapshot.created_at_iso); + const dateStr = createdDate.toLocaleString(); + + return ` +
+
+
+
+

${snapshot.filename}

+ + ${snapshot.type} + +
+

${snapshot.description}

+
+ 📅 ${dateStr} + 💾 ${snapshot.size_mb} MB +
+
+
+ + + +
+
+
+ `; +} + +function downloadSnapshot(filename) { + window.location.href = `/api/settings/database/snapshot/${filename}`; +} + +async function restoreSnapshot(filename) { + const confirmMsg = `⚠️ RESTORE DATABASE WARNING ⚠️ + +This will REPLACE the current database with snapshot: +${filename} + +A backup of the current database will be created automatically before restoring. + +THIS ACTION WILL RESTART THE APPLICATION! + +Continue?`; + + if (!confirm(confirmMsg)) { + return; + } + + try { + const response = await fetch('/api/settings/database/restore', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + filename: filename, + create_backup: true + }) + }); + + const result = await response.json(); + + if (response.ok) { + alert(`✅ Database restored successfully!\n\nRestored from: ${result.restored_from}\nBackup created: ${result.backup_created}\n\nThe page will now reload.`); + location.reload(); + } else { + alert('❌ Error: ' + (result.detail || 'Unknown error')); + } + } catch (error) { + alert('❌ Error: ' + error.message); + } +} + +async function deleteSnapshot(filename) { + if (!confirm(`Delete snapshot ${filename}?\n\nThis cannot be undone.`)) { + return; + } + + try { + const response = await fetch(`/api/settings/database/snapshot/${filename}`, { + method: 'DELETE' + }); + + const result = await response.json(); + + if (response.ok) { + alert(`✅ Snapshot deleted: ${filename}`); + loadSnapshots(); + loadDatabaseStats(); + } else { + alert('❌ Error: ' + (result.detail || 'Unknown error')); + } + } catch (error) { + alert('❌ Error: ' + error.message); + } +} + +// Upload snapshot form handler +document.getElementById('uploadSnapshotForm').addEventListener('submit', async function(e) { + e.preventDefault(); + + const fileInput = document.getElementById('snapshotFileInput'); + const resultDiv = document.getElementById('uploadResult'); + + if (!fileInput.files[0]) { + alert('Please select a file'); + return; + } + + const formData = new FormData(); + formData.append('file', fileInput.files[0]); + + try { + const response = await fetch('/api/settings/database/upload-snapshot', { + method: 'POST', + body: formData + }); + + const result = await response.json(); + + if (response.ok) { + resultDiv.className = 'mt-3 p-3 rounded-lg bg-green-100 dark:bg-green-900 text-green-800 dark:text-green-200'; + resultDiv.innerHTML = `✅ Uploaded: ${result.snapshot.filename} (${result.snapshot.size_mb} MB)`; + resultDiv.classList.remove('hidden'); + + fileInput.value = ''; + loadSnapshots(); + loadDatabaseStats(); + + setTimeout(() => { + resultDiv.classList.add('hidden'); + }, 5000); + } else { + resultDiv.className = 'mt-3 p-3 rounded-lg bg-red-100 dark:bg-red-900 text-red-800 dark:text-red-200'; + resultDiv.innerHTML = `❌ Error: ${result.detail || 'Unknown error'}`; + resultDiv.classList.remove('hidden'); + } + } catch (error) { + resultDiv.className = 'mt-3 p-3 rounded-lg bg-red-100 dark:bg-red-900 text-red-800 dark:text-red-200'; + resultDiv.innerHTML = `❌ Error: ${error.message}`; + resultDiv.classList.remove('hidden'); + } +}); + +// Load database stats and snapshots when danger zone tab is shown +const originalShowTab = showTab; +showTab = function(tabName) { + originalShowTab(tabName); + if (tabName === 'danger') { + loadDatabaseStats(); + loadSnapshots(); + } +}; {% endblock %}