chore: modular monolith folder split (no behavior change)
app/seismo/services/__init__.py (0 lines, Normal file)
app/seismo/services/backup_scheduler.py (145 lines, Normal file)
@@ -0,0 +1,145 @@
"""
Automatic Database Backup Scheduler
Handles scheduled automatic backups of the database
"""

import schedule
import time
import threading
from datetime import datetime
from typing import Optional
import logging

from backend.services.database_backup import DatabaseBackupService

logger = logging.getLogger(__name__)


class BackupScheduler:
    """Manages automatic database backups on a schedule"""

    def __init__(self, db_path: str = "./data/seismo_fleet.db", backups_dir: str = "./data/backups"):
        self.backup_service = DatabaseBackupService(db_path=db_path, backups_dir=backups_dir)
        self.scheduler_thread: Optional[threading.Thread] = None
        self.is_running = False

        # Default settings
        self.backup_interval_hours = 24  # Daily backups
        self.keep_count = 10  # Keep last 10 backups
        self.enabled = False

    def configure(self, interval_hours: int = 24, keep_count: int = 10, enabled: bool = True):
        """
        Configure backup scheduler settings

        Args:
            interval_hours: Hours between automatic backups
            keep_count: Number of backups to retain
            enabled: Whether automatic backups are enabled
        """
        self.backup_interval_hours = interval_hours
        self.keep_count = keep_count
        self.enabled = enabled

        logger.info(f"Backup scheduler configured: interval={interval_hours}h, keep={keep_count}, enabled={enabled}")

    def create_automatic_backup(self):
        """Create an automatic backup and clean up old ones"""
        if not self.enabled:
            logger.info("Automatic backups are disabled, skipping")
            return

        try:
            timestamp = datetime.utcnow().strftime("%Y-%m-%d %H:%M UTC")
            description = f"Automatic backup - {timestamp}"

            logger.info("Creating automatic backup...")
            snapshot = self.backup_service.create_snapshot(description=description)

            logger.info(f"Automatic backup created: {snapshot['filename']} ({snapshot['size_mb']} MB)")

            # Cleanup old backups
            cleanup_result = self.backup_service.cleanup_old_snapshots(keep_count=self.keep_count)
            if cleanup_result['deleted'] > 0:
                logger.info(f"Cleaned up {cleanup_result['deleted']} old snapshots")

            return snapshot

        except Exception as e:
            logger.error(f"Automatic backup failed: {str(e)}")
            return None

    def start(self):
        """Start the backup scheduler in a background thread"""
        if self.is_running:
            logger.warning("Backup scheduler is already running")
            return

        if not self.enabled:
            logger.info("Backup scheduler is disabled, not starting")
            return

        logger.info(f"Starting backup scheduler (every {self.backup_interval_hours} hours)")

        # Clear any existing scheduled jobs
        schedule.clear()

        # Schedule the backup job
        schedule.every(self.backup_interval_hours).hours.do(self.create_automatic_backup)

        # Also run immediately on startup
        self.create_automatic_backup()

        # Start the scheduler thread
        self.is_running = True
        self.scheduler_thread = threading.Thread(target=self._run_scheduler, daemon=True)
        self.scheduler_thread.start()

        logger.info("Backup scheduler started successfully")

    def _run_scheduler(self):
        """Internal method to run the scheduler loop"""
        while self.is_running:
            schedule.run_pending()
            time.sleep(60)  # Check every minute

    def stop(self):
        """Stop the backup scheduler"""
        if not self.is_running:
            logger.warning("Backup scheduler is not running")
            return

        logger.info("Stopping backup scheduler...")
        self.is_running = False
        schedule.clear()

        if self.scheduler_thread:
            self.scheduler_thread.join(timeout=5)

        logger.info("Backup scheduler stopped")

    def get_status(self) -> dict:
        """Get current scheduler status"""
        next_run = None
        if self.is_running and schedule.jobs:
            next_run = schedule.jobs[0].next_run.isoformat() if schedule.jobs[0].next_run else None

        return {
            "enabled": self.enabled,
            "running": self.is_running,
            "interval_hours": self.backup_interval_hours,
            "keep_count": self.keep_count,
            "next_run": next_run
        }


# Global scheduler instance
_scheduler_instance: Optional[BackupScheduler] = None


def get_backup_scheduler() -> BackupScheduler:
    """Get or create the global backup scheduler instance"""
    global _scheduler_instance
    if _scheduler_instance is None:
        _scheduler_instance = BackupScheduler()
    return _scheduler_instance
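A minimal usage sketch, illustrative only and not part of this commit: the module exposes get_backup_scheduler() as a singleton accessor, so an application startup hook could configure and start it roughly as below. The FastAPI lifespan wiring is an assumption about the host application, not something this diff establishes.

# Hypothetical startup wiring (assumes a FastAPI app; adjust to the real entry point).
from contextlib import asynccontextmanager
from fastapi import FastAPI

from app.seismo.services.backup_scheduler import get_backup_scheduler


@asynccontextmanager
async def lifespan(app: FastAPI):
    scheduler = get_backup_scheduler()
    # Daily backups, keeping the 10 most recent snapshots.
    scheduler.configure(interval_hours=24, keep_count=10, enabled=True)
    scheduler.start()   # runs one backup immediately, then every 24 h in a daemon thread
    yield
    scheduler.stop()    # joins the background thread on shutdown


app = FastAPI(lifespan=lifespan)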
app/seismo/services/database_backup.py (192 lines, Normal file)
@@ -0,0 +1,192 @@
"""
Database Backup and Restore Service
Handles full database snapshots, restoration, and remote synchronization
"""

import os
import shutil
import sqlite3
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Optional
import json


class DatabaseBackupService:
    """Manages database backup operations"""

    def __init__(self, db_path: str = "./data/seismo_fleet.db", backups_dir: str = "./data/backups"):
        self.db_path = Path(db_path)
        self.backups_dir = Path(backups_dir)
        self.backups_dir.mkdir(parents=True, exist_ok=True)

    def create_snapshot(self, description: Optional[str] = None) -> Dict:
        """
        Create a full database snapshot using the SQLite backup API
        Returns snapshot metadata
        """
        if not self.db_path.exists():
            raise FileNotFoundError(f"Database not found at {self.db_path}")

        # Generate snapshot filename with timestamp
        timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
        snapshot_name = f"snapshot_{timestamp}.db"
        snapshot_path = self.backups_dir / snapshot_name

        # Get database size before backup
        db_size = self.db_path.stat().st_size

        try:
            # Use SQLite backup API for safe backup (handles concurrent access)
            source_conn = sqlite3.connect(str(self.db_path))
            dest_conn = sqlite3.connect(str(snapshot_path))

            # Perform the backup
            with dest_conn:
                source_conn.backup(dest_conn)

            source_conn.close()
            dest_conn.close()

            # Create metadata
            metadata = {
                "filename": snapshot_name,
                "created_at": timestamp,
                "created_at_iso": datetime.utcnow().isoformat(),
                "description": description or "Manual snapshot",
                "size_bytes": snapshot_path.stat().st_size,
                "size_mb": round(snapshot_path.stat().st_size / (1024 * 1024), 2),
                "original_db_size_bytes": db_size,
                "type": "manual"
            }

            # Save metadata as JSON sidecar file
            metadata_path = self.backups_dir / f"{snapshot_name}.meta.json"
            with open(metadata_path, 'w') as f:
                json.dump(metadata, f, indent=2)

            return metadata

        except Exception as e:
            # Clean up partial snapshot if it exists
            if snapshot_path.exists():
                snapshot_path.unlink()
            raise Exception(f"Snapshot creation failed: {str(e)}")

    def list_snapshots(self) -> List[Dict]:
        """
        List all available snapshots with metadata
        Returns list sorted by creation date (newest first)
        """
        snapshots = []

        for db_file in sorted(self.backups_dir.glob("snapshot_*.db"), reverse=True):
            metadata_file = self.backups_dir / f"{db_file.name}.meta.json"

            if metadata_file.exists():
                with open(metadata_file, 'r') as f:
                    metadata = json.load(f)
            else:
                # Fallback for legacy snapshots without metadata
                stat_info = db_file.stat()
                metadata = {
                    "filename": db_file.name,
                    "created_at": datetime.fromtimestamp(stat_info.st_mtime).strftime("%Y%m%d_%H%M%S"),
                    "created_at_iso": datetime.fromtimestamp(stat_info.st_mtime).isoformat(),
                    "description": "Legacy snapshot",
                    "size_bytes": stat_info.st_size,
                    "size_mb": round(stat_info.st_size / (1024 * 1024), 2),
                    "type": "manual"
                }

            snapshots.append(metadata)

        return snapshots

    def delete_snapshot(self, filename: str) -> bool:
        """Delete a snapshot and its metadata"""
        snapshot_path = self.backups_dir / filename
        metadata_path = self.backups_dir / f"{filename}.meta.json"

        if not snapshot_path.exists():
            raise FileNotFoundError(f"Snapshot {filename} not found")

        snapshot_path.unlink()
        if metadata_path.exists():
            metadata_path.unlink()

        return True

    def restore_snapshot(self, filename: str, create_backup_before_restore: bool = True) -> Dict:
        """
        Restore database from a snapshot
        Creates a safety backup before restoring if requested
        """
        snapshot_path = self.backups_dir / filename

        if not snapshot_path.exists():
            raise FileNotFoundError(f"Snapshot {filename} not found")

        if not self.db_path.exists():
            raise FileNotFoundError(f"Database not found at {self.db_path}")

        backup_info = None

        # Create safety backup before restore
        if create_backup_before_restore:
            backup_info = self.create_snapshot(description="Auto-backup before restore")

        try:
            # Replace database file
            shutil.copy2(str(snapshot_path), str(self.db_path))

            return {
                "message": "Database restored successfully",
                "restored_from": filename,
                "restored_at": datetime.utcnow().isoformat(),
                "backup_created": backup_info["filename"] if backup_info else None
            }

        except Exception as e:
            raise Exception(f"Restore failed: {str(e)}")

    def get_database_stats(self) -> Dict:
        """Get statistics about the current database"""
        if not self.db_path.exists():
            return {"error": "Database not found"}

        conn = sqlite3.connect(str(self.db_path))
        cursor = conn.cursor()

        # Get table counts
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%'")
        tables = cursor.fetchall()

        table_stats = {}
        total_rows = 0

        for (table_name,) in tables:
            cursor.execute(f"SELECT COUNT(*) FROM {table_name}")
            count = cursor.fetchone()[0]
            table_stats[table_name] = count
            total_rows += count

        conn.close()

        db_size = self.db_path.stat().st_size

        return {
            "database_path": str(self.db_path),
            "size_bytes": db_size,
            "size_mb": round(db_size / (1024 * 1024), 2),
            "total_rows": total_rows,
            "tables": table_stats,
            "last_modified": datetime.fromtimestamp(self.db_path.stat().st_mtime).isoformat()
        }

    def download_snapshot(self, filename: str) -> Path:
        """Get the file path for downloading a snapshot"""
        snapshot_path = self.backups_dir / filename
        if not snapshot_path.exists():
            raise FileNotFoundError(f"Snapshot {filename} not found")
        return snapshot_path
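One gap worth flagging: backup_scheduler.py above calls self.backup_service.cleanup_old_snapshots(keep_count=...) and reads a 'deleted' count from the result, but no such method appears in this hunk of DatabaseBackupService. If it does not live elsewhere in the codebase, a retention helper along these lines would close the gap. This is an illustrative sketch built only from the list_snapshots and delete_snapshot methods shown above; the exact return shape is an assumption inferred from the call site.

    def cleanup_old_snapshots(self, keep_count: int = 10) -> Dict:
        """Delete all but the newest `keep_count` snapshots (sketch, not part of this commit)."""
        snapshots = self.list_snapshots()  # already sorted newest first
        to_delete = snapshots[keep_count:]

        deleted = 0
        for meta in to_delete:
            self.delete_snapshot(meta["filename"])
            deleted += 1

        # 'deleted' is the key backup_scheduler.create_automatic_backup expects
        return {"deleted": deleted, "kept": min(len(snapshots), keep_count)}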
app/seismo/services/snapshot.py (191 lines, Normal file)
@@ -0,0 +1,191 @@
from datetime import datetime, timezone
from sqlalchemy.orm import Session

from backend.database import get_db_session
from backend.models import Emitter, RosterUnit, IgnoredUnit


def ensure_utc(dt):
    if dt is None:
        return None
    if dt.tzinfo is None:
        return dt.replace(tzinfo=timezone.utc)
    return dt.astimezone(timezone.utc)


def format_age(last_seen):
    if not last_seen:
        return "N/A"
    last_seen = ensure_utc(last_seen)
    now = datetime.now(timezone.utc)
    diff = now - last_seen
    hours = diff.total_seconds() // 3600
    mins = (diff.total_seconds() % 3600) // 60
    return f"{int(hours)}h {int(mins)}m"


def calculate_status(last_seen, status_ok_threshold=12, status_pending_threshold=24):
    """
    Calculate status based on how long ago the unit was last seen.

    Args:
        last_seen: datetime of last seen (UTC)
        status_ok_threshold: hours before status becomes Pending (default 12)
        status_pending_threshold: hours before status becomes Missing (default 24)

    Returns:
        "OK", "Pending", or "Missing"
    """
    if not last_seen:
        return "Missing"

    last_seen = ensure_utc(last_seen)
    now = datetime.now(timezone.utc)
    hours_ago = (now - last_seen).total_seconds() / 3600

    if hours_ago > status_pending_threshold:
        return "Missing"
    elif hours_ago > status_ok_threshold:
        return "Pending"
    else:
        return "OK"


def emit_status_snapshot():
    """
    Merge roster (what we *intend*) with emitter data (what is *actually happening*).
    Status is recalculated based on current time to ensure accuracy.
    """

    db = get_db_session()
    try:
        # Get user preferences for status thresholds
        from backend.models import UserPreferences
        prefs = db.query(UserPreferences).filter_by(id=1).first()
        status_ok_threshold = prefs.status_ok_threshold_hours if prefs else 12
        status_pending_threshold = prefs.status_pending_threshold_hours if prefs else 24

        roster = {r.id: r for r in db.query(RosterUnit).all()}
        emitters = {e.id: e for e in db.query(Emitter).all()}
        ignored = {i.id for i in db.query(IgnoredUnit).all()}

        units = {}

        # --- Merge roster entries first ---
        for unit_id, r in roster.items():
            e = emitters.get(unit_id)
            if r.retired:
                # Retired units get separated later
                status = "Retired"
                age = "N/A"
                last_seen = None
                fname = ""
            else:
                if e:
                    last_seen = ensure_utc(e.last_seen)
                    # RECALCULATE status based on current time, not stored value
                    status = calculate_status(last_seen, status_ok_threshold, status_pending_threshold)
                    age = format_age(last_seen)
                    fname = e.last_file
                else:
                    # Rostered but no emitter data
                    status = "Missing"
                    last_seen = None
                    age = "N/A"
                    fname = ""

            units[unit_id] = {
                "id": unit_id,
                "status": status,
                "age": age,
                "last": last_seen.isoformat() if last_seen else None,
                "fname": fname,
                "deployed": r.deployed,
                "note": r.note or "",
                "retired": r.retired,
                # Device type and type-specific fields
                "device_type": r.device_type or "seismograph",
                "last_calibrated": r.last_calibrated.isoformat() if r.last_calibrated else None,
                "next_calibration_due": r.next_calibration_due.isoformat() if r.next_calibration_due else None,
                "deployed_with_modem_id": r.deployed_with_modem_id,
                "ip_address": r.ip_address,
                "phone_number": r.phone_number,
                "hardware_model": r.hardware_model,
                # Location for mapping
                "location": r.location or "",
                "address": r.address or "",
                "coordinates": r.coordinates or "",
            }

        # --- Add unexpected emitter-only units ---
        for unit_id, e in emitters.items():
            if unit_id not in roster:
                last_seen = ensure_utc(e.last_seen)
                # RECALCULATE status for unknown units too
                status = calculate_status(last_seen, status_ok_threshold, status_pending_threshold)
                units[unit_id] = {
                    "id": unit_id,
                    "status": status,
                    "age": format_age(last_seen),
                    "last": last_seen.isoformat(),
                    "fname": e.last_file,
                    "deployed": False,  # default
                    "note": "",
                    "retired": False,
                    # Device type and type-specific fields (defaults for unknown units)
                    "device_type": "seismograph",  # default
                    "last_calibrated": None,
                    "next_calibration_due": None,
                    "deployed_with_modem_id": None,
                    "ip_address": None,
                    "phone_number": None,
                    "hardware_model": None,
                    # Location fields
                    "location": "",
                    "address": "",
                    "coordinates": "",
                }

        # Separate buckets for UI
        active_units = {
            uid: u for uid, u in units.items()
            if not u["retired"] and u["deployed"] and uid not in ignored
        }

        benched_units = {
            uid: u for uid, u in units.items()
            if not u["retired"] and not u["deployed"] and uid not in ignored
        }

        retired_units = {
            uid: u for uid, u in units.items()
            if u["retired"]
        }

        # Unknown units - emitters that aren't in the roster and aren't ignored
        unknown_units = {
            uid: u for uid, u in units.items()
            if uid not in roster and uid not in ignored
        }

        return {
            "timestamp": datetime.utcnow().isoformat(),
            "units": units,
            "active": active_units,
            "benched": benched_units,
            "retired": retired_units,
            "unknown": unknown_units,
            "summary": {
                "total": len(active_units) + len(benched_units),
                "active": len(active_units),
                "benched": len(benched_units),
                "retired": len(retired_units),
                "unknown": len(unknown_units),
                # Status counts only for deployed units (active_units)
                "ok": sum(1 for u in active_units.values() if u["status"] == "OK"),
                "pending": sum(1 for u in active_units.values() if u["status"] == "Pending"),
                "missing": sum(1 for u in active_units.values() if u["status"] == "Missing"),
            }
        }
    finally:
        db.close()
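For orientation, an illustrative consumer of emit_status_snapshot(), not part of this commit: the returned dict already separates units into active, benched, retired, and unknown buckets with a precomputed summary, so a read-only status endpoint can mostly pass it through. The FastAPI router and route path below are assumptions; only the import path reflects the new folder layout introduced here.

# Hypothetical read-only endpoint (assumes FastAPI; names are illustrative).
from fastapi import APIRouter

from app.seismo.services.snapshot import emit_status_snapshot

router = APIRouter()


@router.get("/api/fleet/status")
def fleet_status():
    snap = emit_status_snapshot()
    # snap["summary"] looks like {"total": 12, "active": 9, "ok": 7, "pending": 1, "missing": 1, ...}
    return {"summary": snap["summary"], "active": snap["active"], "unknown": snap["unknown"]}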