db management system added
@@ -9,7 +9,7 @@ from typing import List, Dict
from pydantic import BaseModel

from backend.database import engine, Base, get_db
-from backend.routers import roster, units, photos, roster_edit, dashboard, dashboard_tabs
+from backend.routers import roster, units, photos, roster_edit, dashboard, dashboard_tabs, activity
from backend.services.snapshot import emit_status_snapshot
from backend.models import IgnoredUnit

@@ -67,6 +67,7 @@ app.include_router(photos.router)
app.include_router(roster_edit.router)
app.include_router(dashboard.router)
app.include_router(dashboard_tabs.router)
+app.include_router(activity.router)

from backend.routers import settings
app.include_router(settings.router)

backend/routers/activity.py (new file, 146 lines)
@@ -0,0 +1,146 @@
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from sqlalchemy import desc
from pathlib import Path
from datetime import datetime, timedelta, timezone
from typing import List, Dict, Any
from backend.database import get_db
from backend.models import UnitHistory, Emitter, RosterUnit

router = APIRouter(prefix="/api", tags=["activity"])

PHOTOS_BASE_DIR = Path("data/photos")


@router.get("/recent-activity")
def get_recent_activity(limit: int = 20, db: Session = Depends(get_db)):
    """
    Get recent activity feed combining unit history changes and photo uploads.
    Returns a unified timeline of events sorted by timestamp (newest first).
    """
    activities = []

    # Get recent history entries
    history_entries = db.query(UnitHistory)\
        .order_by(desc(UnitHistory.changed_at))\
        .limit(limit * 2)\
        .all()  # Get more than needed to mix with photos

    for entry in history_entries:
        activity = {
            "type": "history",
            "timestamp": entry.changed_at.isoformat(),
            "timestamp_unix": entry.changed_at.timestamp(),
            "unit_id": entry.unit_id,
            "change_type": entry.change_type,
            "field_name": entry.field_name,
            "old_value": entry.old_value,
            "new_value": entry.new_value,
            "source": entry.source,
            "notes": entry.notes
        }
        activities.append(activity)

    # Get recent photos
    if PHOTOS_BASE_DIR.exists():
        image_extensions = {".jpg", ".jpeg", ".png", ".gif", ".webp"}
        photo_activities = []

        for unit_dir in PHOTOS_BASE_DIR.iterdir():
            if not unit_dir.is_dir():
                continue

            unit_id = unit_dir.name

            for file_path in unit_dir.iterdir():
                if file_path.is_file() and file_path.suffix.lower() in image_extensions:
                    modified_time = file_path.stat().st_mtime
                    photo_activities.append({
                        "type": "photo",
                        "timestamp": datetime.fromtimestamp(modified_time).isoformat(),
                        "timestamp_unix": modified_time,
                        "unit_id": unit_id,
                        "filename": file_path.name,
                        "photo_url": f"/api/unit/{unit_id}/photo/{file_path.name}"
                    })

        activities.extend(photo_activities)

    # Sort all activities by timestamp (newest first)
    activities.sort(key=lambda x: x["timestamp_unix"], reverse=True)

    # Limit to requested number
    activities = activities[:limit]

    return {
        "activities": activities,
        "total": len(activities)
    }

@router.get("/recent-callins")
|
||||
def get_recent_callins(hours: int = 6, limit: int = None, db: Session = Depends(get_db)):
|
||||
"""
|
||||
Get recent unit call-ins (units that have reported recently).
|
||||
Returns units sorted by most recent last_seen timestamp.
|
||||
|
||||
Args:
|
||||
hours: Look back this many hours (default: 6)
|
||||
limit: Maximum number of results (default: None = all)
|
||||
"""
|
||||
# Calculate the time threshold
|
||||
time_threshold = datetime.now(timezone.utc) - timedelta(hours=hours)
|
||||
|
||||
# Query emitters with recent activity, joined with roster info
|
||||
recent_emitters = db.query(Emitter)\
|
||||
.filter(Emitter.last_seen >= time_threshold)\
|
||||
.order_by(desc(Emitter.last_seen))\
|
||||
.all()
|
||||
|
||||
# Get roster info for all units
|
||||
roster_dict = {r.id: r for r in db.query(RosterUnit).all()}
|
||||
|
||||
call_ins = []
|
||||
for emitter in recent_emitters:
|
||||
roster_unit = roster_dict.get(emitter.id)
|
||||
|
||||
# Calculate time since last seen
|
||||
last_seen_utc = emitter.last_seen.replace(tzinfo=timezone.utc) if emitter.last_seen.tzinfo is None else emitter.last_seen
|
||||
time_diff = datetime.now(timezone.utc) - last_seen_utc
|
||||
|
||||
# Format time ago
|
||||
if time_diff.total_seconds() < 60:
|
||||
time_ago = "just now"
|
||||
elif time_diff.total_seconds() < 3600:
|
||||
minutes = int(time_diff.total_seconds() / 60)
|
||||
time_ago = f"{minutes}m ago"
|
||||
else:
|
||||
hours_ago = time_diff.total_seconds() / 3600
|
||||
if hours_ago < 24:
|
||||
time_ago = f"{int(hours_ago)}h {int((hours_ago % 1) * 60)}m ago"
|
||||
else:
|
||||
days = int(hours_ago / 24)
|
||||
time_ago = f"{days}d ago"
|
||||
|
||||
call_in = {
|
||||
"unit_id": emitter.id,
|
||||
"last_seen": emitter.last_seen.isoformat(),
|
||||
"time_ago": time_ago,
|
||||
"status": emitter.status,
|
||||
"device_type": roster_unit.device_type if roster_unit else "seismograph",
|
||||
"deployed": roster_unit.deployed if roster_unit else False,
|
||||
"note": roster_unit.note if roster_unit and roster_unit.note else "",
|
||||
"location": roster_unit.address if roster_unit and roster_unit.address else (roster_unit.location if roster_unit else "")
|
||||
}
|
||||
call_ins.append(call_in)
|
||||
|
||||
# Apply limit if specified
|
||||
if limit:
|
||||
call_ins = call_ins[:limit]
|
||||
|
||||
return {
|
||||
"call_ins": call_ins,
|
||||
"total": len(call_ins),
|
||||
"hours": hours,
|
||||
"time_threshold": time_threshold.isoformat()
|
||||
}
|
||||
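For reference, a minimal client-side sketch (not part of this commit) of how the two new activity endpoints might be called. The base URL and the requests dependency are assumptions, not anything this diff adds:

# Illustrative only: exercises GET /api/recent-activity and GET /api/recent-callins.
# Assumes the FastAPI app is running locally on port 8000.
import requests

BASE_URL = "http://localhost:8000"  # assumed host/port, adjust to your deployment

# Last 10 events from the unified history/photo timeline
activity = requests.get(f"{BASE_URL}/api/recent-activity", params={"limit": 10}).json()
for event in activity["activities"]:
    print(event["timestamp"], event["type"], event["unit_id"])

# Units that have called in within the last 6 hours
callins = requests.get(f"{BASE_URL}/api/recent-callins", params={"hours": 6}).json()
for unit in callins["call_ins"]:
    print(unit["unit_id"], unit["time_ago"], unit["status"])
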
@@ -1,14 +1,17 @@
from fastapi import APIRouter, Depends, HTTPException, UploadFile, File
-from fastapi.responses import StreamingResponse
+from fastapi.responses import StreamingResponse, FileResponse
from sqlalchemy.orm import Session
from datetime import datetime, date
from pydantic import BaseModel
from typing import Optional
import csv
import io
+import shutil
from pathlib import Path

from backend.database import get_db
from backend.models import RosterUnit, Emitter, IgnoredUnit, UserPreferences
+from backend.services.database_backup import DatabaseBackupService

router = APIRouter(prefix="/api/settings", tags=["settings"])

@@ -325,3 +328,144 @@ def update_preferences(
        "status_pending_threshold_hours": prefs.status_pending_threshold_hours,
        "updated_at": prefs.updated_at.isoformat() if prefs.updated_at else None
    }


# Database Management Endpoints

backup_service = DatabaseBackupService()


@router.get("/database/stats")
def get_database_stats():
    """Get current database statistics"""
    try:
        stats = backup_service.get_database_stats()
        return stats
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to get database stats: {str(e)}")


@router.post("/database/snapshot")
def create_database_snapshot(description: Optional[str] = None):
    """Create a full database snapshot"""
    try:
        snapshot = backup_service.create_snapshot(description=description)
        return {
            "message": "Snapshot created successfully",
            "snapshot": snapshot
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Snapshot creation failed: {str(e)}")

@router.get("/database/snapshots")
|
||||
def list_database_snapshots():
|
||||
"""List all available database snapshots"""
|
||||
try:
|
||||
snapshots = backup_service.list_snapshots()
|
||||
return {
|
||||
"snapshots": snapshots,
|
||||
"count": len(snapshots)
|
||||
}
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Failed to list snapshots: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/database/snapshot/{filename}")
|
||||
def download_snapshot(filename: str):
|
||||
"""Download a specific snapshot file"""
|
||||
try:
|
||||
snapshot_path = backup_service.download_snapshot(filename)
|
||||
return FileResponse(
|
||||
path=str(snapshot_path),
|
||||
filename=filename,
|
||||
media_type="application/x-sqlite3"
|
||||
)
|
||||
except FileNotFoundError:
|
||||
raise HTTPException(status_code=404, detail=f"Snapshot {filename} not found")
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Download failed: {str(e)}")
|
||||
|
||||
|
||||
@router.delete("/database/snapshot/{filename}")
|
||||
def delete_database_snapshot(filename: str):
|
||||
"""Delete a specific snapshot"""
|
||||
try:
|
||||
backup_service.delete_snapshot(filename)
|
||||
return {
|
||||
"message": f"Snapshot {filename} deleted successfully",
|
||||
"filename": filename
|
||||
}
|
||||
except FileNotFoundError:
|
||||
raise HTTPException(status_code=404, detail=f"Snapshot {filename} not found")
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Delete failed: {str(e)}")
|
||||
|
||||
|
||||
class RestoreRequest(BaseModel):
    """Schema for restore request"""
    filename: str
    create_backup: bool = True


@router.post("/database/restore")
def restore_database(request: RestoreRequest, db: Session = Depends(get_db)):
    """Restore database from a snapshot"""
    try:
        # Close the database connection before restoring
        db.close()

        result = backup_service.restore_snapshot(
            filename=request.filename,
            create_backup_before_restore=request.create_backup
        )

        return result
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail=f"Snapshot {request.filename} not found")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Restore failed: {str(e)}")

@router.post("/database/upload-snapshot")
|
||||
async def upload_snapshot(file: UploadFile = File(...)):
|
||||
"""Upload a snapshot file to the backups directory"""
|
||||
if not file.filename.endswith('.db'):
|
||||
raise HTTPException(status_code=400, detail="File must be a .db file")
|
||||
|
||||
try:
|
||||
# Save uploaded file to backups directory
|
||||
backups_dir = Path("./data/backups")
|
||||
backups_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
|
||||
uploaded_filename = f"snapshot_uploaded_{timestamp}.db"
|
||||
file_path = backups_dir / uploaded_filename
|
||||
|
||||
# Save file
|
||||
with open(file_path, "wb") as buffer:
|
||||
shutil.copyfileobj(file.file, buffer)
|
||||
|
||||
# Create metadata
|
||||
metadata = {
|
||||
"filename": uploaded_filename,
|
||||
"created_at": timestamp,
|
||||
"created_at_iso": datetime.utcnow().isoformat(),
|
||||
"description": f"Uploaded: {file.filename}",
|
||||
"size_bytes": file_path.stat().st_size,
|
||||
"size_mb": round(file_path.stat().st_size / (1024 * 1024), 2),
|
||||
"type": "uploaded"
|
||||
}
|
||||
|
||||
metadata_path = backups_dir / f"{uploaded_filename}.meta.json"
|
||||
import json
|
||||
with open(metadata_path, 'w') as f:
|
||||
json.dump(metadata, f, indent=2)
|
||||
|
||||
return {
|
||||
"message": "Snapshot uploaded successfully",
|
||||
"snapshot": metadata
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Upload failed: {str(e)}")
|
||||
|
||||
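As a rough usage sketch (again, not part of the commit), the new database-management endpoints could be driven like this; the base URL is an assumption:

# Illustrative only: walks the snapshot lifecycle exposed under /api/settings/database.
# Assumes the API is reachable at http://localhost:8000.
import requests

BASE_URL = "http://localhost:8000"  # assumed

# Current database statistics (size, per-table row counts)
print(requests.get(f"{BASE_URL}/api/settings/database/stats").json())

# Create a snapshot, then list what is available
created = requests.post(f"{BASE_URL}/api/settings/database/snapshot",
                        params={"description": "before migration"}).json()
snapshots = requests.get(f"{BASE_URL}/api/settings/database/snapshots").json()["snapshots"]

# Restore the snapshot we just made; a safety backup is taken first by default
filename = created["snapshot"]["filename"]
requests.post(f"{BASE_URL}/api/settings/database/restore",
              json={"filename": filename, "create_backup": True})
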
backend/services/backup_scheduler.py (new file, 145 lines)
@@ -0,0 +1,145 @@
"""
Automatic Database Backup Scheduler
Handles scheduled automatic backups of the database
"""

import schedule
import time
import threading
from datetime import datetime
from typing import Optional
import logging

from backend.services.database_backup import DatabaseBackupService

logger = logging.getLogger(__name__)


class BackupScheduler:
    """Manages automatic database backups on a schedule"""

    def __init__(self, db_path: str = "./data/seismo_fleet.db", backups_dir: str = "./data/backups"):
        self.backup_service = DatabaseBackupService(db_path=db_path, backups_dir=backups_dir)
        self.scheduler_thread: Optional[threading.Thread] = None
        self.is_running = False

        # Default settings
        self.backup_interval_hours = 24  # Daily backups
        self.keep_count = 10  # Keep last 10 backups
        self.enabled = False

    def configure(self, interval_hours: int = 24, keep_count: int = 10, enabled: bool = True):
        """
        Configure backup scheduler settings

        Args:
            interval_hours: Hours between automatic backups
            keep_count: Number of backups to retain
            enabled: Whether automatic backups are enabled
        """
        self.backup_interval_hours = interval_hours
        self.keep_count = keep_count
        self.enabled = enabled

        logger.info(f"Backup scheduler configured: interval={interval_hours}h, keep={keep_count}, enabled={enabled}")

    def create_automatic_backup(self):
        """Create an automatic backup and cleanup old ones"""
        if not self.enabled:
            logger.info("Automatic backups are disabled, skipping")
            return

        try:
            timestamp = datetime.utcnow().strftime("%Y-%m-%d %H:%M UTC")
            description = f"Automatic backup - {timestamp}"

            logger.info("Creating automatic backup...")
            snapshot = self.backup_service.create_snapshot(description=description)

            logger.info(f"Automatic backup created: {snapshot['filename']} ({snapshot['size_mb']} MB)")

            # Cleanup old backups
            cleanup_result = self.backup_service.cleanup_old_snapshots(keep_count=self.keep_count)
            if cleanup_result['deleted'] > 0:
                logger.info(f"Cleaned up {cleanup_result['deleted']} old snapshots")

            return snapshot

        except Exception as e:
            logger.error(f"Automatic backup failed: {str(e)}")
            return None

    def start(self):
        """Start the backup scheduler in a background thread"""
        if self.is_running:
            logger.warning("Backup scheduler is already running")
            return

        if not self.enabled:
            logger.info("Backup scheduler is disabled, not starting")
            return

        logger.info(f"Starting backup scheduler (every {self.backup_interval_hours} hours)")

        # Clear any existing scheduled jobs
        schedule.clear()

        # Schedule the backup job
        schedule.every(self.backup_interval_hours).hours.do(self.create_automatic_backup)

        # Also run immediately on startup
        self.create_automatic_backup()

        # Start the scheduler thread
        self.is_running = True
        self.scheduler_thread = threading.Thread(target=self._run_scheduler, daemon=True)
        self.scheduler_thread.start()

        logger.info("Backup scheduler started successfully")

    def _run_scheduler(self):
        """Internal method to run the scheduler loop"""
        while self.is_running:
            schedule.run_pending()
            time.sleep(60)  # Check every minute

    def stop(self):
        """Stop the backup scheduler"""
        if not self.is_running:
            logger.warning("Backup scheduler is not running")
            return

        logger.info("Stopping backup scheduler...")
        self.is_running = False
        schedule.clear()

        if self.scheduler_thread:
            self.scheduler_thread.join(timeout=5)

        logger.info("Backup scheduler stopped")

    def get_status(self) -> dict:
        """Get current scheduler status"""
        next_run = None
        if self.is_running and schedule.jobs:
            next_run = schedule.jobs[0].next_run.isoformat() if schedule.jobs[0].next_run else None

        return {
            "enabled": self.enabled,
            "running": self.is_running,
            "interval_hours": self.backup_interval_hours,
            "keep_count": self.keep_count,
            "next_run": next_run
        }


# Global scheduler instance
_scheduler_instance: Optional[BackupScheduler] = None


def get_backup_scheduler() -> BackupScheduler:
    """Get or create the global backup scheduler instance"""
    global _scheduler_instance
    if _scheduler_instance is None:
        _scheduler_instance = BackupScheduler()
    return _scheduler_instance

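Nothing in this diff shows where BackupScheduler gets wired in; a minimal sketch of how it could be hooked into the FastAPI startup/shutdown lifecycle follows, with the event-handler placement being an assumption rather than something this commit does:

# Illustrative wiring only; this commit does not register the scheduler anywhere.
from fastapi import FastAPI

from backend.services.backup_scheduler import get_backup_scheduler

app = FastAPI()


@app.on_event("startup")
def start_backup_scheduler():
    # Daily backups, keep the 10 most recent; must be enabled before start()
    scheduler = get_backup_scheduler()
    scheduler.configure(interval_hours=24, keep_count=10, enabled=True)
    scheduler.start()


@app.on_event("shutdown")
def stop_backup_scheduler():
    get_backup_scheduler().stop()
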
backend/services/database_backup.py (new file, 192 lines)
@@ -0,0 +1,192 @@
"""
Database Backup and Restore Service
Handles full database snapshots, restoration, and remote synchronization
"""

import os
import shutil
import sqlite3
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Optional
import json


class DatabaseBackupService:
    """Manages database backup operations"""

    def __init__(self, db_path: str = "./data/seismo_fleet.db", backups_dir: str = "./data/backups"):
        self.db_path = Path(db_path)
        self.backups_dir = Path(backups_dir)
        self.backups_dir.mkdir(parents=True, exist_ok=True)

    def create_snapshot(self, description: Optional[str] = None) -> Dict:
        """
        Create a full database snapshot using SQLite backup API
        Returns snapshot metadata
        """
        if not self.db_path.exists():
            raise FileNotFoundError(f"Database not found at {self.db_path}")

        # Generate snapshot filename with timestamp
        timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
        snapshot_name = f"snapshot_{timestamp}.db"
        snapshot_path = self.backups_dir / snapshot_name

        # Get database size before backup
        db_size = self.db_path.stat().st_size

        try:
            # Use SQLite backup API for safe backup (handles concurrent access)
            source_conn = sqlite3.connect(str(self.db_path))
            dest_conn = sqlite3.connect(str(snapshot_path))

            # Perform the backup
            with dest_conn:
                source_conn.backup(dest_conn)

            source_conn.close()
            dest_conn.close()

            # Create metadata
            metadata = {
                "filename": snapshot_name,
                "created_at": timestamp,
                "created_at_iso": datetime.utcnow().isoformat(),
                "description": description or "Manual snapshot",
                "size_bytes": snapshot_path.stat().st_size,
                "size_mb": round(snapshot_path.stat().st_size / (1024 * 1024), 2),
                "original_db_size_bytes": db_size,
                "type": "manual"
            }

            # Save metadata as JSON sidecar file
            metadata_path = self.backups_dir / f"{snapshot_name}.meta.json"
            with open(metadata_path, 'w') as f:
                json.dump(metadata, f, indent=2)

            return metadata

        except Exception as e:
            # Clean up partial snapshot if it exists
            if snapshot_path.exists():
                snapshot_path.unlink()
            raise Exception(f"Snapshot creation failed: {str(e)}")

    def list_snapshots(self) -> List[Dict]:
        """
        List all available snapshots with metadata
        Returns list sorted by creation date (newest first)
        """
        snapshots = []

        for db_file in sorted(self.backups_dir.glob("snapshot_*.db"), reverse=True):
            metadata_file = self.backups_dir / f"{db_file.name}.meta.json"

            if metadata_file.exists():
                with open(metadata_file, 'r') as f:
                    metadata = json.load(f)
            else:
                # Fallback for legacy snapshots without metadata
                stat_info = db_file.stat()
                metadata = {
                    "filename": db_file.name,
                    "created_at": datetime.fromtimestamp(stat_info.st_mtime).strftime("%Y%m%d_%H%M%S"),
                    "created_at_iso": datetime.fromtimestamp(stat_info.st_mtime).isoformat(),
                    "description": "Legacy snapshot",
                    "size_bytes": stat_info.st_size,
                    "size_mb": round(stat_info.st_size / (1024 * 1024), 2),
                    "type": "manual"
                }

            snapshots.append(metadata)

        return snapshots

    def delete_snapshot(self, filename: str) -> bool:
        """Delete a snapshot and its metadata"""
        snapshot_path = self.backups_dir / filename
        metadata_path = self.backups_dir / f"{filename}.meta.json"

        if not snapshot_path.exists():
            raise FileNotFoundError(f"Snapshot {filename} not found")

        snapshot_path.unlink()
        if metadata_path.exists():
            metadata_path.unlink()

        return True

    def restore_snapshot(self, filename: str, create_backup_before_restore: bool = True) -> Dict:
        """
        Restore database from a snapshot
        Creates a safety backup before restoring if requested
        """
        snapshot_path = self.backups_dir / filename

        if not snapshot_path.exists():
            raise FileNotFoundError(f"Snapshot {filename} not found")

        if not self.db_path.exists():
            raise FileNotFoundError(f"Database not found at {self.db_path}")

        backup_info = None

        # Create safety backup before restore
        if create_backup_before_restore:
            backup_info = self.create_snapshot(description="Auto-backup before restore")

        try:
            # Replace database file
            shutil.copy2(str(snapshot_path), str(self.db_path))

            return {
                "message": "Database restored successfully",
                "restored_from": filename,
                "restored_at": datetime.utcnow().isoformat(),
                "backup_created": backup_info["filename"] if backup_info else None
            }

        except Exception as e:
            raise Exception(f"Restore failed: {str(e)}")

    def get_database_stats(self) -> Dict:
        """Get statistics about the current database"""
        if not self.db_path.exists():
            return {"error": "Database not found"}

        conn = sqlite3.connect(str(self.db_path))
        cursor = conn.cursor()

        # Get table counts
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%'")
        tables = cursor.fetchall()

        table_stats = {}
        total_rows = 0

        for (table_name,) in tables:
            cursor.execute(f"SELECT COUNT(*) FROM {table_name}")
            count = cursor.fetchone()[0]
            table_stats[table_name] = count
            total_rows += count

        conn.close()

        db_size = self.db_path.stat().st_size

        return {
            "database_path": str(self.db_path),
            "size_bytes": db_size,
            "size_mb": round(db_size / (1024 * 1024), 2),
            "total_rows": total_rows,
            "tables": table_stats,
            "last_modified": datetime.fromtimestamp(self.db_path.stat().st_mtime).isoformat()
        }

    def download_snapshot(self, filename: str) -> Path:
        """Get the file path for downloading a snapshot"""
        snapshot_path = self.backups_dir / filename
        if not snapshot_path.exists():
            raise FileNotFoundError(f"Snapshot {filename} not found")
        return snapshot_path
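
Finally, a small sketch of using DatabaseBackupService directly (for example from a maintenance script); the script itself is illustrative, and the default paths come from the class defaults above:

# Illustrative only: drives the backup service outside of FastAPI.
from backend.services.database_backup import DatabaseBackupService

service = DatabaseBackupService()  # defaults: ./data/seismo_fleet.db, ./data/backups

# Take a snapshot and show what exists on disk
snapshot = service.create_snapshot(description="nightly maintenance")
print(f"Created {snapshot['filename']} ({snapshot['size_mb']} MB)")

for meta in service.list_snapshots():
    print(meta["filename"], meta["created_at_iso"], meta["size_mb"], "MB")

# Roll back to the snapshot we just took; a safety backup is created first
result = service.restore_snapshot(snapshot["filename"], create_backup_before_restore=True)
print(result["message"], "safety copy:", result["backup_created"])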