chore: modular monolith folder split (no behavior change)
app/seismo/__init__.py (new file, 0 lines)
app/seismo/database.py (new file, 36 lines)
@@ -0,0 +1,36 @@
"""
Seismograph feature module database connection
"""
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import os

# Ensure data directory exists
os.makedirs("data", exist_ok=True)

# For now, we'll use the old database (seismo_fleet.db) until we migrate
# TODO: Migrate to seismo.db
SQLALCHEMY_DATABASE_URL = "sqlite:///./data/seismo_fleet.db"

engine = create_engine(
    SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)

SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

Base = declarative_base()


def get_db():
    """Dependency for database sessions"""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()


def get_db_session():
    """Get a database session directly (not as a dependency)"""
    return SessionLocal()
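A quick usage sketch, not part of the diff: how get_db is meant to be wired into a FastAPI route as a yield-based dependency. The import path app.seismo.database and the /healthz route are assumptions based on this commit's layout.

from fastapi import Depends, FastAPI
from sqlalchemy import text
from sqlalchemy.orm import Session

from app.seismo.database import get_db  # path assumed from the new folder split

app = FastAPI()

@app.get("/healthz")
def healthz(db: Session = Depends(get_db)):
    # get_db yields one session per request and closes it in its finally block
    db.execute(text("SELECT 1"))
    return {"database": "ok"}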
app/seismo/models.py (new file, 110 lines)
@@ -0,0 +1,110 @@
from sqlalchemy import Column, String, DateTime, Boolean, Text, Date, Integer
from datetime import datetime
from backend.database import Base


class Emitter(Base):
    __tablename__ = "emitters"

    id = Column(String, primary_key=True, index=True)
    unit_type = Column(String, nullable=False)
    last_seen = Column(DateTime, default=datetime.utcnow)
    last_file = Column(String, nullable=False)
    status = Column(String, nullable=False)
    notes = Column(String, nullable=True)


class RosterUnit(Base):
    """
    Roster table: represents our *intended assignment* of a unit.
    This is editable from the GUI.

    Supports multiple device types (seismograph, modem, sound_level_meter) with type-specific fields.
    """
    __tablename__ = "roster"

    # Core fields (all device types)
    id = Column(String, primary_key=True, index=True)
    unit_type = Column(String, default="series3")  # Backward compatibility
    device_type = Column(String, default="seismograph")  # "seismograph" | "modem" | "sound_level_meter"
    deployed = Column(Boolean, default=True)
    retired = Column(Boolean, default=False)
    note = Column(String, nullable=True)
    project_id = Column(String, nullable=True)
    location = Column(String, nullable=True)  # Legacy field - use address/coordinates instead
    address = Column(String, nullable=True)  # Human-readable address
    coordinates = Column(String, nullable=True)  # Lat,Lon format: "34.0522,-118.2437"
    last_updated = Column(DateTime, default=datetime.utcnow)

    # Seismograph-specific fields (nullable for modems and SLMs)
    last_calibrated = Column(Date, nullable=True)
    next_calibration_due = Column(Date, nullable=True)

    # Modem assignment (shared by seismographs and SLMs)
    deployed_with_modem_id = Column(String, nullable=True)  # FK to another RosterUnit (device_type=modem)

    # Modem-specific fields (nullable for seismographs and SLMs)
    ip_address = Column(String, nullable=True)
    phone_number = Column(String, nullable=True)
    hardware_model = Column(String, nullable=True)

    # Sound Level Meter-specific fields (nullable for seismographs and modems)
    slm_host = Column(String, nullable=True)  # Device IP or hostname
    slm_tcp_port = Column(Integer, nullable=True)  # TCP control port (default 2255)
    slm_ftp_port = Column(Integer, nullable=True)  # FTP data retrieval port (default 21)
    slm_model = Column(String, nullable=True)  # NL-43, NL-53, etc.
    slm_serial_number = Column(String, nullable=True)  # Device serial number
    slm_frequency_weighting = Column(String, nullable=True)  # A, C, Z
    slm_time_weighting = Column(String, nullable=True)  # F (Fast), S (Slow), I (Impulse)
    slm_measurement_range = Column(String, nullable=True)  # e.g., "30-130 dB"
    slm_last_check = Column(DateTime, nullable=True)  # Last communication check


class IgnoredUnit(Base):
    """
    Ignored units: units that report but should be filtered out from unknown emitters.
    Used to suppress noise from old projects.
    """
    __tablename__ = "ignored_units"

    id = Column(String, primary_key=True, index=True)
    reason = Column(String, nullable=True)
    ignored_at = Column(DateTime, default=datetime.utcnow)


class UnitHistory(Base):
    """
    Unit history: complete timeline of changes to each unit.
    Tracks note changes, status changes, deployment/benched events, and more.
    """
    __tablename__ = "unit_history"

    id = Column(Integer, primary_key=True, autoincrement=True)
    unit_id = Column(String, nullable=False, index=True)  # FK to RosterUnit.id
    change_type = Column(String, nullable=False)  # note_change, deployed_change, retired_change, etc.
    field_name = Column(String, nullable=True)  # Which field changed
    old_value = Column(Text, nullable=True)  # Previous value
    new_value = Column(Text, nullable=True)  # New value
    changed_at = Column(DateTime, default=datetime.utcnow, nullable=False, index=True)
    source = Column(String, default="manual")  # manual, csv_import, telemetry, offline_sync
    notes = Column(Text, nullable=True)  # Optional reason/context for the change


class UserPreferences(Base):
    """
    User preferences: persistent storage for application settings.
    Single-row table (id=1) to store global user preferences.
    """
    __tablename__ = "user_preferences"

    id = Column(Integer, primary_key=True, default=1)
    timezone = Column(String, default="America/New_York")
    theme = Column(String, default="auto")  # auto, light, dark
    auto_refresh_interval = Column(Integer, default=10)  # seconds
    date_format = Column(String, default="MM/DD/YYYY")
    table_rows_per_page = Column(Integer, default=25)
    calibration_interval_days = Column(Integer, default=365)
    calibration_warning_days = Column(Integer, default=30)
    status_ok_threshold_hours = Column(Integer, default=12)
    status_pending_threshold_hours = Column(Integer, default=24)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
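A minimal sketch, not part of the diff, of creating a roster row together with its audit entry. It mirrors the backend.database / backend.models imports still used above and assumes backend.database exposes the same get_db_session helper as the app/seismo/database.py copy; the unit id is made up.

from datetime import datetime

from backend.database import get_db_session  # assumed to match the helper shown above
from backend.models import RosterUnit, UnitHistory

db = get_db_session()
try:
    unit = RosterUnit(
        id="BE12345",  # hypothetical serial number
        device_type="seismograph",
        deployed=True,
        coordinates="34.0522,-118.2437",
        last_updated=datetime.utcnow(),
    )
    db.add(unit)
    # Pair the edit with a history row, as the roster_edit router does
    db.add(UnitHistory(unit_id=unit.id, change_type="deployed_change",
                       field_name="deployed", old_value="benched",
                       new_value="deployed", source="manual"))
    db.commit()
finally:
    db.close()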
app/seismo/routers/__init__.py (new file, 0 lines)
app/seismo/routers/activity.py (new file, 146 lines)
@@ -0,0 +1,146 @@
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from sqlalchemy import desc
from pathlib import Path
from datetime import datetime, timedelta, timezone
from typing import List, Dict, Any
from backend.database import get_db
from backend.models import UnitHistory, Emitter, RosterUnit

router = APIRouter(prefix="/api", tags=["activity"])

PHOTOS_BASE_DIR = Path("data/photos")


@router.get("/recent-activity")
def get_recent_activity(limit: int = 20, db: Session = Depends(get_db)):
    """
    Get recent activity feed combining unit history changes and photo uploads.
    Returns a unified timeline of events sorted by timestamp (newest first).
    """
    activities = []

    # Get recent history entries
    history_entries = db.query(UnitHistory)\
        .order_by(desc(UnitHistory.changed_at))\
        .limit(limit * 2)\
        .all()  # Get more than needed to mix with photos

    for entry in history_entries:
        activity = {
            "type": "history",
            "timestamp": entry.changed_at.isoformat(),
            "timestamp_unix": entry.changed_at.timestamp(),
            "unit_id": entry.unit_id,
            "change_type": entry.change_type,
            "field_name": entry.field_name,
            "old_value": entry.old_value,
            "new_value": entry.new_value,
            "source": entry.source,
            "notes": entry.notes
        }
        activities.append(activity)

    # Get recent photos
    if PHOTOS_BASE_DIR.exists():
        image_extensions = {".jpg", ".jpeg", ".png", ".gif", ".webp"}
        photo_activities = []

        for unit_dir in PHOTOS_BASE_DIR.iterdir():
            if not unit_dir.is_dir():
                continue

            unit_id = unit_dir.name

            for file_path in unit_dir.iterdir():
                if file_path.is_file() and file_path.suffix.lower() in image_extensions:
                    modified_time = file_path.stat().st_mtime
                    photo_activities.append({
                        "type": "photo",
                        "timestamp": datetime.fromtimestamp(modified_time).isoformat(),
                        "timestamp_unix": modified_time,
                        "unit_id": unit_id,
                        "filename": file_path.name,
                        "photo_url": f"/api/unit/{unit_id}/photo/{file_path.name}"
                    })

        activities.extend(photo_activities)

    # Sort all activities by timestamp (newest first)
    activities.sort(key=lambda x: x["timestamp_unix"], reverse=True)

    # Limit to requested number
    activities = activities[:limit]

    return {
        "activities": activities,
        "total": len(activities)
    }


@router.get("/recent-callins")
def get_recent_callins(hours: int = 6, limit: int = None, db: Session = Depends(get_db)):
    """
    Get recent unit call-ins (units that have reported recently).
    Returns units sorted by most recent last_seen timestamp.

    Args:
        hours: Look back this many hours (default: 6)
        limit: Maximum number of results (default: None = all)
    """
    # Calculate the time threshold
    time_threshold = datetime.now(timezone.utc) - timedelta(hours=hours)

    # Query emitters with recent activity, joined with roster info
    recent_emitters = db.query(Emitter)\
        .filter(Emitter.last_seen >= time_threshold)\
        .order_by(desc(Emitter.last_seen))\
        .all()

    # Get roster info for all units
    roster_dict = {r.id: r for r in db.query(RosterUnit).all()}

    call_ins = []
    for emitter in recent_emitters:
        roster_unit = roster_dict.get(emitter.id)

        # Calculate time since last seen
        last_seen_utc = emitter.last_seen.replace(tzinfo=timezone.utc) if emitter.last_seen.tzinfo is None else emitter.last_seen
        time_diff = datetime.now(timezone.utc) - last_seen_utc

        # Format time ago
        if time_diff.total_seconds() < 60:
            time_ago = "just now"
        elif time_diff.total_seconds() < 3600:
            minutes = int(time_diff.total_seconds() / 60)
            time_ago = f"{minutes}m ago"
        else:
            hours_ago = time_diff.total_seconds() / 3600
            if hours_ago < 24:
                time_ago = f"{int(hours_ago)}h {int((hours_ago % 1) * 60)}m ago"
            else:
                days = int(hours_ago / 24)
                time_ago = f"{days}d ago"

        call_in = {
            "unit_id": emitter.id,
            "last_seen": emitter.last_seen.isoformat(),
            "time_ago": time_ago,
            "status": emitter.status,
            "device_type": roster_unit.device_type if roster_unit else "seismograph",
            "deployed": roster_unit.deployed if roster_unit else False,
            "note": roster_unit.note if roster_unit and roster_unit.note else "",
            "location": roster_unit.address if roster_unit and roster_unit.address else (roster_unit.location if roster_unit else "")
        }
        call_ins.append(call_in)

    # Apply limit if specified
    if limit:
        call_ins = call_ins[:limit]

    return {
        "call_ins": call_ins,
        "total": len(call_ins),
        "hours": hours,
        "time_threshold": time_threshold.isoformat()
    }
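A short sketch, not part of the diff, of hitting the two activity endpoints through FastAPI's TestClient. The app assembly is hypothetical and assumes the SQLite database from database.py is reachable with its tables created.

from fastapi import FastAPI
from fastapi.testclient import TestClient

from app.seismo.routers import activity  # import path assumed from this commit's layout

app = FastAPI()
app.include_router(activity.router)
client = TestClient(app)

feed = client.get("/api/recent-activity", params={"limit": 10}).json()
print(feed["total"], "recent events")

callins = client.get("/api/recent-callins", params={"hours": 6, "limit": 5}).json()
for c in callins["call_ins"]:
    print(c["unit_id"], c["time_ago"], c["status"])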
app/seismo/routers/dashboard.py (new file, 25 lines)
@@ -0,0 +1,25 @@
from fastapi import APIRouter, Request, Depends
from fastapi.templating import Jinja2Templates

from backend.services.snapshot import emit_status_snapshot

router = APIRouter()
templates = Jinja2Templates(directory="templates")


@router.get("/dashboard/active")
def dashboard_active(request: Request):
    snapshot = emit_status_snapshot()
    return templates.TemplateResponse(
        "partials/active_table.html",
        {"request": request, "units": snapshot["active"]}
    )


@router.get("/dashboard/benched")
def dashboard_benched(request: Request):
    snapshot = emit_status_snapshot()
    return templates.TemplateResponse(
        "partials/benched_table.html",
        {"request": request, "units": snapshot["benched"]}
    )
app/seismo/routers/dashboard_tabs.py (new file, 34 lines)
@@ -0,0 +1,34 @@
# backend/routers/dashboard_tabs.py
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session

from backend.database import get_db
from backend.services.snapshot import emit_status_snapshot

router = APIRouter(prefix="/dashboard", tags=["dashboard-tabs"])

@router.get("/active")
def get_active_units(db: Session = Depends(get_db)):
    """
    Return only ACTIVE (deployed) units for dashboard table swap.
    """
    snap = emit_status_snapshot()
    units = {
        uid: u
        for uid, u in snap["units"].items()
        if u["deployed"] is True
    }
    return {"units": units}

@router.get("/benched")
def get_benched_units(db: Session = Depends(get_db)):
    """
    Return only BENCHED (not deployed) units for dashboard table swap.
    """
    snap = emit_status_snapshot()
    units = {
        uid: u
        for uid, u in snap["units"].items()
        if u["deployed"] is False
    }
    return {"units": units}
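A tiny sketch, not part of the diff, of the deployed/benched split these two endpoints apply to emit_status_snapshot() output, run against a made-up snapshot so it is self-contained.

snap = {
    "units": {
        "BE12345": {"status": "OK", "deployed": True},
        "BE67890": {"status": "Missing", "deployed": False},
    }
}

active = {uid: u for uid, u in snap["units"].items() if u["deployed"] is True}
benched = {uid: u for uid, u in snap["units"].items() if u["deployed"] is False}

assert list(active) == ["BE12345"]
assert list(benched) == ["BE67890"]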
app/seismo/routers/photos.py (new file, 242 lines)
@@ -0,0 +1,242 @@
from fastapi import APIRouter, HTTPException, UploadFile, File, Depends
from fastapi.responses import FileResponse, JSONResponse
from pathlib import Path
from typing import List, Optional
from datetime import datetime
import os
import shutil
from PIL import Image
from PIL.ExifTags import TAGS, GPSTAGS
from sqlalchemy.orm import Session
from backend.database import get_db
from backend.models import RosterUnit

router = APIRouter(prefix="/api", tags=["photos"])

PHOTOS_BASE_DIR = Path("data/photos")


def extract_exif_data(image_path: Path) -> dict:
    """
    Extract EXIF metadata from an image file.
    Returns dict with timestamp, GPS coordinates, and other metadata.
    """
    try:
        image = Image.open(image_path)
        exif_data = image._getexif()

        if not exif_data:
            return {}

        metadata = {}

        # Extract standard EXIF tags
        for tag_id, value in exif_data.items():
            tag = TAGS.get(tag_id, tag_id)

            # Extract datetime
            if tag == "DateTime" or tag == "DateTimeOriginal":
                try:
                    metadata["timestamp"] = datetime.strptime(str(value), "%Y:%m:%d %H:%M:%S")
                except:
                    pass

            # Extract GPS data
            if tag == "GPSInfo":
                gps_data = {}
                for gps_tag_id in value:
                    gps_tag = GPSTAGS.get(gps_tag_id, gps_tag_id)
                    gps_data[gps_tag] = value[gps_tag_id]

                # Convert GPS data to decimal degrees
                lat = gps_data.get("GPSLatitude")
                lat_ref = gps_data.get("GPSLatitudeRef")
                lon = gps_data.get("GPSLongitude")
                lon_ref = gps_data.get("GPSLongitudeRef")

                if lat and lon and lat_ref and lon_ref:
                    # Convert to decimal degrees
                    lat_decimal = convert_to_degrees(lat)
                    if lat_ref == "S":
                        lat_decimal = -lat_decimal

                    lon_decimal = convert_to_degrees(lon)
                    if lon_ref == "W":
                        lon_decimal = -lon_decimal

                    metadata["latitude"] = lat_decimal
                    metadata["longitude"] = lon_decimal
                    metadata["coordinates"] = f"{lat_decimal},{lon_decimal}"

        return metadata
    except Exception as e:
        print(f"Error extracting EXIF data: {e}")
        return {}


def convert_to_degrees(value):
    """
    Convert GPS coordinates from degrees/minutes/seconds to decimal degrees.
    """
    d, m, s = value
    return float(d) + (float(m) / 60.0) + (float(s) / 3600.0)


@router.post("/unit/{unit_id}/upload-photo")
async def upload_photo(
    unit_id: str,
    photo: UploadFile = File(...),
    auto_populate_coords: bool = True,
    db: Session = Depends(get_db)
):
    """
    Upload a photo for a unit and extract EXIF metadata.
    If GPS data exists and auto_populate_coords is True, update the unit's coordinates.
    """
    # Validate file type
    allowed_extensions = {".jpg", ".jpeg", ".png", ".gif", ".webp"}
    file_ext = Path(photo.filename).suffix.lower()

    if file_ext not in allowed_extensions:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid file type. Allowed: {', '.join(allowed_extensions)}"
        )

    # Create photos directory for this unit
    unit_photo_dir = PHOTOS_BASE_DIR / unit_id
    unit_photo_dir.mkdir(parents=True, exist_ok=True)

    # Generate filename with timestamp to avoid collisions
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"{timestamp}_{photo.filename}"
    file_path = unit_photo_dir / filename

    # Save the file
    try:
        with open(file_path, "wb") as buffer:
            shutil.copyfileobj(photo.file, buffer)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to save photo: {str(e)}")

    # Extract EXIF metadata
    metadata = extract_exif_data(file_path)

    # Update unit coordinates if GPS data exists and auto_populate_coords is True
    coordinates_updated = False
    if auto_populate_coords and "coordinates" in metadata:
        roster_unit = db.query(RosterUnit).filter(RosterUnit.id == unit_id).first()

        if roster_unit:
            roster_unit.coordinates = metadata["coordinates"]
            roster_unit.last_updated = datetime.utcnow()
            db.commit()
            coordinates_updated = True

    return JSONResponse(content={
        "success": True,
        "filename": filename,
        "file_path": f"/api/unit/{unit_id}/photo/{filename}",
        "metadata": {
            "timestamp": metadata.get("timestamp").isoformat() if metadata.get("timestamp") else None,
            "latitude": metadata.get("latitude"),
            "longitude": metadata.get("longitude"),
            "coordinates": metadata.get("coordinates")
        },
        "coordinates_updated": coordinates_updated
    })


@router.get("/unit/{unit_id}/photos")
def get_unit_photos(unit_id: str):
    """
    Reads /data/photos/<unit_id>/ and returns list of image filenames.
    Primary photo = most recent file.
    """
    unit_photo_dir = PHOTOS_BASE_DIR / unit_id

    if not unit_photo_dir.exists():
        # Return empty list if no photos directory exists
        return {
            "unit_id": unit_id,
            "photos": [],
            "primary_photo": None
        }

    # Get all image files
    image_extensions = {".jpg", ".jpeg", ".png", ".gif", ".webp"}
    photos = []

    for file_path in unit_photo_dir.iterdir():
        if file_path.is_file() and file_path.suffix.lower() in image_extensions:
            photos.append({
                "filename": file_path.name,
                "path": f"/api/unit/{unit_id}/photo/{file_path.name}",
                "modified": file_path.stat().st_mtime
            })

    # Sort by modification time (most recent first)
    photos.sort(key=lambda x: x["modified"], reverse=True)

    # Primary photo is the most recent
    primary_photo = photos[0]["filename"] if photos else None

    return {
        "unit_id": unit_id,
        "photos": [p["filename"] for p in photos],
        "primary_photo": primary_photo,
        "photo_urls": [p["path"] for p in photos]
    }


@router.get("/recent-photos")
def get_recent_photos(limit: int = 12):
    """
    Get the most recently uploaded photos across all units.
    Returns photos sorted by modification time (newest first).
    """
    if not PHOTOS_BASE_DIR.exists():
        return {"photos": []}

    all_photos = []
    image_extensions = {".jpg", ".jpeg", ".png", ".gif", ".webp"}

    # Scan all unit directories
    for unit_dir in PHOTOS_BASE_DIR.iterdir():
        if not unit_dir.is_dir():
            continue

        unit_id = unit_dir.name

        # Get all photos in this unit's directory
        for file_path in unit_dir.iterdir():
            if file_path.is_file() and file_path.suffix.lower() in image_extensions:
                all_photos.append({
                    "unit_id": unit_id,
                    "filename": file_path.name,
                    "path": f"/api/unit/{unit_id}/photo/{file_path.name}",
                    "modified": file_path.stat().st_mtime,
                    "modified_iso": datetime.fromtimestamp(file_path.stat().st_mtime).isoformat()
                })

    # Sort by modification time (most recent first) and limit
    all_photos.sort(key=lambda x: x["modified"], reverse=True)
    recent_photos = all_photos[:limit]

    return {
        "photos": recent_photos,
        "total": len(all_photos)
    }


@router.get("/unit/{unit_id}/photo/{filename}")
def get_photo(unit_id: str, filename: str):
    """
    Serves a specific photo file.
    """
    file_path = PHOTOS_BASE_DIR / unit_id / filename

    if not file_path.exists() or not file_path.is_file():
        raise HTTPException(status_code=404, detail="Photo not found")

    return FileResponse(file_path)
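A client-side sketch, not part of the diff, of uploading a photo to the endpoint above using httpx (already a dependency of roster_edit.py below). The host, file name, and unit id are made up.

import httpx

unit_id = "BE12345"
with open("site_photo.jpg", "rb") as f:
    resp = httpx.post(
        f"http://localhost:8000/api/unit/{unit_id}/upload-photo",
        params={"auto_populate_coords": True},
        files={"photo": ("site_photo.jpg", f, "image/jpeg")},
        timeout=30.0,
    )
resp.raise_for_status()
info = resp.json()
# coordinates_updated tells us whether EXIF GPS data was written back to the roster
print(info["file_path"], info["metadata"].get("coordinates"), info["coordinates_updated"])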
app/seismo/routers/roster.py (new file, 46 lines)
@@ -0,0 +1,46 @@
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from datetime import datetime, timedelta
from typing import Dict, Any
import random

from backend.database import get_db
from backend.services.snapshot import emit_status_snapshot

router = APIRouter(prefix="/api", tags=["roster"])


@router.get("/status-snapshot")
def get_status_snapshot(db: Session = Depends(get_db)):
    """
    Calls emit_status_snapshot() to get current fleet status.
    This will be replaced with real Series3 emitter logic later.
    """
    return emit_status_snapshot()


@router.get("/roster")
def get_roster(db: Session = Depends(get_db)):
    """
    Returns list of units with their metadata and status.
    Uses mock data for now.
    """
    snapshot = emit_status_snapshot()
    units_list = []

    for unit_id, unit_data in snapshot["units"].items():
        units_list.append({
            "id": unit_id,
            "status": unit_data["status"],
            "age": unit_data["age"],
            "last_seen": unit_data["last"],
            "deployed": unit_data["deployed"],
            "note": unit_data.get("note", ""),
            "last_file": unit_data.get("fname", "")
        })

    # Sort by status priority (Missing > Pending > OK) then by ID
    status_priority = {"Missing": 0, "Pending": 1, "OK": 2}
    units_list.sort(key=lambda x: (status_priority.get(x["status"], 3), x["id"]))

    return {"units": units_list}
app/seismo/routers/roster_edit.py (new file, 720 lines)
@@ -0,0 +1,720 @@
|
||||
from fastapi import APIRouter, Depends, HTTPException, Form, UploadFile, File, Request
|
||||
from fastapi.exceptions import RequestValidationError
|
||||
from sqlalchemy.orm import Session
|
||||
from datetime import datetime, date
|
||||
import csv
|
||||
import io
|
||||
import logging
|
||||
import httpx
|
||||
import os
|
||||
|
||||
from backend.database import get_db
|
||||
from backend.models import RosterUnit, IgnoredUnit, Emitter, UnitHistory
|
||||
|
||||
router = APIRouter(prefix="/api/roster", tags=["roster-edit"])
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# SLMM backend URL for syncing device configs to cache
|
||||
SLMM_BASE_URL = os.getenv("SLMM_BASE_URL", "http://localhost:8100")
|
||||
|
||||
|
||||
def record_history(db: Session, unit_id: str, change_type: str, field_name: str = None,
|
||||
old_value: str = None, new_value: str = None, source: str = "manual", notes: str = None):
|
||||
"""Helper function to record a change in unit history"""
|
||||
history_entry = UnitHistory(
|
||||
unit_id=unit_id,
|
||||
change_type=change_type,
|
||||
field_name=field_name,
|
||||
old_value=old_value,
|
||||
new_value=new_value,
|
||||
changed_at=datetime.utcnow(),
|
||||
source=source,
|
||||
notes=notes
|
||||
)
|
||||
db.add(history_entry)
|
||||
# Note: caller is responsible for db.commit()
|
||||
|
||||
|
||||
def get_or_create_roster_unit(db: Session, unit_id: str):
|
||||
unit = db.query(RosterUnit).filter(RosterUnit.id == unit_id).first()
|
||||
if not unit:
|
||||
unit = RosterUnit(id=unit_id)
|
||||
db.add(unit)
|
||||
db.commit()
|
||||
db.refresh(unit)
|
||||
return unit
|
||||
|
||||
|
||||
async def sync_slm_to_slmm_cache(
|
||||
unit_id: str,
|
||||
host: str = None,
|
||||
tcp_port: int = None,
|
||||
ftp_port: int = None,
|
||||
ftp_username: str = None,
|
||||
ftp_password: str = None,
|
||||
deployed_with_modem_id: str = None,
|
||||
db: Session = None
|
||||
) -> dict:
|
||||
"""
|
||||
Sync SLM device configuration to SLMM backend cache.
|
||||
|
||||
Terra-View is the source of truth for device configs. This function updates
|
||||
SLMM's config cache (NL43Config table) so SLMM can look up device connection
|
||||
info by unit_id without Terra-View passing host:port with every request.
|
||||
|
||||
Args:
|
||||
unit_id: Unique identifier for the SLM device
|
||||
host: Direct IP address/hostname OR will be resolved from modem
|
||||
tcp_port: TCP control port (default: 2255)
|
||||
ftp_port: FTP port (default: 21)
|
||||
ftp_username: FTP username (optional)
|
||||
ftp_password: FTP password (optional)
|
||||
deployed_with_modem_id: If set, resolve modem IP as host
|
||||
db: Database session for modem lookup
|
||||
|
||||
Returns:
|
||||
dict: {"success": bool, "message": str}
|
||||
"""
|
||||
# Resolve host from modem if assigned
|
||||
if deployed_with_modem_id and db:
|
||||
modem = db.query(RosterUnit).filter_by(
|
||||
id=deployed_with_modem_id,
|
||||
device_type="modem"
|
||||
).first()
|
||||
if modem and modem.ip_address:
|
||||
host = modem.ip_address
|
||||
logger.info(f"Resolved host from modem {deployed_with_modem_id}: {host}")
|
||||
|
||||
# Validate required fields
|
||||
if not host:
|
||||
logger.warning(f"Cannot sync SLM {unit_id} to SLMM: no host/IP address provided")
|
||||
return {"success": False, "message": "No host IP address available"}
|
||||
|
||||
# Set defaults
|
||||
tcp_port = tcp_port or 2255
|
||||
ftp_port = ftp_port or 21
|
||||
|
||||
# Build SLMM cache payload
|
||||
config_payload = {
|
||||
"host": host,
|
||||
"tcp_port": tcp_port,
|
||||
"tcp_enabled": True,
|
||||
"ftp_enabled": bool(ftp_username and ftp_password),
|
||||
"web_enabled": False
|
||||
}
|
||||
|
||||
if ftp_username and ftp_password:
|
||||
config_payload["ftp_username"] = ftp_username
|
||||
config_payload["ftp_password"] = ftp_password
|
||||
|
||||
# Call SLMM cache update API
|
||||
slmm_url = f"{SLMM_BASE_URL}/api/nl43/{unit_id}/config"
|
||||
|
||||
try:
|
||||
async with httpx.AsyncClient(timeout=10.0) as client:
|
||||
response = await client.put(slmm_url, json=config_payload)
|
||||
|
||||
if response.status_code in [200, 201]:
|
||||
logger.info(f"Successfully synced SLM {unit_id} to SLMM cache")
|
||||
return {"success": True, "message": "Device config cached in SLMM"}
|
||||
else:
|
||||
logger.error(f"SLMM cache sync failed for {unit_id}: HTTP {response.status_code}")
|
||||
return {"success": False, "message": f"SLMM returned status {response.status_code}"}
|
||||
|
||||
except httpx.ConnectError:
|
||||
logger.error(f"Cannot connect to SLMM service at {SLMM_BASE_URL}")
|
||||
return {"success": False, "message": "SLMM service unavailable"}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error syncing SLM {unit_id} to SLMM: {e}")
|
||||
return {"success": False, "message": str(e)}
|
||||
|
||||
|
||||
@router.post("/add")
|
||||
async def add_roster_unit(
|
||||
id: str = Form(...),
|
||||
device_type: str = Form("seismograph"),
|
||||
unit_type: str = Form("series3"),
|
||||
deployed: str = Form(None),
|
||||
retired: str = Form(None),
|
||||
note: str = Form(""),
|
||||
project_id: str = Form(None),
|
||||
location: str = Form(None),
|
||||
address: str = Form(None),
|
||||
coordinates: str = Form(None),
|
||||
# Seismograph-specific fields
|
||||
last_calibrated: str = Form(None),
|
||||
next_calibration_due: str = Form(None),
|
||||
deployed_with_modem_id: str = Form(None),
|
||||
# Modem-specific fields
|
||||
ip_address: str = Form(None),
|
||||
phone_number: str = Form(None),
|
||||
hardware_model: str = Form(None),
|
||||
# Sound Level Meter-specific fields
|
||||
slm_host: str = Form(None),
|
||||
slm_tcp_port: str = Form(None),
|
||||
slm_ftp_port: str = Form(None),
|
||||
slm_model: str = Form(None),
|
||||
slm_serial_number: str = Form(None),
|
||||
slm_frequency_weighting: str = Form(None),
|
||||
slm_time_weighting: str = Form(None),
|
||||
slm_measurement_range: str = Form(None),
|
||||
db: Session = Depends(get_db)
|
||||
):
|
||||
logger.info(f"Adding unit: id={id}, device_type={device_type}, deployed={deployed}, retired={retired}")
|
||||
|
||||
# Convert boolean strings to actual booleans
|
||||
deployed_bool = deployed in ['true', 'True', '1', 'yes'] if deployed else False
|
||||
retired_bool = retired in ['true', 'True', '1', 'yes'] if retired else False
|
||||
|
||||
# Convert port strings to integers
|
||||
slm_tcp_port_int = int(slm_tcp_port) if slm_tcp_port and slm_tcp_port.strip() else None
|
||||
slm_ftp_port_int = int(slm_ftp_port) if slm_ftp_port and slm_ftp_port.strip() else None
|
||||
|
||||
if db.query(RosterUnit).filter(RosterUnit.id == id).first():
|
||||
raise HTTPException(status_code=400, detail="Unit already exists")
|
||||
|
||||
# Parse date fields if provided
|
||||
last_cal_date = None
|
||||
if last_calibrated:
|
||||
try:
|
||||
last_cal_date = datetime.strptime(last_calibrated, "%Y-%m-%d").date()
|
||||
except ValueError:
|
||||
raise HTTPException(status_code=400, detail="Invalid last_calibrated date format. Use YYYY-MM-DD")
|
||||
|
||||
next_cal_date = None
|
||||
if next_calibration_due:
|
||||
try:
|
||||
next_cal_date = datetime.strptime(next_calibration_due, "%Y-%m-%d").date()
|
||||
except ValueError:
|
||||
raise HTTPException(status_code=400, detail="Invalid next_calibration_due date format. Use YYYY-MM-DD")
|
||||
|
||||
unit = RosterUnit(
|
||||
id=id,
|
||||
device_type=device_type,
|
||||
unit_type=unit_type,
|
||||
deployed=deployed_bool,
|
||||
retired=retired_bool,
|
||||
note=note,
|
||||
project_id=project_id,
|
||||
location=location,
|
||||
address=address,
|
||||
coordinates=coordinates,
|
||||
last_updated=datetime.utcnow(),
|
||||
# Seismograph-specific fields
|
||||
last_calibrated=last_cal_date,
|
||||
next_calibration_due=next_cal_date,
|
||||
deployed_with_modem_id=deployed_with_modem_id if deployed_with_modem_id else None,
|
||||
# Modem-specific fields
|
||||
ip_address=ip_address if ip_address else None,
|
||||
phone_number=phone_number if phone_number else None,
|
||||
hardware_model=hardware_model if hardware_model else None,
|
||||
# Sound Level Meter-specific fields
|
||||
slm_host=slm_host if slm_host else None,
|
||||
slm_tcp_port=slm_tcp_port_int,
|
||||
slm_ftp_port=slm_ftp_port_int,
|
||||
slm_model=slm_model if slm_model else None,
|
||||
slm_serial_number=slm_serial_number if slm_serial_number else None,
|
||||
slm_frequency_weighting=slm_frequency_weighting if slm_frequency_weighting else None,
|
||||
slm_time_weighting=slm_time_weighting if slm_time_weighting else None,
|
||||
slm_measurement_range=slm_measurement_range if slm_measurement_range else None,
|
||||
)
|
||||
db.add(unit)
|
||||
db.commit()
|
||||
|
||||
# If sound level meter, sync config to SLMM cache
|
||||
if device_type == "sound_level_meter":
|
||||
logger.info(f"Syncing SLM {id} config to SLMM cache...")
|
||||
result = await sync_slm_to_slmm_cache(
|
||||
unit_id=id,
|
||||
host=slm_host,
|
||||
tcp_port=slm_tcp_port_int,
|
||||
ftp_port=slm_ftp_port_int,
|
||||
deployed_with_modem_id=deployed_with_modem_id,
|
||||
db=db
|
||||
)
|
||||
|
||||
if not result["success"]:
|
||||
logger.warning(f"SLMM cache sync warning for {id}: {result['message']}")
|
||||
# Don't fail the operation - device is still added to Terra-View roster
|
||||
# User can manually sync later or SLMM will be synced on next config update
|
||||
|
||||
return {"message": "Unit added", "id": id, "device_type": device_type}
|
||||
|
||||
|
||||
@router.get("/modems")
|
||||
def get_modems_list(db: Session = Depends(get_db)):
|
||||
"""Get list of all modem units for dropdown selection"""
|
||||
modems = db.query(RosterUnit).filter_by(device_type="modem", retired=False).order_by(RosterUnit.id).all()
|
||||
|
||||
return [
|
||||
{
|
||||
"id": modem.id,
|
||||
"ip_address": modem.ip_address,
|
||||
"phone_number": modem.phone_number,
|
||||
"hardware_model": modem.hardware_model,
|
||||
"deployed": modem.deployed
|
||||
}
|
||||
for modem in modems
|
||||
]
|
||||
|
||||
|
||||
@router.get("/{unit_id}")
|
||||
def get_roster_unit(unit_id: str, db: Session = Depends(get_db)):
|
||||
"""Get a single roster unit by ID"""
|
||||
unit = db.query(RosterUnit).filter(RosterUnit.id == unit_id).first()
|
||||
if not unit:
|
||||
raise HTTPException(status_code=404, detail="Unit not found")
|
||||
|
||||
return {
|
||||
"id": unit.id,
|
||||
"device_type": unit.device_type or "seismograph",
|
||||
"unit_type": unit.unit_type,
|
||||
"deployed": unit.deployed,
|
||||
"retired": unit.retired,
|
||||
"note": unit.note or "",
|
||||
"project_id": unit.project_id or "",
|
||||
"location": unit.location or "",
|
||||
"address": unit.address or "",
|
||||
"coordinates": unit.coordinates or "",
|
||||
"last_calibrated": unit.last_calibrated.isoformat() if unit.last_calibrated else "",
|
||||
"next_calibration_due": unit.next_calibration_due.isoformat() if unit.next_calibration_due else "",
|
||||
"deployed_with_modem_id": unit.deployed_with_modem_id or "",
|
||||
"ip_address": unit.ip_address or "",
|
||||
"phone_number": unit.phone_number or "",
|
||||
"hardware_model": unit.hardware_model or "",
|
||||
"slm_host": unit.slm_host or "",
|
||||
"slm_tcp_port": unit.slm_tcp_port or "",
|
||||
"slm_ftp_port": unit.slm_ftp_port or "",
|
||||
"slm_model": unit.slm_model or "",
|
||||
"slm_serial_number": unit.slm_serial_number or "",
|
||||
"slm_frequency_weighting": unit.slm_frequency_weighting or "",
|
||||
"slm_time_weighting": unit.slm_time_weighting or "",
|
||||
"slm_measurement_range": unit.slm_measurement_range or "",
|
||||
}
|
||||
|
||||
|
||||
@router.post("/edit/{unit_id}")
|
||||
def edit_roster_unit(
|
||||
unit_id: str,
|
||||
device_type: str = Form("seismograph"),
|
||||
unit_type: str = Form("series3"),
|
||||
deployed: str = Form(None),
|
||||
retired: str = Form(None),
|
||||
note: str = Form(""),
|
||||
project_id: str = Form(None),
|
||||
location: str = Form(None),
|
||||
address: str = Form(None),
|
||||
coordinates: str = Form(None),
|
||||
# Seismograph-specific fields
|
||||
last_calibrated: str = Form(None),
|
||||
next_calibration_due: str = Form(None),
|
||||
deployed_with_modem_id: str = Form(None),
|
||||
# Modem-specific fields
|
||||
ip_address: str = Form(None),
|
||||
phone_number: str = Form(None),
|
||||
hardware_model: str = Form(None),
|
||||
# Sound Level Meter-specific fields
|
||||
slm_host: str = Form(None),
|
||||
slm_tcp_port: str = Form(None),
|
||||
slm_ftp_port: str = Form(None),
|
||||
slm_model: str = Form(None),
|
||||
slm_serial_number: str = Form(None),
|
||||
slm_frequency_weighting: str = Form(None),
|
||||
slm_time_weighting: str = Form(None),
|
||||
slm_measurement_range: str = Form(None),
|
||||
db: Session = Depends(get_db)
|
||||
):
|
||||
unit = db.query(RosterUnit).filter(RosterUnit.id == unit_id).first()
|
||||
if not unit:
|
||||
raise HTTPException(status_code=404, detail="Unit not found")
|
||||
|
||||
# Convert boolean strings to actual booleans
|
||||
deployed_bool = deployed in ['true', 'True', '1', 'yes'] if deployed else False
|
||||
retired_bool = retired in ['true', 'True', '1', 'yes'] if retired else False
|
||||
|
||||
# Convert port strings to integers
|
||||
slm_tcp_port_int = int(slm_tcp_port) if slm_tcp_port and slm_tcp_port.strip() else None
|
||||
slm_ftp_port_int = int(slm_ftp_port) if slm_ftp_port and slm_ftp_port.strip() else None
|
||||
|
||||
# Parse date fields if provided
|
||||
last_cal_date = None
|
||||
if last_calibrated:
|
||||
try:
|
||||
last_cal_date = datetime.strptime(last_calibrated, "%Y-%m-%d").date()
|
||||
except ValueError:
|
||||
raise HTTPException(status_code=400, detail="Invalid last_calibrated date format. Use YYYY-MM-DD")
|
||||
|
||||
next_cal_date = None
|
||||
if next_calibration_due:
|
||||
try:
|
||||
next_cal_date = datetime.strptime(next_calibration_due, "%Y-%m-%d").date()
|
||||
except ValueError:
|
||||
raise HTTPException(status_code=400, detail="Invalid next_calibration_due date format. Use YYYY-MM-DD")
|
||||
|
||||
# Track changes for history
|
||||
old_note = unit.note
|
||||
old_deployed = unit.deployed
|
||||
old_retired = unit.retired
|
||||
|
||||
# Update all fields
|
||||
unit.device_type = device_type
|
||||
unit.unit_type = unit_type
|
||||
unit.deployed = deployed_bool
|
||||
unit.retired = retired_bool
|
||||
unit.note = note
|
||||
unit.project_id = project_id
|
||||
unit.location = location
|
||||
unit.address = address
|
||||
unit.coordinates = coordinates
|
||||
unit.last_updated = datetime.utcnow()
|
||||
|
||||
# Seismograph-specific fields
|
||||
unit.last_calibrated = last_cal_date
|
||||
unit.next_calibration_due = next_cal_date
|
||||
unit.deployed_with_modem_id = deployed_with_modem_id if deployed_with_modem_id else None
|
||||
|
||||
# Modem-specific fields
|
||||
unit.ip_address = ip_address if ip_address else None
|
||||
unit.phone_number = phone_number if phone_number else None
|
||||
unit.hardware_model = hardware_model if hardware_model else None
|
||||
|
||||
# Sound Level Meter-specific fields
|
||||
unit.slm_host = slm_host if slm_host else None
|
||||
unit.slm_tcp_port = slm_tcp_port_int
|
||||
unit.slm_ftp_port = slm_ftp_port_int
|
||||
unit.slm_model = slm_model if slm_model else None
|
||||
unit.slm_serial_number = slm_serial_number if slm_serial_number else None
|
||||
unit.slm_frequency_weighting = slm_frequency_weighting if slm_frequency_weighting else None
|
||||
unit.slm_time_weighting = slm_time_weighting if slm_time_weighting else None
|
||||
unit.slm_measurement_range = slm_measurement_range if slm_measurement_range else None
|
||||
|
||||
# Record history entries for changed fields
|
||||
if old_note != note:
|
||||
record_history(db, unit_id, "note_change", "note", old_note, note, "manual")
|
||||
|
||||
if old_deployed != deployed_bool:
|
||||
status_text = "deployed" if deployed else "benched"
|
||||
old_status_text = "deployed" if old_deployed else "benched"
|
||||
record_history(db, unit_id, "deployed_change", "deployed", old_status_text, status_text, "manual")
|
||||
|
||||
if old_retired != retired_bool:
|
||||
status_text = "retired" if retired else "active"
|
||||
old_status_text = "retired" if old_retired else "active"
|
||||
record_history(db, unit_id, "retired_change", "retired", old_status_text, status_text, "manual")
|
||||
|
||||
db.commit()
|
||||
return {"message": "Unit updated", "id": unit_id, "device_type": device_type}
|
||||
|
||||
|
||||
@router.post("/set-deployed/{unit_id}")
|
||||
def set_deployed(unit_id: str, deployed: bool = Form(...), db: Session = Depends(get_db)):
|
||||
unit = get_or_create_roster_unit(db, unit_id)
|
||||
old_deployed = unit.deployed
|
||||
unit.deployed = deployed
|
||||
unit.last_updated = datetime.utcnow()
|
||||
|
||||
# Record history entry for deployed status change
|
||||
if old_deployed != deployed:
|
||||
status_text = "deployed" if deployed else "benched"
|
||||
old_status_text = "deployed" if old_deployed else "benched"
|
||||
record_history(
|
||||
db=db,
|
||||
unit_id=unit_id,
|
||||
change_type="deployed_change",
|
||||
field_name="deployed",
|
||||
old_value=old_status_text,
|
||||
new_value=status_text,
|
||||
source="manual"
|
||||
)
|
||||
|
||||
db.commit()
|
||||
return {"message": "Updated", "id": unit_id, "deployed": deployed}
|
||||
|
||||
|
||||
@router.post("/set-retired/{unit_id}")
|
||||
def set_retired(unit_id: str, retired: bool = Form(...), db: Session = Depends(get_db)):
|
||||
unit = get_or_create_roster_unit(db, unit_id)
|
||||
old_retired = unit.retired
|
||||
unit.retired = retired
|
||||
unit.last_updated = datetime.utcnow()
|
||||
|
||||
# Record history entry for retired status change
|
||||
if old_retired != retired:
|
||||
status_text = "retired" if retired else "active"
|
||||
old_status_text = "retired" if old_retired else "active"
|
||||
record_history(
|
||||
db=db,
|
||||
unit_id=unit_id,
|
||||
change_type="retired_change",
|
||||
field_name="retired",
|
||||
old_value=old_status_text,
|
||||
new_value=status_text,
|
||||
source="manual"
|
||||
)
|
||||
|
||||
db.commit()
|
||||
return {"message": "Updated", "id": unit_id, "retired": retired}
|
||||
|
||||
|
||||
@router.delete("/{unit_id}")
|
||||
def delete_roster_unit(unit_id: str, db: Session = Depends(get_db)):
|
||||
"""
|
||||
Permanently delete a unit from the database.
|
||||
Checks roster, emitters, and ignored_units tables and deletes from any table where the unit exists.
|
||||
"""
|
||||
deleted = False
|
||||
|
||||
# Try to delete from roster table
|
||||
roster_unit = db.query(RosterUnit).filter(RosterUnit.id == unit_id).first()
|
||||
if roster_unit:
|
||||
db.delete(roster_unit)
|
||||
deleted = True
|
||||
|
||||
# Try to delete from emitters table
|
||||
emitter = db.query(Emitter).filter(Emitter.id == unit_id).first()
|
||||
if emitter:
|
||||
db.delete(emitter)
|
||||
deleted = True
|
||||
|
||||
# Try to delete from ignored_units table
|
||||
ignored_unit = db.query(IgnoredUnit).filter(IgnoredUnit.id == unit_id).first()
|
||||
if ignored_unit:
|
||||
db.delete(ignored_unit)
|
||||
deleted = True
|
||||
|
||||
# If not found in any table, return error
|
||||
if not deleted:
|
||||
raise HTTPException(status_code=404, detail="Unit not found")
|
||||
|
||||
db.commit()
|
||||
return {"message": "Unit deleted", "id": unit_id}
|
||||
|
||||
|
||||
@router.post("/set-note/{unit_id}")
|
||||
def set_note(unit_id: str, note: str = Form(""), db: Session = Depends(get_db)):
|
||||
unit = get_or_create_roster_unit(db, unit_id)
|
||||
old_note = unit.note
|
||||
unit.note = note
|
||||
unit.last_updated = datetime.utcnow()
|
||||
|
||||
# Record history entry for note change
|
||||
if old_note != note:
|
||||
record_history(
|
||||
db=db,
|
||||
unit_id=unit_id,
|
||||
change_type="note_change",
|
||||
field_name="note",
|
||||
old_value=old_note,
|
||||
new_value=note,
|
||||
source="manual"
|
||||
)
|
||||
|
||||
db.commit()
|
||||
return {"message": "Updated", "id": unit_id, "note": note}
|
||||
|
||||
|
||||
@router.post("/import-csv")
|
||||
async def import_csv(
|
||||
file: UploadFile = File(...),
|
||||
update_existing: bool = Form(True),
|
||||
db: Session = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Import roster units from CSV file.
|
||||
|
||||
Expected CSV columns (unit_id is required, others are optional):
|
||||
- unit_id: Unique identifier for the unit
|
||||
- unit_type: Type of unit (default: "series3")
|
||||
- deployed: Boolean for deployment status (default: False)
|
||||
- retired: Boolean for retirement status (default: False)
|
||||
- note: Notes about the unit
|
||||
- project_id: Project identifier
|
||||
- location: Location description
|
||||
|
||||
Args:
|
||||
file: CSV file upload
|
||||
update_existing: If True, update existing units; if False, skip them
|
||||
"""
|
||||
|
||||
if not file.filename.endswith('.csv'):
|
||||
raise HTTPException(status_code=400, detail="File must be a CSV")
|
||||
|
||||
# Read file content
|
||||
contents = await file.read()
|
||||
csv_text = contents.decode('utf-8')
|
||||
csv_reader = csv.DictReader(io.StringIO(csv_text))
|
||||
|
||||
results = {
|
||||
"added": [],
|
||||
"updated": [],
|
||||
"skipped": [],
|
||||
"errors": []
|
||||
}
|
||||
|
||||
for row_num, row in enumerate(csv_reader, start=2): # Start at 2 to account for header
|
||||
try:
|
||||
# Validate required field
|
||||
unit_id = row.get('unit_id', '').strip()
|
||||
if not unit_id:
|
||||
results["errors"].append({
|
||||
"row": row_num,
|
||||
"error": "Missing required field: unit_id"
|
||||
})
|
||||
continue
|
||||
|
||||
# Check if unit exists
|
||||
existing_unit = db.query(RosterUnit).filter(RosterUnit.id == unit_id).first()
|
||||
|
||||
if existing_unit:
|
||||
if not update_existing:
|
||||
results["skipped"].append(unit_id)
|
||||
continue
|
||||
|
||||
# Update existing unit
|
||||
existing_unit.unit_type = row.get('unit_type', existing_unit.unit_type or 'series3')
|
||||
existing_unit.deployed = row.get('deployed', '').lower() in ('true', '1', 'yes') if row.get('deployed') else existing_unit.deployed
|
||||
existing_unit.retired = row.get('retired', '').lower() in ('true', '1', 'yes') if row.get('retired') else existing_unit.retired
|
||||
existing_unit.note = row.get('note', existing_unit.note or '')
|
||||
existing_unit.project_id = row.get('project_id', existing_unit.project_id)
|
||||
existing_unit.location = row.get('location', existing_unit.location)
|
||||
existing_unit.address = row.get('address', existing_unit.address)
|
||||
existing_unit.coordinates = row.get('coordinates', existing_unit.coordinates)
|
||||
existing_unit.last_updated = datetime.utcnow()
|
||||
|
||||
results["updated"].append(unit_id)
|
||||
else:
|
||||
# Create new unit
|
||||
new_unit = RosterUnit(
|
||||
id=unit_id,
|
||||
unit_type=row.get('unit_type', 'series3'),
|
||||
deployed=row.get('deployed', '').lower() in ('true', '1', 'yes'),
|
||||
retired=row.get('retired', '').lower() in ('true', '1', 'yes'),
|
||||
note=row.get('note', ''),
|
||||
project_id=row.get('project_id'),
|
||||
location=row.get('location'),
|
||||
address=row.get('address'),
|
||||
coordinates=row.get('coordinates'),
|
||||
last_updated=datetime.utcnow()
|
||||
)
|
||||
db.add(new_unit)
|
||||
results["added"].append(unit_id)
|
||||
|
||||
except Exception as e:
|
||||
results["errors"].append({
|
||||
"row": row_num,
|
||||
"unit_id": row.get('unit_id', 'unknown'),
|
||||
"error": str(e)
|
||||
})
|
||||
|
||||
# Commit all changes
|
||||
try:
|
||||
db.commit()
|
||||
except Exception as e:
|
||||
db.rollback()
|
||||
raise HTTPException(status_code=500, detail=f"Database error: {str(e)}")
|
||||
|
||||
return {
|
||||
"message": "CSV import completed",
|
||||
"summary": {
|
||||
"added": len(results["added"]),
|
||||
"updated": len(results["updated"]),
|
||||
"skipped": len(results["skipped"]),
|
||||
"errors": len(results["errors"])
|
||||
},
|
||||
"details": results
|
||||
}
|
||||
|
||||
|
||||
@router.post("/ignore/{unit_id}")
|
||||
def ignore_unit(unit_id: str, reason: str = Form(""), db: Session = Depends(get_db)):
|
||||
"""
|
||||
Add a unit to the ignore list to suppress it from unknown emitters.
|
||||
"""
|
||||
# Check if already ignored
|
||||
if db.query(IgnoredUnit).filter(IgnoredUnit.id == unit_id).first():
|
||||
raise HTTPException(status_code=400, detail="Unit already ignored")
|
||||
|
||||
ignored = IgnoredUnit(
|
||||
id=unit_id,
|
||||
reason=reason,
|
||||
ignored_at=datetime.utcnow()
|
||||
)
|
||||
db.add(ignored)
|
||||
db.commit()
|
||||
return {"message": "Unit ignored", "id": unit_id}
|
||||
|
||||
|
||||
@router.delete("/ignore/{unit_id}")
|
||||
def unignore_unit(unit_id: str, db: Session = Depends(get_db)):
|
||||
"""
|
||||
Remove a unit from the ignore list.
|
||||
"""
|
||||
ignored = db.query(IgnoredUnit).filter(IgnoredUnit.id == unit_id).first()
|
||||
if not ignored:
|
||||
raise HTTPException(status_code=404, detail="Unit not in ignore list")
|
||||
|
||||
db.delete(ignored)
|
||||
db.commit()
|
||||
return {"message": "Unit unignored", "id": unit_id}
|
||||
|
||||
|
||||
@router.get("/ignored")
|
||||
def list_ignored_units(db: Session = Depends(get_db)):
|
||||
"""
|
||||
Get list of all ignored units.
|
||||
"""
|
||||
ignored_units = db.query(IgnoredUnit).all()
|
||||
return {
|
||||
"ignored": [
|
||||
{
|
||||
"id": unit.id,
|
||||
"reason": unit.reason,
|
||||
"ignored_at": unit.ignored_at.isoformat()
|
||||
}
|
||||
for unit in ignored_units
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
@router.get("/history/{unit_id}")
|
||||
def get_unit_history(unit_id: str, db: Session = Depends(get_db)):
|
||||
"""
|
||||
Get complete history timeline for a unit.
|
||||
Returns all historical changes ordered by most recent first.
|
||||
"""
|
||||
history_entries = db.query(UnitHistory).filter(
|
||||
UnitHistory.unit_id == unit_id
|
||||
).order_by(UnitHistory.changed_at.desc()).all()
|
||||
|
||||
return {
|
||||
"unit_id": unit_id,
|
||||
"history": [
|
||||
{
|
||||
"id": entry.id,
|
||||
"change_type": entry.change_type,
|
||||
"field_name": entry.field_name,
|
||||
"old_value": entry.old_value,
|
||||
"new_value": entry.new_value,
|
||||
"changed_at": entry.changed_at.isoformat(),
|
||||
"source": entry.source,
|
||||
"notes": entry.notes
|
||||
}
|
||||
for entry in history_entries
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
@router.delete("/history/{history_id}")
|
||||
def delete_history_entry(history_id: int, db: Session = Depends(get_db)):
|
||||
"""
|
||||
Delete a specific history entry by ID.
|
||||
Allows manual cleanup of old history entries.
|
||||
"""
|
||||
history_entry = db.query(UnitHistory).filter(UnitHistory.id == history_id).first()
|
||||
if not history_entry:
|
||||
raise HTTPException(status_code=404, detail="History entry not found")
|
||||
|
||||
db.delete(history_entry)
|
||||
db.commit()
|
||||
return {"message": "History entry deleted", "id": history_id}
|
||||
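A minimal sketch, not part of the diff, of a CSV accepted by POST /api/roster/import-csv above, posted with httpx. The host, unit ids, and project id are made up; the columns follow the endpoint's docstring.

import io
import httpx

csv_body = (
    "unit_id,unit_type,deployed,retired,note,project_id,location\n"
    "BE12345,series3,true,false,North wall,PRJ-001,Site A\n"
    "BE67890,series3,false,false,Spare,,Warehouse\n"
)

resp = httpx.post(
    "http://localhost:8000/api/roster/import-csv",
    data={"update_existing": "true"},
    files={"file": ("roster.csv", io.BytesIO(csv_body.encode("utf-8")), "text/csv")},
)
print(resp.json()["summary"])  # counts of added / updated / skipped / errors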
app/seismo/routers/seismo_dashboard.py (new file, 81 lines)
@@ -0,0 +1,81 @@
"""
Seismograph Dashboard API Router
Provides endpoints for the seismograph-specific dashboard
"""

from fastapi import APIRouter, Request, Depends, Query
from fastapi.responses import HTMLResponse
from fastapi.templating import Jinja2Templates
from sqlalchemy.orm import Session
from backend.database import get_db
from backend.models import RosterUnit

router = APIRouter(prefix="/api/seismo-dashboard", tags=["seismo-dashboard"])
templates = Jinja2Templates(directory="templates")


@router.get("/stats", response_class=HTMLResponse)
async def get_seismo_stats(request: Request, db: Session = Depends(get_db)):
    """
    Returns HTML partial with seismograph statistics summary
    """
    # Get all seismograph units
    seismos = db.query(RosterUnit).filter_by(
        device_type="seismograph",
        retired=False
    ).all()

    total = len(seismos)
    deployed = sum(1 for s in seismos if s.deployed)
    benched = sum(1 for s in seismos if not s.deployed)

    # Count modems assigned to deployed seismographs
    with_modem = sum(1 for s in seismos if s.deployed and s.deployed_with_modem_id)
    without_modem = deployed - with_modem

    return templates.TemplateResponse(
        "partials/seismo_stats.html",
        {
            "request": request,
            "total": total,
            "deployed": deployed,
            "benched": benched,
            "with_modem": with_modem,
            "without_modem": without_modem
        }
    )


@router.get("/units", response_class=HTMLResponse)
async def get_seismo_units(
    request: Request,
    db: Session = Depends(get_db),
    search: str = Query(None)
):
    """
    Returns HTML partial with filterable seismograph unit list
    """
    query = db.query(RosterUnit).filter_by(
        device_type="seismograph",
        retired=False
    )

    # Apply search filter
    if search:
        search_lower = search.lower()
        query = query.filter(
            (RosterUnit.id.ilike(f"%{search}%")) |
            (RosterUnit.note.ilike(f"%{search}%")) |
            (RosterUnit.address.ilike(f"%{search}%"))
        )

    seismos = query.order_by(RosterUnit.id).all()

    return templates.TemplateResponse(
        "partials/seismo_unit_list.html",
        {
            "request": request,
            "units": seismos,
            "search": search or ""
        }
    )
app/seismo/routers/settings.py (new file, 479 lines)
@@ -0,0 +1,479 @@
|
||||
from fastapi import APIRouter, Depends, HTTPException, UploadFile, File
|
||||
from fastapi.responses import StreamingResponse, FileResponse
|
||||
from sqlalchemy.orm import Session
|
||||
from datetime import datetime, date
|
||||
from pydantic import BaseModel
|
||||
from typing import Optional
|
||||
import csv
|
||||
import io
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
|
||||
from backend.database import get_db
|
||||
from backend.models import RosterUnit, Emitter, IgnoredUnit, UserPreferences
|
||||
from backend.services.database_backup import DatabaseBackupService
|
||||
|
||||
router = APIRouter(prefix="/api/settings", tags=["settings"])
|
||||
|
||||
|
||||
@router.get("/export-csv")
|
||||
def export_roster_csv(db: Session = Depends(get_db)):
|
||||
"""Export all roster units to CSV"""
|
||||
units = db.query(RosterUnit).all()
|
||||
|
||||
# Create CSV in memory
|
||||
output = io.StringIO()
|
||||
fieldnames = [
|
||||
'unit_id', 'unit_type', 'device_type', 'deployed', 'retired',
|
||||
'note', 'project_id', 'location', 'address', 'coordinates',
|
||||
'last_calibrated', 'next_calibration_due', 'deployed_with_modem_id',
|
||||
'ip_address', 'phone_number', 'hardware_model'
|
||||
]
|
||||
|
||||
writer = csv.DictWriter(output, fieldnames=fieldnames)
|
||||
writer.writeheader()
|
||||
|
||||
for unit in units:
|
||||
writer.writerow({
|
||||
'unit_id': unit.id,
|
||||
'unit_type': unit.unit_type or '',
|
||||
'device_type': unit.device_type or 'seismograph',
|
||||
'deployed': 'true' if unit.deployed else 'false',
|
||||
'retired': 'true' if unit.retired else 'false',
|
||||
'note': unit.note or '',
|
||||
'project_id': unit.project_id or '',
|
||||
'location': unit.location or '',
|
||||
'address': unit.address or '',
|
||||
'coordinates': unit.coordinates or '',
|
||||
'last_calibrated': unit.last_calibrated.strftime('%Y-%m-%d') if unit.last_calibrated else '',
|
||||
'next_calibration_due': unit.next_calibration_due.strftime('%Y-%m-%d') if unit.next_calibration_due else '',
|
||||
'deployed_with_modem_id': unit.deployed_with_modem_id or '',
|
||||
'ip_address': unit.ip_address or '',
|
||||
'phone_number': unit.phone_number or '',
|
||||
'hardware_model': unit.hardware_model or ''
|
||||
})
|
||||
|
||||
output.seek(0)
|
||||
filename = f"roster_export_{date.today().isoformat()}.csv"
|
||||
|
||||
return StreamingResponse(
|
||||
io.BytesIO(output.getvalue().encode('utf-8')),
|
||||
media_type="text/csv",
|
||||
headers={"Content-Disposition": f"attachment; filename={filename}"}
|
||||
)
|
||||
|
||||
|
||||
@router.get("/stats")
|
||||
def get_table_stats(db: Session = Depends(get_db)):
|
||||
"""Get counts for all tables"""
|
||||
roster_count = db.query(RosterUnit).count()
|
||||
emitters_count = db.query(Emitter).count()
|
||||
ignored_count = db.query(IgnoredUnit).count()
|
||||
|
||||
return {
|
||||
"roster": roster_count,
|
||||
"emitters": emitters_count,
|
||||
"ignored": ignored_count,
|
||||
"total": roster_count + emitters_count + ignored_count
|
||||
}
|
||||
|
||||
|
||||
@router.get("/roster-units")
|
||||
def get_all_roster_units(db: Session = Depends(get_db)):
|
||||
"""Get all roster units for management table"""
|
||||
units = db.query(RosterUnit).order_by(RosterUnit.id).all()
|
||||
|
||||
return [{
|
||||
"id": unit.id,
|
||||
"device_type": unit.device_type or "seismograph",
|
||||
"unit_type": unit.unit_type or "series3",
|
||||
"deployed": unit.deployed,
|
||||
"retired": unit.retired,
|
||||
"note": unit.note or "",
|
||||
"project_id": unit.project_id or "",
|
||||
"location": unit.location or "",
|
||||
"address": unit.address or "",
|
||||
"coordinates": unit.coordinates or "",
|
||||
"last_calibrated": unit.last_calibrated.isoformat() if unit.last_calibrated else None,
|
||||
"next_calibration_due": unit.next_calibration_due.isoformat() if unit.next_calibration_due else None,
|
||||
"deployed_with_modem_id": unit.deployed_with_modem_id or "",
|
||||
"ip_address": unit.ip_address or "",
|
||||
"phone_number": unit.phone_number or "",
|
||||
"hardware_model": unit.hardware_model or "",
|
||||
"slm_host": unit.slm_host or "",
|
||||
"slm_tcp_port": unit.slm_tcp_port,
|
||||
"slm_model": unit.slm_model or "",
|
||||
"slm_serial_number": unit.slm_serial_number or "",
|
||||
"slm_frequency_weighting": unit.slm_frequency_weighting or "",
|
||||
"slm_time_weighting": unit.slm_time_weighting or "",
|
||||
"slm_measurement_range": unit.slm_measurement_range or "",
|
||||
"slm_last_check": unit.slm_last_check.isoformat() if unit.slm_last_check else None,
|
||||
"last_updated": unit.last_updated.isoformat() if unit.last_updated else None
|
||||
} for unit in units]
|
||||
|
||||
|
||||
def parse_date(date_str):
|
||||
"""Helper function to parse date strings"""
|
||||
if not date_str or not date_str.strip():
|
||||
return None
|
||||
try:
|
||||
return datetime.strptime(date_str.strip(), "%Y-%m-%d").date()
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
@router.post("/import-csv-replace")
|
||||
async def import_csv_replace(
|
||||
file: UploadFile = File(...),
|
||||
db: Session = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Replace all roster data with CSV import (atomic transaction).
|
||||
Clears roster table first, then imports all rows from CSV.
|
||||
"""
|
||||
|
||||
if not file.filename.endswith('.csv'):
|
||||
raise HTTPException(status_code=400, detail="File must be a CSV")
|
||||
|
||||
# Read and parse CSV
|
||||
contents = await file.read()
|
||||
csv_text = contents.decode('utf-8')
|
||||
csv_reader = csv.DictReader(io.StringIO(csv_text))
|
||||
|
||||
# Parse all rows FIRST (fail fast before deletion)
|
||||
parsed_units = []
|
||||
for row_num, row in enumerate(csv_reader, start=2):
|
||||
unit_id = row.get('unit_id', '').strip()
|
||||
if not unit_id:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"Row {row_num}: Missing required field unit_id"
|
||||
)
|
||||
|
||||
# Parse and validate dates
|
||||
last_cal_date = parse_date(row.get('last_calibrated'))
|
||||
next_cal_date = parse_date(row.get('next_calibration_due'))
|
||||
|
||||
parsed_units.append({
|
||||
'id': unit_id,
|
||||
'unit_type': row.get('unit_type', 'series3'),
|
||||
'device_type': row.get('device_type', 'seismograph'),
|
||||
'deployed': row.get('deployed', '').lower() in ('true', '1', 'yes'),
|
||||
'retired': row.get('retired', '').lower() in ('true', '1', 'yes'),
|
||||
'note': row.get('note', ''),
|
||||
'project_id': row.get('project_id') or None,
|
||||
'location': row.get('location') or None,
|
||||
'address': row.get('address') or None,
|
||||
'coordinates': row.get('coordinates') or None,
|
||||
'last_calibrated': last_cal_date,
|
||||
'next_calibration_due': next_cal_date,
|
||||
'deployed_with_modem_id': row.get('deployed_with_modem_id') or None,
|
||||
'ip_address': row.get('ip_address') or None,
|
||||
'phone_number': row.get('phone_number') or None,
|
||||
'hardware_model': row.get('hardware_model') or None,
|
||||
})
|
||||
|
||||
# Atomic transaction: delete all, then insert all
|
||||
try:
|
||||
deleted_count = db.query(RosterUnit).delete()
|
||||
|
||||
for unit_data in parsed_units:
|
||||
new_unit = RosterUnit(**unit_data, last_updated=datetime.utcnow())
|
||||
db.add(new_unit)
|
||||
|
||||
db.commit()
|
||||
|
||||
return {
|
||||
"message": "Roster replaced successfully",
|
||||
"deleted": deleted_count,
|
||||
"added": len(parsed_units)
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
db.rollback()
|
||||
raise HTTPException(status_code=500, detail=f"Import failed: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/clear-all")
|
||||
def clear_all_data(db: Session = Depends(get_db)):
|
||||
"""Clear all tables (roster, emitters, ignored)"""
|
||||
try:
|
||||
roster_count = db.query(RosterUnit).delete()
|
||||
emitters_count = db.query(Emitter).delete()
|
||||
ignored_count = db.query(IgnoredUnit).delete()
|
||||
|
||||
db.commit()
|
||||
|
||||
return {
|
||||
"message": "All data cleared",
|
||||
"deleted": {
|
||||
"roster": roster_count,
|
||||
"emitters": emitters_count,
|
||||
"ignored": ignored_count,
|
||||
"total": roster_count + emitters_count + ignored_count
|
||||
}
|
||||
}
|
||||
except Exception as e:
|
||||
db.rollback()
|
||||
raise HTTPException(status_code=500, detail=f"Clear failed: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/clear-roster")
|
||||
def clear_roster(db: Session = Depends(get_db)):
|
||||
"""Clear roster table only"""
|
||||
try:
|
||||
count = db.query(RosterUnit).delete()
|
||||
db.commit()
|
||||
return {"message": "Roster cleared", "deleted": count}
|
||||
except Exception as e:
|
||||
db.rollback()
|
||||
raise HTTPException(status_code=500, detail=f"Clear failed: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/clear-emitters")
|
||||
def clear_emitters(db: Session = Depends(get_db)):
|
||||
"""Clear emitters table only"""
|
||||
try:
|
||||
count = db.query(Emitter).delete()
|
||||
db.commit()
|
||||
return {"message": "Emitters cleared", "deleted": count}
|
||||
except Exception as e:
|
||||
db.rollback()
|
||||
raise HTTPException(status_code=500, detail=f"Clear failed: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/clear-ignored")
|
||||
def clear_ignored(db: Session = Depends(get_db)):
|
||||
"""Clear ignored units table only"""
|
||||
try:
|
||||
count = db.query(IgnoredUnit).delete()
|
||||
db.commit()
|
||||
return {"message": "Ignored units cleared", "deleted": count}
|
||||
except Exception as e:
|
||||
db.rollback()
|
||||
raise HTTPException(status_code=500, detail=f"Clear failed: {str(e)}")
|
||||
|
||||
|
||||
# User Preferences Endpoints
|
||||
|
||||
class PreferencesUpdate(BaseModel):
|
||||
"""Schema for updating user preferences (all fields optional)"""
|
||||
timezone: Optional[str] = None
|
||||
theme: Optional[str] = None
|
||||
auto_refresh_interval: Optional[int] = None
|
||||
date_format: Optional[str] = None
|
||||
table_rows_per_page: Optional[int] = None
|
||||
calibration_interval_days: Optional[int] = None
|
||||
calibration_warning_days: Optional[int] = None
|
||||
status_ok_threshold_hours: Optional[int] = None
|
||||
status_pending_threshold_hours: Optional[int] = None
|
||||
|
||||
|
||||
@router.get("/preferences")
|
||||
def get_preferences(db: Session = Depends(get_db)):
|
||||
"""
|
||||
Get user preferences. Creates default preferences if none exist.
|
||||
"""
|
||||
prefs = db.query(UserPreferences).filter(UserPreferences.id == 1).first()
|
||||
|
||||
if not prefs:
|
||||
# Create default preferences
|
||||
prefs = UserPreferences(id=1)
|
||||
db.add(prefs)
|
||||
db.commit()
|
||||
db.refresh(prefs)
|
||||
|
||||
return {
|
||||
"timezone": prefs.timezone,
|
||||
"theme": prefs.theme,
|
||||
"auto_refresh_interval": prefs.auto_refresh_interval,
|
||||
"date_format": prefs.date_format,
|
||||
"table_rows_per_page": prefs.table_rows_per_page,
|
||||
"calibration_interval_days": prefs.calibration_interval_days,
|
||||
"calibration_warning_days": prefs.calibration_warning_days,
|
||||
"status_ok_threshold_hours": prefs.status_ok_threshold_hours,
|
||||
"status_pending_threshold_hours": prefs.status_pending_threshold_hours,
|
||||
"updated_at": prefs.updated_at.isoformat() if prefs.updated_at else None
|
||||
}
|
||||
|
||||
|
||||
@router.put("/preferences")
|
||||
def update_preferences(
|
||||
updates: PreferencesUpdate,
|
||||
db: Session = Depends(get_db)
|
||||
):
|
||||
"""
|
||||
Update user preferences. Accepts partial updates.
|
||||
Creates default preferences if none exist.
|
||||
"""
|
||||
prefs = db.query(UserPreferences).filter(UserPreferences.id == 1).first()
|
||||
|
||||
if not prefs:
|
||||
# Create default preferences
|
||||
prefs = UserPreferences(id=1)
|
||||
db.add(prefs)
|
||||
|
||||
# Update only provided fields
|
||||
update_data = updates.dict(exclude_unset=True)
|
||||
for field, value in update_data.items():
|
||||
setattr(prefs, field, value)
|
||||
|
||||
prefs.updated_at = datetime.utcnow()
|
||||
|
||||
db.commit()
|
||||
db.refresh(prefs)
|
||||
|
||||
return {
|
||||
"message": "Preferences updated successfully",
|
||||
"timezone": prefs.timezone,
|
||||
"theme": prefs.theme,
|
||||
"auto_refresh_interval": prefs.auto_refresh_interval,
|
||||
"date_format": prefs.date_format,
|
||||
"table_rows_per_page": prefs.table_rows_per_page,
|
||||
"calibration_interval_days": prefs.calibration_interval_days,
|
||||
"calibration_warning_days": prefs.calibration_warning_days,
|
||||
"status_ok_threshold_hours": prefs.status_ok_threshold_hours,
|
||||
"status_pending_threshold_hours": prefs.status_pending_threshold_hours,
|
||||
"updated_at": prefs.updated_at.isoformat() if prefs.updated_at else None
|
||||
}
|
||||
|
||||
|
||||
# Database Management Endpoints
|
||||
|
||||
backup_service = DatabaseBackupService()
|
||||
|
||||
|
||||
@router.get("/database/stats")
|
||||
def get_database_stats():
|
||||
"""Get current database statistics"""
|
||||
try:
|
||||
stats = backup_service.get_database_stats()
|
||||
return stats
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Failed to get database stats: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/database/snapshot")
|
||||
def create_database_snapshot(description: Optional[str] = None):
|
||||
"""Create a full database snapshot"""
|
||||
try:
|
||||
snapshot = backup_service.create_snapshot(description=description)
|
||||
return {
|
||||
"message": "Snapshot created successfully",
|
||||
"snapshot": snapshot
|
||||
}
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Snapshot creation failed: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/database/snapshots")
|
||||
def list_database_snapshots():
|
||||
"""List all available database snapshots"""
|
||||
try:
|
||||
snapshots = backup_service.list_snapshots()
|
||||
return {
|
||||
"snapshots": snapshots,
|
||||
"count": len(snapshots)
|
||||
}
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Failed to list snapshots: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/database/snapshot/{filename}")
|
||||
def download_snapshot(filename: str):
|
||||
"""Download a specific snapshot file"""
|
||||
try:
|
||||
snapshot_path = backup_service.download_snapshot(filename)
|
||||
return FileResponse(
|
||||
path=str(snapshot_path),
|
||||
filename=filename,
|
||||
media_type="application/x-sqlite3"
|
||||
)
|
||||
except FileNotFoundError:
|
||||
raise HTTPException(status_code=404, detail=f"Snapshot {filename} not found")
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Download failed: {str(e)}")
|
||||
|
||||
|
||||
@router.delete("/database/snapshot/{filename}")
|
||||
def delete_database_snapshot(filename: str):
|
||||
"""Delete a specific snapshot"""
|
||||
try:
|
||||
backup_service.delete_snapshot(filename)
|
||||
return {
|
||||
"message": f"Snapshot {filename} deleted successfully",
|
||||
"filename": filename
|
||||
}
|
||||
except FileNotFoundError:
|
||||
raise HTTPException(status_code=404, detail=f"Snapshot {filename} not found")
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Delete failed: {str(e)}")
|
||||
|
||||
|
||||
class RestoreRequest(BaseModel):
|
||||
"""Schema for restore request"""
|
||||
filename: str
|
||||
create_backup: bool = True
|
||||
|
||||
|
||||
@router.post("/database/restore")
|
||||
def restore_database(request: RestoreRequest, db: Session = Depends(get_db)):
|
||||
"""Restore database from a snapshot"""
|
||||
try:
|
||||
# Close the database connection before restoring
|
||||
db.close()
|
||||
|
||||
result = backup_service.restore_snapshot(
|
||||
filename=request.filename,
|
||||
create_backup_before_restore=request.create_backup
|
||||
)
|
||||
|
||||
return result
|
||||
except FileNotFoundError:
|
||||
raise HTTPException(status_code=404, detail=f"Snapshot {request.filename} not found")
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Restore failed: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/database/upload-snapshot")
|
||||
async def upload_snapshot(file: UploadFile = File(...)):
|
||||
"""Upload a snapshot file to the backups directory"""
|
||||
if not file.filename.endswith('.db'):
|
||||
raise HTTPException(status_code=400, detail="File must be a .db file")
|
||||
|
||||
try:
|
||||
# Save uploaded file to backups directory
|
||||
backups_dir = Path("./data/backups")
|
||||
backups_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
|
||||
uploaded_filename = f"snapshot_uploaded_{timestamp}.db"
|
||||
file_path = backups_dir / uploaded_filename
|
||||
|
||||
# Save file
|
||||
with open(file_path, "wb") as buffer:
|
||||
shutil.copyfileobj(file.file, buffer)
|
||||
|
||||
# Create metadata
|
||||
metadata = {
|
||||
"filename": uploaded_filename,
|
||||
"created_at": timestamp,
|
||||
"created_at_iso": datetime.utcnow().isoformat(),
|
||||
"description": f"Uploaded: {file.filename}",
|
||||
"size_bytes": file_path.stat().st_size,
|
||||
"size_mb": round(file_path.stat().st_size / (1024 * 1024), 2),
|
||||
"type": "uploaded"
|
||||
}
|
||||
|
||||
metadata_path = backups_dir / f"{uploaded_filename}.meta.json"
|
||||
import json
|
||||
with open(metadata_path, 'w') as f:
|
||||
json.dump(metadata, f, indent=2)
|
||||
|
||||
return {
|
||||
"message": "Snapshot uploaded successfully",
|
||||
"snapshot": metadata
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=f"Upload failed: {str(e)}")
|
||||
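To make the CSV round-trip concrete, here is a hedged usage sketch of the export and replace endpoints above. The base URL and the absence of authentication are assumptions; only the /api/settings/export-csv and /api/settings/import-csv-replace paths come from the router itself.

# Illustrative sketch: export the roster, then re-import it with the replace endpoint.
# Assumptions: API reachable at http://localhost:8000, no auth in front of it.
import httpx

BASE = "http://localhost:8000/api/settings"

def export_then_reimport(csv_path: str = "roster_backup.csv") -> dict:
    # Download the current roster as CSV
    resp = httpx.get(f"{BASE}/export-csv", timeout=30)
    resp.raise_for_status()
    with open(csv_path, "wb") as f:
        f.write(resp.content)

    # Replace the roster from that file (rows are parsed before anything is deleted)
    with open(csv_path, "rb") as f:
        resp = httpx.post(
            f"{BASE}/import-csv-replace",
            files={"file": (csv_path, f, "text/csv")},
            timeout=60,
        )
    resp.raise_for_status()
    return resp.json()  # {"message": ..., "deleted": N, "added": N}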
44
app/seismo/routers/units.py
Normal file
@@ -0,0 +1,44 @@
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from sqlalchemy.orm import Session
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any
|
||||
|
||||
from backend.database import get_db
|
||||
from backend.services.snapshot import emit_status_snapshot
|
||||
|
||||
router = APIRouter(prefix="/api", tags=["units"])
|
||||
|
||||
|
||||
@router.get("/unit/{unit_id}")
|
||||
def get_unit_detail(unit_id: str, db: Session = Depends(get_db)):
|
||||
"""
|
||||
Returns detailed data for a single unit.
|
||||
"""
|
||||
snapshot = emit_status_snapshot()
|
||||
|
||||
if unit_id not in snapshot["units"]:
|
||||
raise HTTPException(status_code=404, detail=f"Unit {unit_id} not found")
|
||||
|
||||
unit_data = snapshot["units"][unit_id]
|
||||
|
||||
# Mock coordinates for now (will be replaced with real data)
|
||||
mock_coords = {
|
||||
"BE1234": {"lat": 37.7749, "lon": -122.4194, "location": "San Francisco, CA"},
|
||||
"BE5678": {"lat": 34.0522, "lon": -118.2437, "location": "Los Angeles, CA"},
|
||||
"BE9012": {"lat": 40.7128, "lon": -74.0060, "location": "New York, NY"},
|
||||
"BE3456": {"lat": 41.8781, "lon": -87.6298, "location": "Chicago, IL"},
|
||||
"BE7890": {"lat": 29.7604, "lon": -95.3698, "location": "Houston, TX"},
|
||||
}
|
||||
|
||||
coords = mock_coords.get(unit_id, {"lat": 39.8283, "lon": -98.5795, "location": "Unknown"})
|
||||
|
||||
return {
|
||||
"id": unit_id,
|
||||
"status": unit_data["status"],
|
||||
"age": unit_data["age"],
|
||||
"last_seen": unit_data["last"],
|
||||
"last_file": unit_data.get("fname", ""),
|
||||
"deployed": unit_data["deployed"],
|
||||
"note": unit_data.get("note", ""),
|
||||
"coordinates": coords
|
||||
}
|
||||
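A short usage sketch for the unit-detail endpoint above (illustrative; the base URL and unit ID are placeholders, and the coordinates it returns are still the mocked values noted in the code).

# Illustrative sketch: fetch one unit's merged status and (currently mocked) coordinates.
import httpx

def get_unit(unit_id: str) -> dict:
    resp = httpx.get(f"http://localhost:8000/api/unit/{unit_id}", timeout=10)
    if resp.status_code == 404:
        raise KeyError(f"Unit {unit_id} not found in the latest snapshot")
    resp.raise_for_status()
    return resp.json()

# Example: get_unit("BE1234")["status"] -> "OK" | "Pending" | "Missing" | "Retired"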
286
app/seismo/routes.py
Normal file
@@ -0,0 +1,286 @@
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from sqlalchemy.orm import Session
|
||||
from pydantic import BaseModel
|
||||
from datetime import datetime
|
||||
from typing import Optional, List
|
||||
|
||||
from backend.database import get_db
|
||||
from backend.models import Emitter
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
# Helper function to detect unit type from unit ID
|
||||
def detect_unit_type(unit_id: str) -> str:
|
||||
"""
|
||||
Automatically detect if a unit is Series 3 or Series 4 based on ID pattern.
|
||||
|
||||
Series 4 (Micromate) units have IDs starting with "UM" followed by digits (e.g., UM11719)
|
||||
Series 3 units typically have other patterns
|
||||
|
||||
Returns:
|
||||
"series4" if the unit ID matches Micromate pattern (UM#####)
|
||||
"series3" otherwise
|
||||
"""
|
||||
if not unit_id:
|
||||
return "unknown"
|
||||
|
||||
# Series 4 (Micromate) pattern: UM followed by digits
|
||||
if unit_id.upper().startswith("UM") and len(unit_id) > 2:
|
||||
# Check if remaining characters after "UM" are digits
|
||||
rest = unit_id[2:]
|
||||
if rest.isdigit():
|
||||
return "series4"
|
||||
|
||||
# Default to series3 for other patterns
|
||||
return "series3"
|
||||
|
||||
|
||||
# Pydantic schemas for request/response validation
|
||||
class EmitterReport(BaseModel):
|
||||
unit: str
|
||||
unit_type: str
|
||||
timestamp: str
|
||||
file: str
|
||||
status: str
|
||||
|
||||
|
||||
class EmitterResponse(BaseModel):
|
||||
id: str
|
||||
unit_type: str
|
||||
last_seen: datetime
|
||||
last_file: str
|
||||
status: str
|
||||
notes: Optional[str] = None
|
||||
|
||||
class Config:
|
||||
from_attributes = True
|
||||
|
||||
|
||||
@router.post("/emitters/report", status_code=200)
|
||||
def report_emitter(report: EmitterReport, db: Session = Depends(get_db)):
|
||||
"""
|
||||
Endpoint for emitters to report their status.
|
||||
Creates a new emitter if it doesn't exist, or updates an existing one.
|
||||
"""
|
||||
try:
|
||||
# Parse the timestamp
|
||||
timestamp = datetime.fromisoformat(report.timestamp.replace('Z', '+00:00'))
|
||||
except ValueError:
|
||||
raise HTTPException(status_code=400, detail="Invalid timestamp format")
|
||||
|
||||
# Check if emitter already exists
|
||||
emitter = db.query(Emitter).filter(Emitter.id == report.unit).first()
|
||||
|
||||
if emitter:
|
||||
# Update existing emitter
|
||||
emitter.unit_type = report.unit_type
|
||||
emitter.last_seen = timestamp
|
||||
emitter.last_file = report.file
|
||||
emitter.status = report.status
|
||||
else:
|
||||
# Create new emitter
|
||||
emitter = Emitter(
|
||||
id=report.unit,
|
||||
unit_type=report.unit_type,
|
||||
last_seen=timestamp,
|
||||
last_file=report.file,
|
||||
status=report.status
|
||||
)
|
||||
db.add(emitter)
|
||||
|
||||
db.commit()
|
||||
db.refresh(emitter)
|
||||
|
||||
return {
|
||||
"message": "Emitter report received",
|
||||
"unit": emitter.id,
|
||||
"status": emitter.status
|
||||
}
|
||||
|
||||
|
||||
@router.get("/fleet/status", response_model=List[EmitterResponse])
|
||||
def get_fleet_status(db: Session = Depends(get_db)):
|
||||
"""
|
||||
Returns a list of all emitters and their current status.
|
||||
"""
|
||||
emitters = db.query(Emitter).all()
|
||||
return emitters
|
||||
|
||||
# series3v1.1 Standardized Heartbeat Schema (multi-unit)
|
||||
from fastapi import Request
|
||||
|
||||
@router.post("/api/series3/heartbeat", status_code=200)
|
||||
async def series3_heartbeat(request: Request, db: Session = Depends(get_db)):
|
||||
"""
|
||||
Accepts a full telemetry payload from the Series3 emitter.
|
||||
Updates or inserts each unit into the database.
|
||||
"""
|
||||
payload = await request.json()
|
||||
|
||||
source = payload.get("source_id")
|
||||
units = payload.get("units", [])
|
||||
|
||||
print("\n=== Series 3 Heartbeat ===")
|
||||
print("Source:", source)
|
||||
print("Units received:", len(units))
|
||||
print("==========================\n")
|
||||
|
||||
results = []
|
||||
|
||||
for u in units:
|
||||
uid = u.get("unit_id")
|
||||
last_event_time = u.get("last_event_time")
|
||||
event_meta = u.get("event_metadata", {})
|
||||
age_minutes = u.get("age_minutes")
|
||||
|
||||
try:
|
||||
if last_event_time:
|
||||
ts = datetime.fromisoformat(last_event_time.replace("Z", "+00:00"))
|
||||
else:
|
||||
ts = None
|
||||
except (TypeError, ValueError):
|
||||
ts = None
|
||||
|
||||
# Pull from DB
|
||||
emitter = db.query(Emitter).filter(Emitter.id == uid).first()
|
||||
|
||||
# File name (from event_metadata)
|
||||
last_file = event_meta.get("file_name")
|
||||
status = "Unknown"
|
||||
|
||||
# Determine status based on age
|
||||
if age_minutes is None:
|
||||
status = "Missing"
|
||||
elif age_minutes > 24 * 60:
|
||||
status = "Missing"
|
||||
elif age_minutes > 12 * 60:
|
||||
status = "Pending"
|
||||
else:
|
||||
status = "OK"
|
||||
|
||||
if emitter:
|
||||
# Update existing
|
||||
emitter.last_seen = ts
|
||||
emitter.last_file = last_file
|
||||
emitter.status = status
|
||||
# Update unit_type if it was incorrectly classified
|
||||
detected_type = detect_unit_type(uid)
|
||||
if emitter.unit_type != detected_type:
|
||||
emitter.unit_type = detected_type
|
||||
else:
|
||||
# Insert new - auto-detect unit type from ID
|
||||
detected_type = detect_unit_type(uid)
|
||||
emitter = Emitter(
|
||||
id=uid,
|
||||
unit_type=detected_type,
|
||||
last_seen=ts,
|
||||
last_file=last_file,
|
||||
status=status
|
||||
)
|
||||
db.add(emitter)
|
||||
|
||||
results.append({"unit": uid, "status": status})
|
||||
|
||||
db.commit()
|
||||
|
||||
return {
|
||||
"message": "Heartbeat processed",
|
||||
"source": source,
|
||||
"units_processed": len(results),
|
||||
"results": results
|
||||
}
|
||||
|
||||
|
||||
# series4 (Micromate) Standardized Heartbeat Schema
|
||||
@router.post("/api/series4/heartbeat", status_code=200)
|
||||
async def series4_heartbeat(request: Request, db: Session = Depends(get_db)):
|
||||
"""
|
||||
Accepts a full telemetry payload from the Series4 (Micromate) emitter.
|
||||
Updates or inserts each unit into the database.
|
||||
|
||||
Expected payload:
|
||||
{
|
||||
"source": "series4_emitter",
|
||||
"generated_at": "2025-12-04T20:01:00",
|
||||
"units": [
|
||||
{
|
||||
"unit_id": "UM11719",
|
||||
"type": "micromate",
|
||||
"project_hint": "Clearwater - ECMS 57940",
|
||||
"last_call": "2025-12-04T19:30:42",
|
||||
"status": "OK",
|
||||
"age_days": 0.04,
|
||||
"age_hours": 0.9,
|
||||
"mlg_path": "C:\\THORDATA\\..."
|
||||
}
|
||||
]
|
||||
}
|
||||
"""
|
||||
payload = await request.json()
|
||||
|
||||
source = payload.get("source", "series4_emitter")
|
||||
units = payload.get("units", [])
|
||||
|
||||
print("\n=== Series 4 Heartbeat ===")
|
||||
print("Source:", source)
|
||||
print("Units received:", len(units))
|
||||
print("==========================\n")
|
||||
|
||||
results = []
|
||||
|
||||
for u in units:
|
||||
uid = u.get("unit_id")
|
||||
last_call_str = u.get("last_call")
|
||||
status = u.get("status", "Unknown")
|
||||
mlg_path = u.get("mlg_path")
|
||||
project_hint = u.get("project_hint")
|
||||
|
||||
# Parse last_call timestamp
|
||||
try:
|
||||
if last_call_str:
|
||||
ts = datetime.fromisoformat(last_call_str.replace("Z", "+00:00"))
|
||||
else:
|
||||
ts = None
|
||||
except (TypeError, ValueError):
|
||||
ts = None
|
||||
|
||||
# Pull from DB
|
||||
emitter = db.query(Emitter).filter(Emitter.id == uid).first()
|
||||
|
||||
if emitter:
|
||||
# Update existing
|
||||
emitter.last_seen = ts
|
||||
emitter.last_file = mlg_path
|
||||
emitter.status = status
|
||||
# Update unit_type if it was incorrectly classified
|
||||
detected_type = detect_unit_type(uid)
|
||||
if emitter.unit_type != detected_type:
|
||||
emitter.unit_type = detected_type
|
||||
# Optionally update notes with project hint if it exists
|
||||
if project_hint and not emitter.notes:
|
||||
emitter.notes = f"Project: {project_hint}"
|
||||
else:
|
||||
# Insert new - auto-detect unit type from ID
|
||||
detected_type = detect_unit_type(uid)
|
||||
notes = f"Project: {project_hint}" if project_hint else None
|
||||
emitter = Emitter(
|
||||
id=uid,
|
||||
unit_type=detected_type,
|
||||
last_seen=ts,
|
||||
last_file=mlg_path,
|
||||
status=status,
|
||||
notes=notes
|
||||
)
|
||||
db.add(emitter)
|
||||
|
||||
results.append({"unit": uid, "status": status})
|
||||
|
||||
db.commit()
|
||||
|
||||
return {
|
||||
"message": "Heartbeat processed",
|
||||
"source": source,
|
||||
"units_processed": len(results),
|
||||
"results": results
|
||||
}
|
||||
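As a sanity check on the heartbeat contract described above, here is a hedged example of posting a minimal Series 3 payload. The source_id, unit ID, and file name are made up; only the field names and the /api/series3/heartbeat path come from the handler.

# Illustrative sketch: post a minimal Series 3 heartbeat (field names per the handler above).
from datetime import datetime, timezone
import httpx

payload = {
    "source_id": "office_poller_01",   # hypothetical emitter name
    "units": [
        {
            "unit_id": "BE1234",       # placeholder ID (non-"UM" prefix -> detected as series3)
            "last_event_time": datetime.now(timezone.utc).isoformat(),
            "age_minutes": 15,         # under the 12h threshold -> status "OK"
            "event_metadata": {"file_name": "BE1234_20251204.evt"},
        }
    ],
}

resp = httpx.post("http://localhost:8000/api/series3/heartbeat", json=payload, timeout=10)
resp.raise_for_status()
print(resp.json())  # {"message": "Heartbeat processed", "units_processed": 1, ...}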
0
app/seismo/services/__init__.py
Normal file
145
app/seismo/services/backup_scheduler.py
Normal file
@@ -0,0 +1,145 @@
|
||||
"""
|
||||
Automatic Database Backup Scheduler
|
||||
Handles scheduled automatic backups of the database
|
||||
"""
|
||||
|
||||
import schedule
|
||||
import time
|
||||
import threading
|
||||
from datetime import datetime
|
||||
from typing import Optional
|
||||
import logging
|
||||
|
||||
from backend.services.database_backup import DatabaseBackupService
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BackupScheduler:
|
||||
"""Manages automatic database backups on a schedule"""
|
||||
|
||||
def __init__(self, db_path: str = "./data/seismo_fleet.db", backups_dir: str = "./data/backups"):
|
||||
self.backup_service = DatabaseBackupService(db_path=db_path, backups_dir=backups_dir)
|
||||
self.scheduler_thread: Optional[threading.Thread] = None
|
||||
self.is_running = False
|
||||
|
||||
# Default settings
|
||||
self.backup_interval_hours = 24 # Daily backups
|
||||
self.keep_count = 10 # Keep last 10 backups
|
||||
self.enabled = False
|
||||
|
||||
def configure(self, interval_hours: int = 24, keep_count: int = 10, enabled: bool = True):
|
||||
"""
|
||||
Configure backup scheduler settings
|
||||
|
||||
Args:
|
||||
interval_hours: Hours between automatic backups
|
||||
keep_count: Number of backups to retain
|
||||
enabled: Whether automatic backups are enabled
|
||||
"""
|
||||
self.backup_interval_hours = interval_hours
|
||||
self.keep_count = keep_count
|
||||
self.enabled = enabled
|
||||
|
||||
logger.info(f"Backup scheduler configured: interval={interval_hours}h, keep={keep_count}, enabled={enabled}")
|
||||
|
||||
def create_automatic_backup(self):
|
||||
"""Create an automatic backup and cleanup old ones"""
|
||||
if not self.enabled:
|
||||
logger.info("Automatic backups are disabled, skipping")
|
||||
return
|
||||
|
||||
try:
|
||||
timestamp = datetime.utcnow().strftime("%Y-%m-%d %H:%M UTC")
|
||||
description = f"Automatic backup - {timestamp}"
|
||||
|
||||
logger.info("Creating automatic backup...")
|
||||
snapshot = self.backup_service.create_snapshot(description=description)
|
||||
|
||||
logger.info(f"Automatic backup created: {snapshot['filename']} ({snapshot['size_mb']} MB)")
|
||||
|
||||
# Cleanup old backups
|
||||
cleanup_result = self.backup_service.cleanup_old_snapshots(keep_count=self.keep_count)
|
||||
if cleanup_result['deleted'] > 0:
|
||||
logger.info(f"Cleaned up {cleanup_result['deleted']} old snapshots")
|
||||
|
||||
return snapshot
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Automatic backup failed: {str(e)}")
|
||||
return None
|
||||
|
||||
def start(self):
|
||||
"""Start the backup scheduler in a background thread"""
|
||||
if self.is_running:
|
||||
logger.warning("Backup scheduler is already running")
|
||||
return
|
||||
|
||||
if not self.enabled:
|
||||
logger.info("Backup scheduler is disabled, not starting")
|
||||
return
|
||||
|
||||
logger.info(f"Starting backup scheduler (every {self.backup_interval_hours} hours)")
|
||||
|
||||
# Clear any existing scheduled jobs
|
||||
schedule.clear()
|
||||
|
||||
# Schedule the backup job
|
||||
schedule.every(self.backup_interval_hours).hours.do(self.create_automatic_backup)
|
||||
|
||||
# Also run immediately on startup
|
||||
self.create_automatic_backup()
|
||||
|
||||
# Start the scheduler thread
|
||||
self.is_running = True
|
||||
self.scheduler_thread = threading.Thread(target=self._run_scheduler, daemon=True)
|
||||
self.scheduler_thread.start()
|
||||
|
||||
logger.info("Backup scheduler started successfully")
|
||||
|
||||
def _run_scheduler(self):
|
||||
"""Internal method to run the scheduler loop"""
|
||||
while self.is_running:
|
||||
schedule.run_pending()
|
||||
time.sleep(60) # Check every minute
|
||||
|
||||
def stop(self):
|
||||
"""Stop the backup scheduler"""
|
||||
if not self.is_running:
|
||||
logger.warning("Backup scheduler is not running")
|
||||
return
|
||||
|
||||
logger.info("Stopping backup scheduler...")
|
||||
self.is_running = False
|
||||
schedule.clear()
|
||||
|
||||
if self.scheduler_thread:
|
||||
self.scheduler_thread.join(timeout=5)
|
||||
|
||||
logger.info("Backup scheduler stopped")
|
||||
|
||||
def get_status(self) -> dict:
|
||||
"""Get current scheduler status"""
|
||||
next_run = None
|
||||
if self.is_running and schedule.jobs:
|
||||
next_run = schedule.jobs[0].next_run.isoformat() if schedule.jobs[0].next_run else None
|
||||
|
||||
return {
|
||||
"enabled": self.enabled,
|
||||
"running": self.is_running,
|
||||
"interval_hours": self.backup_interval_hours,
|
||||
"keep_count": self.keep_count,
|
||||
"next_run": next_run
|
||||
}
|
||||
|
||||
|
||||
# Global scheduler instance
|
||||
_scheduler_instance: Optional[BackupScheduler] = None
|
||||
|
||||
|
||||
def get_backup_scheduler() -> BackupScheduler:
|
||||
"""Get or create the global backup scheduler instance"""
|
||||
global _scheduler_instance
|
||||
if _scheduler_instance is None:
|
||||
_scheduler_instance = BackupScheduler()
|
||||
return _scheduler_instance
|
||||
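One way to wire the scheduler in (a sketch only, not part of this commit): configure and start it during application startup and stop it on shutdown. The import path mirrors the backend.services location still used inside the module; adjust it if the app/seismo/services path is what ends up importable.

# Illustrative sketch: enable daily automatic backups for the app's lifetime.
from contextlib import asynccontextmanager
from fastapi import FastAPI

from backend.services.backup_scheduler import get_backup_scheduler  # import path is an assumption

@asynccontextmanager
async def lifespan(app: FastAPI):
    scheduler = get_backup_scheduler()
    scheduler.configure(interval_hours=24, keep_count=10, enabled=True)
    scheduler.start()   # creates one backup immediately, then repeats on the interval
    yield
    scheduler.stop()    # clears scheduled jobs and joins the background thread

app = FastAPI(lifespan=lifespan)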
192
app/seismo/services/database_backup.py
Normal file
@@ -0,0 +1,192 @@
|
||||
"""
|
||||
Database Backup and Restore Service
|
||||
Handles full database snapshots, restoration, and remote synchronization
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import sqlite3
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Optional
|
||||
import json
|
||||
|
||||
|
||||
class DatabaseBackupService:
|
||||
"""Manages database backup operations"""
|
||||
|
||||
def __init__(self, db_path: str = "./data/seismo_fleet.db", backups_dir: str = "./data/backups"):
|
||||
self.db_path = Path(db_path)
|
||||
self.backups_dir = Path(backups_dir)
|
||||
self.backups_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
def create_snapshot(self, description: Optional[str] = None) -> Dict:
|
||||
"""
|
||||
Create a full database snapshot using SQLite backup API
|
||||
Returns snapshot metadata
|
||||
"""
|
||||
if not self.db_path.exists():
|
||||
raise FileNotFoundError(f"Database not found at {self.db_path}")
|
||||
|
||||
# Generate snapshot filename with timestamp
|
||||
timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
|
||||
snapshot_name = f"snapshot_{timestamp}.db"
|
||||
snapshot_path = self.backups_dir / snapshot_name
|
||||
|
||||
# Get database size before backup
|
||||
db_size = self.db_path.stat().st_size
|
||||
|
||||
try:
|
||||
# Use SQLite backup API for safe backup (handles concurrent access)
|
||||
source_conn = sqlite3.connect(str(self.db_path))
|
||||
dest_conn = sqlite3.connect(str(snapshot_path))
|
||||
|
||||
# Perform the backup
|
||||
with dest_conn:
|
||||
source_conn.backup(dest_conn)
|
||||
|
||||
source_conn.close()
|
||||
dest_conn.close()
|
||||
|
||||
# Create metadata
|
||||
metadata = {
|
||||
"filename": snapshot_name,
|
||||
"created_at": timestamp,
|
||||
"created_at_iso": datetime.utcnow().isoformat(),
|
||||
"description": description or "Manual snapshot",
|
||||
"size_bytes": snapshot_path.stat().st_size,
|
||||
"size_mb": round(snapshot_path.stat().st_size / (1024 * 1024), 2),
|
||||
"original_db_size_bytes": db_size,
|
||||
"type": "manual"
|
||||
}
|
||||
|
||||
# Save metadata as JSON sidecar file
|
||||
metadata_path = self.backups_dir / f"{snapshot_name}.meta.json"
|
||||
with open(metadata_path, 'w') as f:
|
||||
json.dump(metadata, f, indent=2)
|
||||
|
||||
return metadata
|
||||
|
||||
except Exception as e:
|
||||
# Clean up partial snapshot if it exists
|
||||
if snapshot_path.exists():
|
||||
snapshot_path.unlink()
|
||||
raise Exception(f"Snapshot creation failed: {str(e)}")
|
||||
|
||||
def list_snapshots(self) -> List[Dict]:
|
||||
"""
|
||||
List all available snapshots with metadata
|
||||
Returns list sorted by creation date (newest first)
|
||||
"""
|
||||
snapshots = []
|
||||
|
||||
for db_file in sorted(self.backups_dir.glob("snapshot_*.db"), reverse=True):
|
||||
metadata_file = self.backups_dir / f"{db_file.name}.meta.json"
|
||||
|
||||
if metadata_file.exists():
|
||||
with open(metadata_file, 'r') as f:
|
||||
metadata = json.load(f)
|
||||
else:
|
||||
# Fallback for legacy snapshots without metadata
|
||||
stat_info = db_file.stat()
|
||||
metadata = {
|
||||
"filename": db_file.name,
|
||||
"created_at": datetime.fromtimestamp(stat_info.st_mtime).strftime("%Y%m%d_%H%M%S"),
|
||||
"created_at_iso": datetime.fromtimestamp(stat_info.st_mtime).isoformat(),
|
||||
"description": "Legacy snapshot",
|
||||
"size_bytes": stat_info.st_size,
|
||||
"size_mb": round(stat_info.st_size / (1024 * 1024), 2),
|
||||
"type": "manual"
|
||||
}
|
||||
|
||||
snapshots.append(metadata)
|
||||
|
||||
return snapshots
|
||||
|
||||
def delete_snapshot(self, filename: str) -> bool:
|
||||
"""Delete a snapshot and its metadata"""
|
||||
snapshot_path = self.backups_dir / filename
|
||||
metadata_path = self.backups_dir / f"{filename}.meta.json"
|
||||
|
||||
if not snapshot_path.exists():
|
||||
raise FileNotFoundError(f"Snapshot {filename} not found")
|
||||
|
||||
snapshot_path.unlink()
|
||||
if metadata_path.exists():
|
||||
metadata_path.unlink()
|
||||
|
||||
return True
|
||||
|
||||
def restore_snapshot(self, filename: str, create_backup_before_restore: bool = True) -> Dict:
|
||||
"""
|
||||
Restore database from a snapshot
|
||||
Creates a safety backup before restoring if requested
|
||||
"""
|
||||
snapshot_path = self.backups_dir / filename
|
||||
|
||||
if not snapshot_path.exists():
|
||||
raise FileNotFoundError(f"Snapshot {filename} not found")
|
||||
|
||||
if not self.db_path.exists():
|
||||
raise FileNotFoundError(f"Database not found at {self.db_path}")
|
||||
|
||||
backup_info = None
|
||||
|
||||
# Create safety backup before restore
|
||||
if create_backup_before_restore:
|
||||
backup_info = self.create_snapshot(description="Auto-backup before restore")
|
||||
|
||||
try:
|
||||
# Replace database file
|
||||
shutil.copy2(str(snapshot_path), str(self.db_path))
|
||||
|
||||
return {
|
||||
"message": "Database restored successfully",
|
||||
"restored_from": filename,
|
||||
"restored_at": datetime.utcnow().isoformat(),
|
||||
"backup_created": backup_info["filename"] if backup_info else None
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
raise Exception(f"Restore failed: {str(e)}")
|
||||
|
||||
def get_database_stats(self) -> Dict:
|
||||
"""Get statistics about the current database"""
|
||||
if not self.db_path.exists():
|
||||
return {"error": "Database not found"}
|
||||
|
||||
conn = sqlite3.connect(str(self.db_path))
|
||||
cursor = conn.cursor()
|
||||
|
||||
# Get table counts
|
||||
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%'")
|
||||
tables = cursor.fetchall()
|
||||
|
||||
table_stats = {}
|
||||
total_rows = 0
|
||||
|
||||
for (table_name,) in tables:
|
||||
cursor.execute(f"SELECT COUNT(*) FROM {table_name}")
|
||||
count = cursor.fetchone()[0]
|
||||
table_stats[table_name] = count
|
||||
total_rows += count
|
||||
|
||||
conn.close()
|
||||
|
||||
db_size = self.db_path.stat().st_size
|
||||
|
||||
return {
|
||||
"database_path": str(self.db_path),
|
||||
"size_bytes": db_size,
|
||||
"size_mb": round(db_size / (1024 * 1024), 2),
|
||||
"total_rows": total_rows,
|
||||
"tables": table_stats,
|
||||
"last_modified": datetime.fromtimestamp(self.db_path.stat().st_mtime).isoformat()
|
||||
}
|
||||
|
||||
def download_snapshot(self, filename: str) -> Path:
|
||||
"""Get the file path for downloading a snapshot"""
|
||||
snapshot_path = self.backups_dir / filename
|
||||
if not snapshot_path.exists():
|
||||
raise FileNotFoundError(f"Snapshot {filename} not found")
|
||||
return snapshot_path
|
||||
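For reference, the service can also be used directly from a maintenance script; the sketch below assumes the default paths (./data/seismo_fleet.db and ./data/backups) and the backend.services import path used elsewhere in this commit.

# Illustrative sketch: create a snapshot and list existing ones via the service.
from backend.services.database_backup import DatabaseBackupService  # import path is an assumption

svc = DatabaseBackupService()

snap = svc.create_snapshot(description="Before schema experiment")
print(f"created {snap['filename']} ({snap['size_mb']} MB)")

for meta in svc.list_snapshots():   # newest first, with .meta.json sidecars when present
    print(meta["filename"], meta["created_at_iso"], meta["description"])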
191
app/seismo/services/snapshot.py
Normal file
@@ -0,0 +1,191 @@
|
||||
from datetime import datetime, timezone
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from backend.database import get_db_session
|
||||
from backend.models import Emitter, RosterUnit, IgnoredUnit
|
||||
|
||||
|
||||
def ensure_utc(dt):
|
||||
if dt is None:
|
||||
return None
|
||||
if dt.tzinfo is None:
|
||||
return dt.replace(tzinfo=timezone.utc)
|
||||
return dt.astimezone(timezone.utc)
|
||||
|
||||
|
||||
def format_age(last_seen):
|
||||
if not last_seen:
|
||||
return "N/A"
|
||||
last_seen = ensure_utc(last_seen)
|
||||
now = datetime.now(timezone.utc)
|
||||
diff = now - last_seen
|
||||
hours = diff.total_seconds() // 3600
|
||||
mins = (diff.total_seconds() % 3600) // 60
|
||||
return f"{int(hours)}h {int(mins)}m"
|
||||
|
||||
|
||||
def calculate_status(last_seen, status_ok_threshold=12, status_pending_threshold=24):
|
||||
"""
|
||||
Calculate status based on how long ago the unit was last seen.
|
||||
|
||||
Args:
|
||||
last_seen: datetime of last seen (UTC)
|
||||
status_ok_threshold: hours before status becomes Pending (default 12)
|
||||
status_pending_threshold: hours before status becomes Missing (default 24)
|
||||
|
||||
Returns:
|
||||
"OK", "Pending", or "Missing"
|
||||
"""
|
||||
if not last_seen:
|
||||
return "Missing"
|
||||
|
||||
last_seen = ensure_utc(last_seen)
|
||||
now = datetime.now(timezone.utc)
|
||||
hours_ago = (now - last_seen).total_seconds() / 3600
|
||||
|
||||
if hours_ago > status_pending_threshold:
|
||||
return "Missing"
|
||||
elif hours_ago > status_ok_threshold:
|
||||
return "Pending"
|
||||
else:
|
||||
return "OK"
|
||||
|
||||
|
||||
def emit_status_snapshot():
|
||||
"""
|
||||
Merge roster (what we *intend*) with emitter data (what is *actually happening*).
|
||||
Status is recalculated based on current time to ensure accuracy.
|
||||
"""
|
||||
|
||||
db = get_db_session()
|
||||
try:
|
||||
# Get user preferences for status thresholds
|
||||
from backend.models import UserPreferences
|
||||
prefs = db.query(UserPreferences).filter_by(id=1).first()
|
||||
status_ok_threshold = prefs.status_ok_threshold_hours if prefs else 12
|
||||
status_pending_threshold = prefs.status_pending_threshold_hours if prefs else 24
|
||||
|
||||
roster = {r.id: r for r in db.query(RosterUnit).all()}
|
||||
emitters = {e.id: e for e in db.query(Emitter).all()}
|
||||
ignored = {i.id for i in db.query(IgnoredUnit).all()}
|
||||
|
||||
units = {}
|
||||
|
||||
# --- Merge roster entries first ---
|
||||
for unit_id, r in roster.items():
|
||||
e = emitters.get(unit_id)
|
||||
if r.retired:
|
||||
# Retired units get separated later
|
||||
status = "Retired"
|
||||
age = "N/A"
|
||||
last_seen = None
|
||||
fname = ""
|
||||
else:
|
||||
if e:
|
||||
last_seen = ensure_utc(e.last_seen)
|
||||
# RECALCULATE status based on current time, not stored value
|
||||
status = calculate_status(last_seen, status_ok_threshold, status_pending_threshold)
|
||||
age = format_age(last_seen)
|
||||
fname = e.last_file
|
||||
else:
|
||||
# Rostered but no emitter data
|
||||
status = "Missing"
|
||||
last_seen = None
|
||||
age = "N/A"
|
||||
fname = ""
|
||||
|
||||
units[unit_id] = {
|
||||
"id": unit_id,
|
||||
"status": status,
|
||||
"age": age,
|
||||
"last": last_seen.isoformat() if last_seen else None,
|
||||
"fname": fname,
|
||||
"deployed": r.deployed,
|
||||
"note": r.note or "",
|
||||
"retired": r.retired,
|
||||
# Device type and type-specific fields
|
||||
"device_type": r.device_type or "seismograph",
|
||||
"last_calibrated": r.last_calibrated.isoformat() if r.last_calibrated else None,
|
||||
"next_calibration_due": r.next_calibration_due.isoformat() if r.next_calibration_due else None,
|
||||
"deployed_with_modem_id": r.deployed_with_modem_id,
|
||||
"ip_address": r.ip_address,
|
||||
"phone_number": r.phone_number,
|
||||
"hardware_model": r.hardware_model,
|
||||
# Location for mapping
|
||||
"location": r.location or "",
|
||||
"address": r.address or "",
|
||||
"coordinates": r.coordinates or "",
|
||||
}
|
||||
|
||||
# --- Add unexpected emitter-only units ---
|
||||
for unit_id, e in emitters.items():
|
||||
if unit_id not in roster:
|
||||
last_seen = ensure_utc(e.last_seen)
|
||||
# RECALCULATE status for unknown units too
|
||||
status = calculate_status(last_seen, status_ok_threshold, status_pending_threshold)
|
||||
units[unit_id] = {
|
||||
"id": unit_id,
|
||||
"status": status,
|
||||
"age": format_age(last_seen),
|
||||
"last": last_seen.isoformat(),
|
||||
"fname": e.last_file,
|
||||
"deployed": False, # default
|
||||
"note": "",
|
||||
"retired": False,
|
||||
# Device type and type-specific fields (defaults for unknown units)
|
||||
"device_type": "seismograph", # default
|
||||
"last_calibrated": None,
|
||||
"next_calibration_due": None,
|
||||
"deployed_with_modem_id": None,
|
||||
"ip_address": None,
|
||||
"phone_number": None,
|
||||
"hardware_model": None,
|
||||
# Location fields
|
||||
"location": "",
|
||||
"address": "",
|
||||
"coordinates": "",
|
||||
}
|
||||
|
||||
# Separate buckets for UI
|
||||
active_units = {
|
||||
uid: u for uid, u in units.items()
|
||||
if not u["retired"] and u["deployed"] and uid not in ignored
|
||||
}
|
||||
|
||||
benched_units = {
|
||||
uid: u for uid, u in units.items()
|
||||
if not u["retired"] and not u["deployed"] and uid not in ignored
|
||||
}
|
||||
|
||||
retired_units = {
|
||||
uid: u for uid, u in units.items()
|
||||
if u["retired"]
|
||||
}
|
||||
|
||||
# Unknown units - emitters that aren't in the roster and aren't ignored
|
||||
unknown_units = {
|
||||
uid: u for uid, u in units.items()
|
||||
if uid not in roster and uid not in ignored
|
||||
}
|
||||
|
||||
return {
|
||||
"timestamp": datetime.utcnow().isoformat(),
|
||||
"units": units,
|
||||
"active": active_units,
|
||||
"benched": benched_units,
|
||||
"retired": retired_units,
|
||||
"unknown": unknown_units,
|
||||
"summary": {
|
||||
"total": len(active_units) + len(benched_units),
|
||||
"active": len(active_units),
|
||||
"benched": len(benched_units),
|
||||
"retired": len(retired_units),
|
||||
"unknown": len(unknown_units),
|
||||
# Status counts only for deployed units (active_units)
|
||||
"ok": sum(1 for u in active_units.values() if u["status"] == "OK"),
|
||||
"pending": sum(1 for u in active_units.values() if u["status"] == "Pending"),
|
||||
"missing": sum(1 for u in active_units.values() if u["status"] == "Missing"),
|
||||
}
|
||||
}
|
||||
finally:
|
||||
db.close()
|
||||
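A small consumer sketch for the snapshot above (illustrative; the import path is an assumption): emit_status_snapshot() manages its own session, so a script only needs the database file in place.

# Illustrative sketch: print a one-line fleet summary from the merged snapshot.
from backend.services.snapshot import emit_status_snapshot  # import path is an assumption

snapshot = emit_status_snapshot()
s = snapshot["summary"]
print(
    f"{s['active']} active / {s['benched']} benched / {s['retired']} retired | "
    f"OK={s['ok']} Pending={s['pending']} Missing={s['missing']}"
)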