feat: Enhance dashboard with filtering options and sync SLM status

- Added a new filtering system to the dashboard for device types and statuses.
- Implemented asynchronous SLM status synchronization to update the Emitter table.
- Updated the status snapshot endpoint to sync SLM status before generating the snapshot.
- Refactored the dashboard HTML to include filter controls and JavaScript for managing filter state.
- Improved the unit detail page to handle modem associations and cascade updates to paired devices.
- Removed redundant code related to syncing start time for measuring devices.
This commit is contained in:
serversdwn
2026-01-28 20:02:10 +00:00
parent 6492fdff82
commit 5ee6f5eb28
7 changed files with 696 additions and 120 deletions
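
Note on the unit detail changes: the modem-association and cascade logic mentioned in the bullets above sits in one of the three files not excerpted below. As a hypothetical illustration only, a cascade on the Terra-View side could mirror the selected modem onto the paired roster record through the deployed_with_unit_id column that snapshot.py reads further down; the RosterUnit model name and the helper below are assumptions, not the committed code.

# Hypothetical sketch of the cascade described in the commit message.
# Only deployed_with_unit_id is taken from the diffs below; the RosterUnit
# model name and this helper are illustrative assumptions.
from typing import Optional
from sqlalchemy.orm import Session

from backend.models import RosterUnit  # assumed model name


def cascade_modem_pairing(db: Session, unit_id: str, modem_id: Optional[str]) -> None:
    """Mirror a unit's modem association onto the paired modem's roster record."""
    # Unlink any modem that previously pointed at this unit.
    for stale in (
        db.query(RosterUnit)
        .filter(RosterUnit.deployed_with_unit_id == unit_id)
        .all()
    ):
        if stale.id != modem_id:
            stale.deployed_with_unit_id = None

    # Link the newly selected modem (if any) back to the unit.
    if modem_id:
        modem = db.query(RosterUnit).filter(RosterUnit.id == modem_id).first()
        if modem:
            modem.deployed_with_unit_id = unit_id

    db.commit()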

View File

@@ -2,20 +2,32 @@ from fastapi import APIRouter, Depends
 from sqlalchemy.orm import Session
 from datetime import datetime, timedelta
 from typing import Dict, Any
+import asyncio
+import logging
 import random
 from backend.database import get_db
 from backend.services.snapshot import emit_status_snapshot
+from backend.services.slm_status_sync import sync_slm_status_to_emitters
 router = APIRouter(prefix="/api", tags=["roster"])
+logger = logging.getLogger(__name__)
 @router.get("/status-snapshot")
-def get_status_snapshot(db: Session = Depends(get_db)):
+async def get_status_snapshot(db: Session = Depends(get_db)):
     """
     Calls emit_status_snapshot() to get current fleet status.
     This will be replaced with real Series3 emitter logic later.
+    Syncs SLM status from SLMM before generating snapshot.
     """
+    # Sync SLM status from SLMM (with timeout to prevent blocking)
+    try:
+        await asyncio.wait_for(sync_slm_status_to_emitters(), timeout=2.0)
+    except asyncio.TimeoutError:
+        logger.warning("SLM status sync timed out, using cached data")
+    except Exception as e:
+        logger.warning(f"SLM status sync failed: {e}")
     return emit_status_snapshot()
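
Usage note: because the sync is wrapped in asyncio.wait_for with a 2-second timeout and broad exception handling, the endpoint keeps answering from cached Emitter data even when SLMM is slow or down. A minimal smoke-test sketch along these lines would exercise that path; the backend.main import path for the app object is an assumption.

# Minimal smoke-test sketch; the app import path is assumed, and the
# snapshot payload is whatever emit_status_snapshot() returns.
from fastapi.testclient import TestClient

from backend.main import app  # assumed location of the FastAPI app


def test_status_snapshot_survives_slmm_outage():
    # If sync_slm_status_to_emitters() times out or raises, the endpoint
    # logs a warning and falls back to the cached Emitter data.
    client = TestClient(app)
    response = client.get("/api/status-snapshot")
    assert response.status_code == 200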

View File

@@ -167,23 +167,7 @@ async def get_live_view(request: Request, unit_id: str, db: Session = Depends(ge
         measurement_state = state_data.get("measurement_state", "Unknown")
         is_measuring = state_data.get("is_measuring", False)
-        # If measuring, sync start time from FTP to database (fixes wrong timestamps)
-        if is_measuring:
-            try:
-                sync_response = await client.post(
-                    f"{SLMM_BASE_URL}/api/nl43/{unit_id}/sync-start-time",
-                    timeout=10.0
-                )
-                if sync_response.status_code == 200:
-                    sync_data = sync_response.json()
-                    logger.info(f"Synced start time for {unit_id}: {sync_data.get('message')}")
-                else:
-                    logger.warning(f"Failed to sync start time for {unit_id}: {sync_response.status_code}")
-            except Exception as e:
-                # Don't fail the whole request if sync fails
-                logger.warning(f"Could not sync start time for {unit_id}: {e}")
-        # Get live status (now with corrected start time)
+        # Get live status (measurement_start_time is already stored in SLMM database)
         status_response = await client.get(
             f"{SLMM_BASE_URL}/api/nl43/{unit_id}/live"
         )

View File

@@ -0,0 +1,125 @@
"""
SLM Status Synchronization Service
Syncs SLM device status from SLMM backend to Terra-View's Emitter table.
This bridges SLMM's polling data with Terra-View's status snapshot system.
SLMM tracks device reachability via background polling. This service
fetches that data and creates/updates Emitter records so SLMs appear
correctly in the dashboard status snapshot.
"""
import logging
from datetime import datetime, timezone
from typing import Dict, Any
from backend.database import get_db_session
from backend.models import Emitter
from backend.services.slmm_client import get_slmm_client, SLMMClientError
logger = logging.getLogger(__name__)
async def sync_slm_status_to_emitters() -> Dict[str, Any]:
"""
Fetch SLM status from SLMM and sync to Terra-View's Emitter table.
For each device in SLMM's polling status:
- If last_success exists, create/update Emitter with that timestamp
- If not reachable, update Emitter with last known timestamp (or None)
Returns:
Dict with synced_count, error_count, errors list
"""
client = get_slmm_client()
synced = 0
errors = []
try:
# Get polling status from SLMM
status_response = await client.get_polling_status()
# Handle nested response structure
data = status_response.get("data", status_response)
devices = data.get("devices", [])
if not devices:
logger.debug("No SLM devices in SLMM polling status")
return {"synced_count": 0, "error_count": 0, "errors": []}
db = get_db_session()
try:
for device in devices:
unit_id = device.get("unit_id")
if not unit_id:
continue
try:
# Get or create Emitter record
emitter = db.query(Emitter).filter(Emitter.id == unit_id).first()
# Determine last_seen from SLMM data
last_success_str = device.get("last_success")
is_reachable = device.get("is_reachable", False)
if last_success_str:
# Parse ISO format timestamp
last_seen = datetime.fromisoformat(
last_success_str.replace("Z", "+00:00")
)
# Convert to naive UTC for consistency with existing code
if last_seen.tzinfo:
last_seen = last_seen.astimezone(timezone.utc).replace(tzinfo=None)
else:
last_seen = None
# Status will be recalculated by snapshot.py based on time thresholds
# Just store a provisional status here
status = "OK" if is_reachable else "Missing"
# Store last error message if available
last_error = device.get("last_error") or ""
if emitter:
# Update existing record
emitter.last_seen = last_seen
emitter.status = status
emitter.unit_type = "slm"
emitter.last_file = last_error
else:
# Create new record
emitter = Emitter(
id=unit_id,
unit_type="slm",
last_seen=last_seen,
last_file=last_error,
status=status
)
db.add(emitter)
synced += 1
except Exception as e:
errors.append(f"{unit_id}: {str(e)}")
logger.error(f"Error syncing SLM {unit_id}: {e}")
db.commit()
finally:
db.close()
if synced > 0:
logger.info(f"Synced {synced} SLM device(s) to Emitter table")
except SLMMClientError as e:
logger.warning(f"Could not reach SLMM for status sync: {e}")
errors.append(f"SLMM unreachable: {str(e)}")
except Exception as e:
logger.error(f"Error in SLM status sync: {e}", exc_info=True)
errors.append(str(e))
return {
"synced_count": synced,
"error_count": len(errors),
"errors": errors
}
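
Since sync_slm_status_to_emitters() is a plain coroutine with no request context, it can also be run on its own, for example from a maintenance script. A usage sketch based on the return shape defined above:

# Usage sketch: run one sync pass and print the summary dict
# ({"synced_count": ..., "error_count": ..., "errors": [...]}).
import asyncio

from backend.services.slm_status_sync import sync_slm_status_to_emitters

if __name__ == "__main__":
    result = asyncio.run(sync_slm_status_to_emitters())
    print(f"synced={result['synced_count']} errors={result['error_count']}")
    for err in result["errors"]:
        print(f"  - {err}")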

View File

@@ -146,6 +146,22 @@ def emit_status_snapshot():
             "coordinates": "",
         }
+
+    # --- Derive modem status from paired devices ---
+    # Modems don't have their own check-in system, so we inherit status
+    # from whatever device they're paired with (seismograph or SLM)
+    for unit_id, unit_data in units.items():
+        if unit_data.get("device_type") == "modem" and not unit_data.get("retired"):
+            roster_unit = roster.get(unit_id)
+            if roster_unit and roster_unit.deployed_with_unit_id:
+                paired_unit_id = roster_unit.deployed_with_unit_id
+                paired_unit = units.get(paired_unit_id)
+                if paired_unit:
+                    # Inherit status from paired device
+                    unit_data["status"] = paired_unit.get("status", "Missing")
+                    unit_data["age"] = paired_unit.get("age", "N/A")
+                    unit_data["last"] = paired_unit.get("last")
+                    unit_data["derived_from"] = paired_unit_id
     # Separate buckets for UI
     active_units = {
         uid: u for uid, u in units.items()
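
The sync service only writes a provisional OK/Missing status; emit_status_snapshot() recomputes status from the age of last_seen before the modem-inheritance pass above runs. The exact thresholds are outside this hunk, so the cutoff in this sketch is a placeholder, not the committed value.

# Illustrative sketch of age-based status classification; the 30-minute
# cutoff is a placeholder, not the threshold emit_status_snapshot() uses.
from datetime import datetime, timedelta
from typing import Optional


def classify_status(last_seen: Optional[datetime], now: Optional[datetime] = None) -> str:
    now = now or datetime.utcnow()
    if last_seen is None or (now - last_seen) > timedelta(minutes=30):
        return "Missing"
    return "OK"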