diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..f26b729
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,41 @@
+# Python cache / compiled
+__pycache__
+*.pyc
+*.pyo
+*.pyd
+.Python
+
+# Build artifacts
+*.so
+*.egg
+*.egg-info
+dist
+build
+
+# VCS
+.git
+.gitignore
+
+# Databases (must live in volumes)
+*.db
+*.db-journal
+
+# Environment / virtualenv
+.env
+.venv
+venv/
+ENV/
+
+# Runtime data (mounted volumes)
+data/
+
+# Editors / OS junk
+.vscode/
+.idea/
+.DS_Store
+Thumbs.db
+.claude
+sfm.code-workspace
+
+# Tests (optional)
+tests/
diff --git a/.gitignore b/.gitignore
index b7faf40..b697ede 100644
--- a/.gitignore
+++ b/.gitignore
@@ -205,3 +205,9 @@ cython_debug/
marimo/_static/
marimo/_lsp/
__marimo__/
+
+# Seismo Fleet Manager
+# SQLite database files
+*.db
+*.db-journal
+data/
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..99ff90c
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,304 @@
+# Changelog
+
+All notable changes to Seismo Fleet Manager will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [0.4.0] - 2025-12-16
+
+### Added
+- **Database Management System**: Comprehensive backup and restore capabilities
+ - **Manual Snapshots**: Create on-demand backups of the entire database with optional descriptions
+ - **Restore from Snapshot**: Restore database from any snapshot with automatic safety backup
+ - **Upload/Download Snapshots**: Transfer database snapshots to/from the server
+ - **Database Tab**: New dedicated tab in Settings for all database management operations
+ - **Database Statistics**: View database size, row counts by table, and last modified time
+ - **Snapshot Metadata**: Each snapshot includes creation time, description, size, and type (manual/automatic)
+ - **Safety Backups**: Automatic backup created before any restore operation
+- **Remote Database Cloning**: Dev tools for cloning production database to remote development servers
+ - **Clone Script**: `scripts/clone_db_to_dev.py` for copying database over WAN
+ - **Network Upload**: Upload snapshots via HTTP to remote servers
+ - **Auto-restore**: Automatically restore uploaded database on target server
+ - **Authentication Support**: Optional token-based authentication for secure transfers
+- **Automatic Backup Scheduler**: Background service for automated database backups
+ - **Configurable Intervals**: Set backup frequency (default: 24 hours)
+ - **Retention Management**: Automatically delete old backups (configurable keep count)
+ - **Manual Trigger**: Force immediate backup via API
+ - **Status Monitoring**: Check scheduler status and next scheduled run time
+ - **Background Thread**: Non-blocking operation using Python threading
+- **Settings Reorganization**: Improved tab structure for better organization
+ - Renamed "Data Management" tab to "Roster Management"
+ - Moved CSV Replace Mode from Advanced tab to Roster Management tab
+ - Created dedicated Database tab for all backup/restore operations
+- **Comprehensive Documentation**: New `docs/DATABASE_MANAGEMENT.md` guide covering:
+ - Manual snapshot creation and restoration workflows
+ - Download/upload procedures for off-site backups
+ - Remote database cloning setup and usage
+ - Automatic backup configuration and integration
+ - API reference for all database endpoints
+ - Best practices and troubleshooting guide
+
+### Changed
+- **Settings Tab Organization**: Restructured for better logical grouping
+ - **General**: Display preferences (timezone, theme, auto-refresh)
+ - **Roster Management**: CSV operations and roster table (now includes Replace Mode)
+ - **Database**: All backup/restore operations (NEW)
+ - **Advanced**: Power user settings (calibration, thresholds)
+ - **Danger Zone**: Destructive operations
+- CSV Replace Mode warnings enhanced and moved to Roster Management context
+
+### Technical Details
+- **SQLite Backup API**: Uses native SQLite backup API for concurrent-safe snapshots
+- **Metadata Tracking**: JSON sidecar files store snapshot metadata alongside database files
+- **Atomic Operations**: Database restoration is atomic with automatic rollback on failure
+- **File Structure**: Snapshots stored in `./data/backups/` with timestamped filenames
+- **API Endpoints**: 7 new endpoints for database management operations
+- **Backup Service**: `backend/services/database_backup.py` - Core backup/restore logic
+- **Scheduler Service**: `backend/services/backup_scheduler.py` - Automatic backup automation
+- **Clone Utility**: `scripts/clone_db_to_dev.py` - Remote database synchronization tool
+
+### Security Considerations
+- Snapshots contain full database data and should be secured appropriately
+- Remote cloning supports optional authentication tokens
+- Restore operations require safety backup creation by default
+- All destructive operations remain in Danger Zone with warnings
+
+### Migration Notes
+No database migration required for v0.4.0. All new features use existing database structure and add new backup management capabilities without modifying the core schema.
+
+## [0.3.3] - 2025-12-12
+
+### Changed
+- **Mobile Navigation**: Moved hamburger menu button from floating top-right to bottom navigation bar
+ - Bottom nav now shows: Menu (hamburger), Dashboard, Roster, Settings
+ - Removed "Add Unit" from bottom nav (still accessible via sidebar menu)
+ - Hamburger no longer floats over content on mobile
+- **Status Dot Visibility**: Increased status dot size from 12px to 16px (w-3/h-3 → w-4/h-4) in dashboard fleet overview for better at-a-glance visibility
+ - Affects both Active and Benched tabs in dashboard
+ - Makes status colors (green/yellow/red) easier to spot during quick scroll
+
+### Fixed
+- **Location Navigation**: Moved tap-to-navigate functionality from roster card view to unit detail modal only
+ - Roster cards now show simple location text with pin emoji
+ - Navigation links (opening Maps app) only appear in the modal when tapping a unit
+ - Reduces visual clutter and accidental navigation triggers
+
+### Technical Details
+- Bottom navigation remains at 4 buttons, first button now triggers sidebar menu
+- Removed standalone hamburger button element and associated CSS
+- Modal already had navigation links, no changes needed there
+
+## [0.3.2] - 2025-12-12
+
+### Added
+- **Progressive Web App (PWA) Mobile Optimization**: Complete mobile-first redesign for field deployment usage
+ - **Responsive Navigation**: Hamburger menu with slide-in sidebar for mobile, always-visible sidebar for desktop
+ - **Bottom Navigation Bar**: Quick access to Dashboard, Roster, Add Unit, and Settings (mobile only)
+ - **Mobile Card View**: Compact card layout for roster units with status dots, location, and project ID
+  - **Tap-to-Navigate**: Location addresses and coordinates are clickable and open in the user's default navigation app (Google Maps, Apple Maps, Waze, etc.)
+ - **Unit Detail Modal**: Bottom sheet modal showing full unit details with edit capabilities (tap any unit card to open)
+ - **Touch Optimization**: 44x44px minimum button targets following iOS/Android accessibility guidelines
+ - **Service Worker**: Network-first caching strategy for offline-capable operation
+ - **IndexedDB Storage**: Offline data persistence for unit information and pending edits
+ - **Background Sync**: Queues edits made while offline and syncs automatically when connection returns
+ - **Offline Indicator**: Visual banner showing offline status with manual sync button
+ - **PWA Manifest**: Installable as a standalone app on mobile devices with custom icons
+ - **Hard Reload Button**: "Clear Cache & Reload" utility in sidebar menu to force fresh JavaScript/CSS
+- **Mobile-Specific Files**:
+ - `backend/static/mobile.css` - Mobile UI styles, hamburger menu, bottom nav, cards, modals
+ - `backend/static/mobile.js` - Mobile interactions, offline sync, modal management
+ - `backend/static/sw.js` - Service worker for PWA functionality
+ - `backend/static/offline-db.js` - IndexedDB wrapper for offline storage
+ - `backend/static/manifest.json` - PWA configuration
+ - `backend/static/icons/` - 8 PWA icon sizes (72px-512px)
+
+### Changed
+- **Dashboard Alerts**: Only show Missing units in notifications (Pending units no longer appear in alerts)
+- **Roster Template**: Mobile card view shows status from server-side render instead of fetching separately
+- **Mobile Status Display**: Benched units show "Benched" label instead of "Unknown" or "N/A"
+- **Base Template**: Added cache-busting query parameters to JavaScript files (e.g., `mobile.js?v=0.3.2`)
+- **Sidebar Menu**: Added utility section with "Toggle theme" and "Clear Cache & Reload" buttons
+
+### Fixed
+- **Modal Status Display**: Fixed unit detail modal showing "Unknown" status by passing status data from card to modal
+- **Mobile Card Status**: Fixed grey dot with "Unknown" label for benched units - now properly shows deployment state
+- **Status Data Passing**: Roster cards now pass status and age to modal via function parameters and global status map
+- **Service Worker Caching**: Aggressive browser caching issue resolved with version query parameters and hard reload function
+
+### Technical Details
+- Mobile breakpoint at 768px (`md:` prefix in TailwindCSS)
+- PWA installable via Add to Home Screen on iOS/Android
+- Service worker caches all static assets with network-first strategy
+- Google Maps search API used for universal navigation links (works across all map apps)
+- Status map stored in `window.rosterStatusMap` from server-side rendered data
+- Hard reload function clears service worker caches, unregisters workers, and deletes IndexedDB
+
+## [0.3.1] - 2025-12-12
+
+### Fixed
+- **Dashboard Notifications**: Removed Pending units from alert list - only Missing units now trigger notifications
+- **Status Dots**: Verified deployed units display correct status dots (OK=green, Pending=yellow, Missing=red) in both active and benched tables
+- **Mobile Card View**: Fixed roster cards showing "Unknown" status by using `.get()` with defaults in backend routes
+- **Backend Status Handling**: Added default values for status, age, last_seen fields to prevent KeyError exceptions
+
+### Changed
+- Backend roster partial routes (`/partials/roster-deployed`, `/partials/roster-benched`) now use `.get()` method with sensible defaults
+- Deployed units default to "Unknown" status when data unavailable
+- Benched units default to "N/A" status when data unavailable
+
+## [0.3.0] - 2025-12-09
+
+### Added
+- **Series 4 (Micromate) Support**: New `/api/series4/heartbeat` endpoint for receiving telemetry from Series 4 Micromate units
+ - Auto-detection of Series 4 units via UM##### ID pattern
+ - Stores project hints from emitter payload in unit notes
+ - Automatic unit type classification across both Series 3 and Series 4 endpoints
+- **Development Environment Labels**: Visual indicators to distinguish dev from production deployments
+ - Yellow "DEV" badge in sidebar navigation
+ - "[DEV]" prefix in browser title
+ - Yellow banner on dashboard when running in development mode
+ - Environment variable support in docker-compose.yml (ENVIRONMENT=production|development)
+- **Quality of Life Improvements**:
+ - Human-readable relative timestamps (e.g., "2h 15m ago", "3d ago") with full date in tooltips
+ - "Last Updated" timestamp indicator on dashboard
+ - Status icons for colorblind accessibility (checkmark for OK, clock for Pending, X for Missing)
+ - Breadcrumb navigation on unit detail pages
+ - Copy-to-clipboard buttons for unit IDs
+ - Search/filter functionality for fleet roster table
+ - Improved empty state messages with icons
+- **Timezone Support**: Comprehensive timezone handling across the application
+ - Timezone selector in Settings (defaults to America/New_York EST)
+ - Human-readable timestamp format (e.g., "9/10/2020 8:00 AM EST")
+ - Timezone-aware display for all timestamps site-wide
+ - Settings stored in localStorage for immediate effect
+- **Settings Page Redesign**: Complete overhaul with tabbed interface and persistent preferences
+ - **General Tab**: Display preferences (timezone, theme, auto-refresh interval)
+ - **Data Management Tab**: Safe operations (CSV export, merge import, roster table)
+ - **Advanced Tab**: Power user settings (replace mode import, calibration defaults, status thresholds)
+ - **Danger Zone Tab**: Destructive operations isolated with enhanced warnings
+ - Backend preferences storage via new UserPreferences model
+ - Tab state persistence in localStorage
+ - Smooth animations and consistent styling with existing pages
+- **User Preferences API**: New backend endpoints for persistent settings storage
+ - `GET /api/settings/preferences` - Retrieve all user preferences
+ - `PUT /api/settings/preferences` - Update preferences (supports partial updates)
+ - Database-backed storage for cross-device preference sync
+ - Migration script: `backend/migrate_add_user_preferences.py`
+
+### Changed
+- Timestamps now display in user-selected timezone with human-readable format throughout the application
+- Settings page reorganized from 644-line flat layout to clean 4-tab interface
+- CSV Replace Mode moved from Data Management to Advanced tab with additional warnings
+- Import operations separated: safe merge in Data Management tab, destructive replace in Advanced tab
+- Page title changed from "Roster Manager" to "Settings" for better clarity
+- All preferences now persist to backend database instead of relying solely on localStorage
+
+### Fixed
+- Unit type classification now consistent across Series 3 and Series 4 heartbeat endpoints
+- Auto-correction of misclassified unit types when they report to wrong endpoint
+
+### Technical Details
+- New `detect_unit_type()` helper function for pattern-based unit classification
+- UserPreferences model with single-row table pattern (id=1) for global settings
+- Series 4 units identified by UM prefix followed by digits (e.g., UM11719)
+- JavaScript Intl API used for client-side timezone conversion
+- Pydantic schema for partial preference updates (PreferencesUpdate model)
+- Environment context injection via custom FastAPI template response wrapper
+
+## [0.2.1] - 2025-12-03
+
+### Added
+- `/settings` roster manager page with CSV export/import, live stats, and danger-zone reset controls.
+- `/api/settings` router that exposes `export-csv`, `stats`, `roster-units`, `import-csv-replace`, and the clear-* endpoints backing the UI.
+- Dedicated HTMX partials/tabs for deployed, benched, retired, and ignored units plus new ignored-table UI to unignore or delete entries.
+
+### Changed
+- Roster and unit detail templates now display device-type specific metadata (calibration windows, modem pairings, IP/phone fields) alongside inline actions.
+- Base navigation highlights the new settings workflow and routes retired/ignored buckets through dedicated endpoints + partials.
+
+### Fixed
+- Snapshot summary counts only consider deployed units, preventing dashboard alerts from including benched hardware.
+- Snapshot payloads now include address/coordinate metadata so map widgets and CSV exports stay accurate.
+
+## [0.2.0] - 2025-12-03
+
+### Added
+- Device-type aware roster schema (seismographs vs modems) with new metadata columns plus `backend/migrate_add_device_types.py` for upgrading existing SQLite files.
+- `create_test_db.py` helper that generates a ready-to-use demo database with sample seismographs, modems, and emitter rows.
+- Ignore list persistence/API so noisy legacy emitters can be quarantined via `/api/roster/ignore` and surfaced in the UI.
+- Roster page enhancements: Add Unit modal, CSV import modal, and HTMX-powered table fed by `/partials/roster-table`.
+- Unit detail view rewritten to fetch data via API, expose deployment status, and allow edits to all metadata.
+
+### Changed
+- Snapshot service now merges roster + emitter data into active/benched/retired/unknown buckets and includes device-specific metadata in each record.
+- Roster edit endpoints parse date fields, manage modem/seismograph specific attributes, and guarantee records exist when toggling deployed/retired states.
+- Dashboard partial endpoints are grouped under `/dashboard/*` so HTMX tabs stay in sync with the consolidated snapshot payload.
+
+### Fixed
+- Toggling deployed/retired flags no longer fails when a unit does not exist because the router now auto-creates placeholder roster rows.
+- CSV import applies address/coordinate updates instead of silently dropping unknown columns.
+
+## [0.1.1] - 2025-12-02
+
+### Added
+- **Roster Editing API**: Full CRUD operations for roster management
+ - `POST /api/roster/add` - Add new units to roster
+ - `POST /api/roster/set-deployed/{unit_id}` - Toggle deployment status
+ - `POST /api/roster/set-retired/{unit_id}` - Toggle retired status
+ - `POST /api/roster/set-note/{unit_id}` - Update unit notes
+- **CSV Import**: Bulk roster import functionality
+ - `POST /api/roster/import-csv` - Import units from CSV file
+ - Support for all roster fields: unit_id, unit_type, deployed, retired, note, project_id, location
+ - Optional update_existing parameter to control duplicate handling
+ - Detailed import summary with added/updated/skipped/error counts
+- **Enhanced Database Models**:
+ - Added `project_id` field to RosterUnit model
+ - Added `location` field to RosterUnit model
+ - Added `last_updated` timestamp tracking
+- **Dashboard Enhancements**:
+ - Separate views for Active, Benched, and Retired units
+ - New endpoints: `/dashboard/active` and `/dashboard/benched`
+
+### Fixed
+- Database session management bug in `emit_status_snapshot()`
+ - Added `get_db_session()` helper function for direct session access
+ - Implemented proper session cleanup with try/finally blocks
+- Database schema synchronization issues
+ - Database now properly recreates when model changes are detected
+
+### Changed
+- Updated RosterUnit model to include additional metadata fields
+- Improved error handling in CSV import with row-level error reporting
+- Enhanced snapshot service to properly manage database connections
+
+### Technical Details
+- All roster editing endpoints use Form data for better HTML form compatibility
+- CSV import uses multipart/form-data for file uploads
+- Boolean fields in CSV accept: 'true', '1', 'yes' (case-insensitive)
+- Database sessions now properly closed to prevent connection leaks
+
+## [0.1.0] - 2024-11-20
+
+### Added
+- Initial release of Seismo Fleet Manager
+- FastAPI-based REST API for fleet management
+- SQLite database with SQLAlchemy ORM
+- Emitter reporting endpoints
+- Basic fleet status monitoring
+- Docker and Docker Compose support
+- Web-based dashboard with HTMX
+- Dark/light mode toggle
+- Interactive maps with Leaflet
+- Photo management per unit
+- Automated status categorization (OK/Pending/Missing)
+
+[0.4.0]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.3.3...v0.4.0
+[0.3.3]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.3.2...v0.3.3
+[0.3.2]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.3.1...v0.3.2
+[0.3.1]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.3.0...v0.3.1
+[0.3.0]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.2.1...v0.3.0
+[0.2.1]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.2.0...v0.2.1
+[0.2.0]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.1.1...v0.2.0
+[0.1.1]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.1.0...v0.1.1
+[0.1.0]: https://github.com/serversdwn/seismo-fleet-manager/releases/tag/v0.1.0
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..d567991
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,19 @@
+FROM python:3.11-slim
+
+# Set working directory
+WORKDIR /app
+
+# Copy requirements first for better caching
+COPY requirements.txt .
+
+# Install dependencies
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy application code
+COPY . .
+
+# Expose port
+EXPOSE 8001
+
+# Run the application using the new backend structure
+CMD ["uvicorn", "backend.main:app", "--host", "0.0.0.0", "--port", "8001"]
diff --git a/FRONTEND_README.md b/FRONTEND_README.md
new file mode 100644
index 0000000..b9e2fdb
--- /dev/null
+++ b/FRONTEND_README.md
@@ -0,0 +1,303 @@
+# Seismo Fleet Manager - Frontend Documentation
+
+## Overview
+
+This is the MVP frontend scaffold for **Seismo Fleet Manager**, built with:
+- **FastAPI** (backend framework)
+- **HTMX** (dynamic updates without JavaScript frameworks)
+- **TailwindCSS** (utility-first styling)
+- **Jinja2** (server-side templating)
+- **Leaflet** (interactive maps)
+
+No React, Vue, or other frontend frameworks are used.
+
+## Project Structure
+
+```
+seismo-fleet-manager/
+├── backend/
+│ ├── main.py # FastAPI app entry point
+│ ├── routers/
+│ │ ├── roster.py # Fleet roster endpoints
+│ │ ├── units.py # Individual unit endpoints
+│ │ └── photos.py # Photo management endpoints
+│ ├── services/
+│ │ └── snapshot.py # Mock status snapshot (replace with real logic)
+│ ├── static/
+│ │ └── style.css # Custom CSS
+│ ├── database.py # SQLAlchemy database setup
+│ ├── models.py # Database models
+│ └── routes.py # Legacy API routes
+├── templates/
+│ ├── base.html # Base layout with sidebar & dark mode
+│ ├── dashboard.html # Main dashboard page
+│ ├── roster.html # Fleet roster page
+│ ├── unit_detail.html # Unit detail page
+│ └── partials/
+│ └── roster_table.html # HTMX partial for roster table
+├── data/
+│ └── photos/ # Photo storage (organized by unit_id)
+└── requirements.txt
+```
+
+## Running the Application
+
+### Install Dependencies
+
+```bash
+pip install -r requirements.txt
+```
+
+### Run the Server
+
+```bash
+uvicorn backend.main:app --host 0.0.0.0 --port 8001 --reload
+```
+
+The application will be available at:
+- **Web UI**: http://localhost:8001/
+- **API Docs**: http://localhost:8001/docs
+- **Health Check**: http://localhost:8001/health
+
+## Features
+
+### 1. Dashboard (`/`)
+
+The main dashboard provides an at-a-glance view of the fleet:
+
+- **Fleet Summary Card**: Total units, deployed units, status breakdown
+- **Recent Alerts Card**: Shows units with Missing or Pending status
+- **Recent Photos Card**: Placeholder for photo gallery
+- **Fleet Status Preview**: Quick view of first 5 units
+
+**Auto-refresh**: Dashboard updates every 10 seconds via HTMX
+
+### 2. Fleet Roster (`/roster`)
+
+A comprehensive table view of all seismograph units:
+
+**Columns**:
+- Status indicator (colored dot: green=OK, yellow=Pending, red=Missing)
+- Deployment indicator (blue dot if deployed)
+- Unit ID
+- Last seen timestamp
+- Age since last contact
+- Notes
+- Actions (View detail button)
+
+**Features**:
+- Auto-refresh every 10 seconds
+- Sorted by priority (Missing > Pending > OK)
+- Click any row to view unit details
+
+### 3. Unit Detail Page (`/unit/{unit_id}`)
+
+Split-screen layout with detailed information:
+
+**Left Column**:
+- Status card with real-time updates
+- Deployment status
+- Last contact time and file
+- Notes section
+- Editable metadata (mock form)
+
+**Right Column - Tabbed Interface**:
+- **Photos Tab**: Primary photo with thumbnail gallery
+- **Map Tab**: Interactive Leaflet map showing unit location
+- **History Tab**: Placeholder for event history
+
+**Auto-refresh**: Unit data updates every 10 seconds
+
+### 4. Dark/Light Mode
+
+Toggle button in sidebar switches between themes:
+- Uses Tailwind's `dark:` classes
+- Preference saved to localStorage
+- Smooth transitions on theme change
+
+## API Endpoints
+
+### Status & Fleet Data
+
+```http
+GET /api/status-snapshot
+```
+Returns complete fleet status snapshot with statistics.
+
+```http
+GET /api/roster
+```
+Returns sorted list of all units for roster table.
+
+```http
+GET /api/unit/{unit_id}
+```
+Returns detailed information for a single unit including coordinates.
+
+### Photo Management
+
+```http
+GET /api/unit/{unit_id}/photos
+```
+Returns list of photos for a unit, sorted by recency.
+
+```http
+GET /api/unit/{unit_id}/photo/{filename}
+```
+Serves a specific photo file.
+
+### Legacy Endpoints
+
+```http
+POST /emitters/report
+```
+Endpoint for emitters to report status (from original backend).
+
+```http
+GET /fleet/status
+```
+Returns database-backed fleet status (from original backend).
+
+## Mock Data
+
+### Location: `backend/services/snapshot.py`
+
+The `emit_status_snapshot()` function currently returns mock data with 8 units:
+
+- **BE1234**: OK, deployed (San Francisco)
+- **BE5678**: Pending, deployed (Los Angeles)
+- **BE9012**: Missing, deployed (New York)
+- **BE3456**: OK, benched (Chicago)
+- **BE7890**: OK, deployed (Houston)
+- **BE2468**: Pending, deployed
+- **BE1357**: OK, benched
+- **BE8642**: Missing, deployed
+
+**To replace with real data**: Update the `emit_status_snapshot()` function to call your Series3 emitter logic.
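+
+A minimal sketch of what that might look like, assuming the roster and emitter models described in the main README (the exact return shape must stay whatever the dashboard templates already consume):
+
+```python
+# Illustrative only: replace the mock snapshot with a DB-backed one.
+from backend.database import get_db_session
+from backend.models import Emitter, RosterUnit
+
+
+def emit_status_snapshot() -> dict:
+    db = get_db_session()
+    try:
+        active, benched = {}, {}
+        for unit in db.query(RosterUnit).filter_by(retired=False):
+            emitter = db.query(Emitter).filter_by(id=unit.id).first()
+            if unit.deployed:
+                status = emitter.status if emitter else "Missing"
+                bucket = active
+            else:
+                status = "N/A"
+                bucket = benched
+            bucket[unit.id] = {
+                "status": status,
+                "last": emitter.last_seen.isoformat() if emitter and emitter.last_seen else None,
+                "deployed": unit.deployed,
+                "note": unit.note or "",
+            }
+        return {"active": active, "benched": benched}
+    finally:
+        db.close()
+```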
+
+## Styling
+
+### Color Palette
+
+The application uses your brand colors:
+
+```css
+orange: #f48b1c
+navy: #142a66
+burgundy: #7d234d
+```
+
+These are configured in the Tailwind config as `seismo-orange`, `seismo-navy`, `seismo-burgundy`.
+
+### Cards
+
+All cards use consistent styling along these lines (illustrative; check the templates for the exact classes):
+```html
+<div class="bg-white dark:bg-gray-800 rounded-lg shadow p-6">
+  <!-- card content -->
+</div>
+```
+
+### Status Indicators
+
+- Green dot: OK status
+- Yellow dot: Pending status
+- Red dot: Missing status
+- Blue dot: Deployed
+- Gray dot: Benched
+
+## HTMX Usage
+
+HTMX enables dynamic updates without writing JavaScript:
+
+### Auto-refresh Example (Dashboard)
+
+Something along these lines (illustrative; see `templates/dashboard.html` for the actual markup):
+
+```html
+<div hx-get="/api/status-snapshot"
+     hx-trigger="load, every 10s"
+     hx-swap="none">
+</div>
+```
+
+This fetches the snapshot on page load and every 10 seconds, then calls a JavaScript function to update the DOM.
+
+### Partial Template Loading (Roster)
+
+For example (illustrative):
+
+```html
+<div hx-get="/partials/roster-table"
+     hx-trigger="load, every 10s"
+     hx-swap="innerHTML">
+</div>
+```
+
+This replaces the entire inner HTML with the server-rendered roster table every 10 seconds.
+
+## Adding Photos
+
+To add photos for a unit:
+
+1. Create a directory: `data/photos/{unit_id}/`
+2. Add image files (jpg, jpeg, png, gif, webp)
+3. Photos will automatically appear on the unit detail page
+4. Most recent file becomes the primary photo
+
+Example:
+```bash
+mkdir -p data/photos/BE1234
+cp my-photo.jpg data/photos/BE1234/deployment-site.jpg
+```
+
+## Customization
+
+### Adding New Pages
+
+1. Create a template in `templates/`
+2. Add a route in `backend/main.py`:
+
+```python
+@app.get("/my-page", response_class=HTMLResponse)
+async def my_page(request: Request):
+ return templates.TemplateResponse("my_page.html", {"request": request})
+```
+
+3. Add a navigation link in `templates/base.html`
+
+### Adding New API Endpoints
+
+1. Create a router file in `backend/routers/`
+2. Include the router in `backend/main.py`:
+
+```python
+from backend.routers import my_router
+app.include_router(my_router.router)
+```
+
+## Docker Deployment
+
+The project includes Docker configuration:
+
+```bash
+docker-compose up
+```
+
+This will start the application on port 8001 (configured to avoid conflicts with port 8000).
+
+## Next Steps
+
+1. **Replace Mock Data**: Update `backend/services/snapshot.py` with real Series3 emitter logic
+2. **Database Integration**: The existing SQLAlchemy models can store historical data
+3. **Photo Upload**: Add a form to upload photos from the UI
+4. **Projects Management**: Implement the "Projects" page
+5. **Settings**: Add user preferences and configuration
+6. **Event History**: Populate the History tab with real event data
+7. **Authentication**: Add user login/authentication if needed
+8. **Notifications**: Add real-time alerts for critical status changes
+
+## Development Tips
+
+- The `--reload` flag auto-reloads the server when code changes
+- Use browser DevTools to debug HTMX requests (look for `HX-Request` headers)
+- Check `/docs` for interactive API documentation (Swagger UI)
+- Dark mode state persists in browser localStorage
+- All timestamps are currently mock data - replace with real values
+
+## License
+
+See main README.md for license information.
diff --git a/README.md b/README.md
index 661646a..3451713 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,551 @@
-# seismo-fleet-manager
-Web app and backend for tracking deployed units.
+# Seismo Fleet Manager v0.4.0
+Backend API and HTMX-powered web interface for managing a mixed fleet of seismographs and field modems. Track deployments, monitor health in real time, merge roster intent with incoming telemetry, and control your fleet through a unified database and dashboard.
+
+## Features
+
+- **Progressive Web App (PWA)**: Mobile-first responsive design optimized for field deployment operations
+ - **Install as App**: Add to home screen on iOS/Android for native app experience
+ - **Offline Capable**: Service worker caching with IndexedDB storage for offline operation
+ - **Touch Optimized**: 44x44px minimum touch targets, hamburger menu, bottom navigation bar
+ - **Mobile Card View**: Compact unit cards with status dots, tap-to-navigate locations, and detail modals
+ - **Background Sync**: Queue edits while offline and automatically sync when connection returns
+- **Web Dashboard**: Modern, responsive UI with dark/light mode, live HTMX updates, and integrated fleet map
+- **Fleet Monitoring**: Track deployed, benched, retired, and ignored units in separate buckets with unknown-emitter triage
+- **Roster Management**: Full CRUD + CSV import/export, device-type aware metadata, and inline actions from the roster tables
+- **Settings & Safeguards**: `/settings` page exposes roster stats, exports, replace-all imports, and danger-zone reset tools
+- **Device & Modem Metadata**: Capture calibration windows, modem pairings, phone/IP details, and addresses per unit
+- **Status Management**: Automatically mark deployed units as OK, Pending (>12h), or Missing (>24h) based on recent telemetry
+- **Data Ingestion**: Accept reports from emitter scripts via REST API
+- **Photo Management**: Upload and view photos for each unit
+- **Interactive Maps**: Leaflet-based maps showing unit locations with tap-to-navigate for mobile
+- **SQLite Storage**: Lightweight, file-based database for easy deployment
+- **Database Management**: Comprehensive backup and restore system
+ - **Manual Snapshots**: Create on-demand backups with descriptions
+ - **Restore from Snapshot**: Restore database with automatic safety backups
+ - **Upload/Download**: Transfer database snapshots for off-site storage
+ - **Remote Cloning**: Copy production database to remote dev servers over WAN
+ - **Automatic Backups**: Scheduled background backups with configurable retention
+
+## Roster Manager & Settings
+
+Visit [`/settings`](http://localhost:8001/settings) to perform bulk roster operations with guardrails:
+
+- **CSV export/import**: Download the entire roster, merge updates, or replace all units in one transaction.
+- **Live roster table**: Fetch every unit via HTMX, edit metadata, toggle deployed/retired states, move emitters to the ignore list, or delete records in-place.
+- **Database backups**: Create snapshots, restore from backups, upload/download database files, view database statistics.
+- **Remote cloning**: Clone production database to remote development servers over the network (see `scripts/clone_db_to_dev.py`).
+- **Stats at a glance**: View counts for the roster, emitters, and ignored units to confirm import/cleanup operations worked.
+- **Danger zone controls**: Clear specific tables or wipe all fleet data when resetting a lab/demo environment.
+
+All UI actions call `GET/POST /api/settings/*` endpoints so you can automate the same workflows from scripts. See [docs/DATABASE_MANAGEMENT.md](docs/DATABASE_MANAGEMENT.md) for comprehensive database backup and restore documentation.
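+
+For example, a maintenance script might check roster stats and take a snapshot before a bulk import. A sketch (the `description` form field name is an assumption; check `/docs` for the exact request schemas):
+
+```python
+import requests
+
+BASE = "http://localhost:8001"
+
+# Sanity-check counts before touching the roster
+print(requests.get(f"{BASE}/api/settings/stats").json())
+
+# Take an on-demand database snapshot first
+resp = requests.post(
+    f"{BASE}/api/settings/database/snapshot",
+    data={"description": "pre-import backup"},  # assumed field name
+)
+print(resp.json())
+```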
+
+## Tech Stack
+
+- **FastAPI**: Modern, fast web framework
+- **SQLAlchemy**: SQL toolkit and ORM
+- **SQLite**: Lightweight database
+- **HTMX**: Dynamic updates without heavy JavaScript frameworks
+- **TailwindCSS**: Utility-first CSS framework
+- **Leaflet**: Interactive maps
+- **Jinja2**: Server-side templating
+- **uvicorn**: ASGI server
+- **Docker**: Containerization for easy deployment
+
+## Quick Start with Docker Compose (Recommended)
+
+### Prerequisites
+- Docker and Docker Compose installed
+
+### Running the Application
+
+1. **Start the service:**
+ ```bash
+ docker compose up -d
+ ```
+
+2. **Check logs:**
+ ```bash
+ docker compose logs -f
+ ```
+
+3. **Stop the service:**
+ ```bash
+ docker compose down
+ ```
+
+The application will be available at:
+- **Web Interface**: http://localhost:8001
+- **API Documentation**: http://localhost:8001/docs
+- **Health Check**: http://localhost:8001/health
+
+### Data Persistence
+
+The SQLite database and photos are stored in the `./data` directory, which is mounted as a volume. Your data will persist even if you restart or rebuild the container.
+
+## Local Development (Without Docker)
+
+### Prerequisites
+- Python 3.11+
+- pip
+
+### Setup
+
+1. **Install dependencies:**
+ ```bash
+ pip install -r requirements.txt
+ ```
+
+2. **Run the server:**
+ ```bash
+ uvicorn backend.main:app --host 0.0.0.0 --port 8001 --reload
+ ```
+
+The application will be available at http://localhost:8001
+
+### Optional: Generate Sample Data
+
+Need realistic data quickly? Run:
+
+```bash
+python create_test_db.py
+cp /tmp/sfm_test.db data/seismo_fleet.db
+```
+
+The helper script creates a modem/seismograph mix so you can exercise the dashboard, roster tabs, and unit detail screens immediately.
+
+## Upgrading from Previous Versions
+
+### From v0.2.x to v0.3.0
+
+Version 0.3.0 introduces user preferences storage. Run the migration once per database file:
+
+```bash
+python backend/migrate_add_user_preferences.py
+```
+
+This creates the `user_preferences` table for persistent settings storage (timezone, theme, auto-refresh interval, calibration defaults, status thresholds).
+
+### From v0.1.x to v0.2.x or later
+
+Versions ≥0.2 introduce new roster columns (device_type, calibration dates, modem metadata, addresses, etc.). Run the migration once per database file:
+
+```bash
+python backend/migrate_add_device_types.py
+```
+
+Both migration scripts are idempotent—if the columns/tables already exist, they simply exit.
+
+## API Endpoints
+
+### Web Pages
+- **GET** `/` - Dashboard home page
+- **GET** `/roster` - Fleet roster page
+- **GET** `/unit/{unit_id}` - Unit detail page
+- **GET** `/settings` - Roster manager, CSV import/export, and danger-zone utilities
+
+### Fleet Status & Monitoring
+- **GET** `/api/status-snapshot` - Complete fleet status snapshot
+- **GET** `/api/roster` - List of all units with metadata
+- **GET** `/api/unit/{unit_id}` - Detailed unit information
+- **GET** `/health` - Health check endpoint
+
+### Roster Management
+- **POST** `/api/roster/add` - Add new unit to roster
+ ```bash
+ curl -X POST http://localhost:8001/api/roster/add \
+ -F "id=BE1234" \
+ -F "device_type=seismograph" \
+ -F "unit_type=series3" \
+ -F "project_id=PROJ-001" \
+ -F "deployed=true" \
+ -F "note=Main site sensor"
+ ```
+- **GET** `/api/roster/{unit_id}` - Fetch a single roster entry for editing
+- **POST** `/api/roster/edit/{unit_id}` - Update all metadata (device type, calibration dates, modem fields, etc.)
+- **POST** `/api/roster/set-deployed/{unit_id}` - Toggle deployment status
+- **POST** `/api/roster/set-retired/{unit_id}` - Toggle retired status
+- **POST** `/api/roster/set-note/{unit_id}` - Update unit notes
+- **DELETE** `/api/roster/{unit_id}` - Remove a roster/emitter pair entirely
+- **POST** `/api/roster/import-csv` - Bulk import from CSV (merge/update mode)
+ ```bash
+ curl -X POST http://localhost:8001/api/roster/import-csv \
+ -F "file=@roster.csv" \
+ -F "update_existing=true"
+ ```
+- **POST** `/api/roster/ignore/{unit_id}` - Move an unknown emitter to the ignore list
+- **DELETE** `/api/roster/ignore/{unit_id}` - Remove a unit from the ignore list
+- **GET** `/api/roster/ignored` - List all ignored units with reasons
+
+### Settings & Data Management
+- **GET** `/api/settings/export-csv` - Download the entire roster as CSV
+- **GET** `/api/settings/stats` - Counts for roster, emitters, and ignored tables
+- **GET** `/api/settings/roster-units` - Raw roster dump for the settings data grid
+- **POST** `/api/settings/import-csv-replace` - Replace the entire roster in one atomic transaction
+- **GET** `/api/settings/preferences` - Get user preferences (timezone, theme, calibration defaults, etc.)
+- **PUT** `/api/settings/preferences` - Update user preferences (supports partial updates)
+- **POST** `/api/settings/clear-all` - Danger-zone action that wipes roster, emitters, and ignored tables
+- **POST** `/api/settings/clear-roster` - Delete only roster entries
+- **POST** `/api/settings/clear-emitters` - Delete auto-discovered emitters
+- **POST** `/api/settings/clear-ignored` - Reset ignore list
+
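+Because preference updates are partial, a script can change one field without resending the whole object. A sketch using field names from the UserPreferences table below:
+
+```python
+import requests
+
+BASE = "http://localhost:8001"
+
+# Read current preferences
+print(requests.get(f"{BASE}/api/settings/preferences").json())
+
+# Partial update: only the timezone changes; other fields keep their values
+requests.put(f"{BASE}/api/settings/preferences", json={"timezone": "America/Chicago"})
+```
+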
+### Database Management
+- **GET** `/api/settings/database/stats` - Database size, row counts, and last modified time
+- **POST** `/api/settings/database/snapshot` - Create manual database snapshot with optional description
+- **GET** `/api/settings/database/snapshots` - List all available snapshots with metadata
+- **GET** `/api/settings/database/snapshot/{filename}` - Download a specific snapshot file
+- **DELETE** `/api/settings/database/snapshot/{filename}` - Delete a snapshot
+- **POST** `/api/settings/database/restore` - Restore database from snapshot (creates safety backup)
+- **POST** `/api/settings/database/upload-snapshot` - Upload snapshot file to server
+
+See [docs/DATABASE_MANAGEMENT.md](docs/DATABASE_MANAGEMENT.md) for detailed documentation and examples.
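+For off-site backups, the same endpoints can be scripted, for example to pull a snapshot down to another machine (the snapshot filename below is hypothetical; take a real name from the list response):
+
+```python
+import requests
+
+BASE = "http://localhost:8001"
+
+# List available snapshots, then download one for off-site storage
+print(requests.get(f"{BASE}/api/settings/database/snapshots").json())
+
+filename = "snapshot_20251216_020000.db"  # hypothetical; use a name from the list above
+resp = requests.get(f"{BASE}/api/settings/database/snapshot/{filename}")
+with open(filename, "wb") as fh:
+    fh.write(resp.content)
+```
+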
+
+### CSV Import Format
+Create a CSV file with the following columns (only `unit_id` is required, everything else is optional):
+
+```
+unit_id,unit_type,device_type,deployed,retired,note,project_id,location,address,coordinates,last_calibrated,next_calibration_due,deployed_with_modem_id,ip_address,phone_number,hardware_model
+```
+
+Boolean columns accept `true/false`, `1/0`, or `yes/no` (case-insensitive). Date columns expect `YYYY-MM-DD`. Use the same schema whether you merge via `/api/roster/import-csv` or replace everything with `/api/settings/import-csv-replace`.
+
+#### Example
+
+```csv
+unit_id,unit_type,device_type,deployed,retired,note,project_id,location,address,coordinates,last_calibrated,next_calibration_due,deployed_with_modem_id,ip_address,phone_number,hardware_model
+BE1234,series3,seismograph,true,false,Primary sensor,PROJ-001,"Station A","123 Market St, San Francisco, CA","37.7937,-122.3965",2025-01-15,2026-01-15,MDM001,,,
+MDM001,modem,modem,true,false,Field modem,PROJ-001,"Station A","123 Market St, San Francisco, CA","37.7937,-122.3965",,,,"192.0.2.10","+1-555-0100","Raven XTV"
+```
+
+See [sample_roster.csv](sample_roster.csv) for a minimal working example.
+
+### Emitter Reporting
+- **POST** `/emitters/report` - Submit status report from a seismograph unit
+- **POST** `/api/series3/heartbeat` - Series 3 multi-unit telemetry payload
+- **POST** `/api/series4/heartbeat` - Series 4 (Micromate) multi-unit telemetry payload
+- **GET** `/fleet/status` - Retrieve status of all seismograph units (legacy)
+
+### Photo Management
+- **GET** `/api/unit/{unit_id}/photos` - List photos for a unit
+- **GET** `/api/unit/{unit_id}/photo/{filename}` - Serve specific photo file
+
+## API Documentation
+
+Once running, interactive API documentation is available at:
+- **Swagger UI**: http://localhost:8001/docs
+- **ReDoc**: http://localhost:8001/redoc
+
+## Testing the API
+
+### Using curl
+
+**Submit a report:**
+```bash
+curl -X POST http://localhost:8001/emitters/report \
+ -H "Content-Type: application/json" \
+ -d '{
+ "unit": "SEISMO-001",
+ "unit_type": "series3",
+ "timestamp": "2025-11-20T10:30:00",
+ "file": "event_20251120_103000.dat",
+ "status": "OK"
+ }'
+```
+
+**Get fleet status:**
+```bash
+curl http://localhost:8001/api/roster
+```
+
+**Import roster from CSV:**
+```bash
+curl -X POST http://localhost:8001/api/roster/import-csv \
+ -F "file=@sample_roster.csv" \
+ -F "update_existing=true"
+```
+
+### Using Python
+
+```python
+import requests
+from datetime import datetime
+
+# Submit report
+response = requests.post(
+ "http://localhost:8001/emitters/report",
+ json={
+ "unit": "SEISMO-001",
+ "unit_type": "series3",
+ "timestamp": datetime.utcnow().isoformat(),
+ "file": "event_20251120_103000.dat",
+ "status": "OK"
+ }
+)
+print(response.json())
+
+# Get fleet status
+response = requests.get("http://localhost:8001/api/roster")
+print(response.json())
+
+# Import CSV
+with open('roster.csv', 'rb') as f:
+ files = {'file': f}
+ data = {'update_existing': 'true'}
+ response = requests.post(
+ "http://localhost:8001/api/roster/import-csv",
+ files=files,
+ data=data
+ )
+print(response.json())
+```
+
+## Data Model
+
+### RosterUnit Table (Fleet Roster)
+
+**Common fields**
+
+| Field | Type | Description |
+|-------|------|-------------|
+| id | string | Unit identifier (primary key) |
+| unit_type | string | Hardware model name (default: `series3`) |
+| device_type | string | `seismograph` or `modem` discriminator |
+| deployed | boolean | Whether the unit is in the field |
+| retired | boolean | Removes the unit from deployments but preserves history |
+| note | string | Notes about the unit |
+| project_id | string | Associated project identifier |
+| location | string | Legacy location label |
+| address | string | Human-readable address |
+| coordinates | string | `lat,lon` pair used by the map |
+| last_updated | datetime | Last modification timestamp |
+
+**Seismograph-only fields**
+
+| Field | Type | Description |
+|-------|------|-------------|
+| last_calibrated | date | Last calibration date |
+| next_calibration_due | date | Next calibration date |
+| deployed_with_modem_id | string | Which modem is paired during deployment |
+
+**Modem-only fields**
+
+| Field | Type | Description |
+|-------|------|-------------|
+| ip_address | string | Assigned IP (static or DHCP) |
+| phone_number | string | Cellular number for the modem |
+| hardware_model | string | Modem hardware reference |
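+
+In SQLAlchemy terms the roster model looks roughly like this (a condensed sketch following the tables above; the table name and defaults are assumptions, and `backend/models.py` is authoritative):
+
+```python
+from sqlalchemy import Boolean, Column, Date, DateTime, String
+
+from backend.database import Base
+
+
+class RosterUnit(Base):
+    __tablename__ = "roster_units"  # assumed name
+
+    id = Column(String, primary_key=True)           # unit identifier
+    unit_type = Column(String, default="series3")   # hardware model name
+    device_type = Column(String)                    # "seismograph" or "modem"
+    deployed = Column(Boolean, default=False)
+    retired = Column(Boolean, default=False)
+    note = Column(String)
+    project_id = Column(String)
+    location = Column(String)
+    address = Column(String)
+    coordinates = Column(String)                    # "lat,lon"
+    last_updated = Column(DateTime)
+
+    # Seismograph-only fields
+    last_calibrated = Column(Date)
+    next_calibration_due = Column(Date)
+    deployed_with_modem_id = Column(String)
+
+    # Modem-only fields
+    ip_address = Column(String)
+    phone_number = Column(String)
+    hardware_model = Column(String)
+```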
+
+### Emitter Table (Device Check-ins)
+
+| Field | Type | Description |
+|-------|------|-------------|
+| id | string | Unit identifier (primary key) |
+| unit_type | string | Reported device type/model |
+| last_seen | datetime | Last report timestamp |
+| last_file | string | Last file processed |
+| status | string | Current status: OK, Pending, Missing |
+| notes | string | Optional notes (nullable) |
+
+### IgnoredUnit Table (Noise Management)
+
+| Field | Type | Description |
+|-------|------|-------------|
+| id | string | Unit identifier (primary key) |
+| reason | string | Optional context for ignoring |
+| ignored_at | datetime | When the ignore action occurred |
+
+### UserPreferences Table (Settings Storage)
+
+| Field | Type | Description |
+|-------|------|-------------|
+| id | integer | Always 1 (single-row table) |
+| timezone | string | Display timezone (default: America/New_York) |
+| theme | string | UI theme: auto, light, or dark |
+| auto_refresh_interval | integer | Dashboard refresh interval in seconds |
+| date_format | string | Date format preference |
+| table_rows_per_page | integer | Default pagination size |
+| calibration_interval_days | integer | Default days between calibrations |
+| calibration_warning_days | integer | Warning threshold before calibration due |
+| status_ok_threshold_hours | integer | Hours for OK status threshold |
+| status_pending_threshold_hours | integer | Hours for Pending status threshold |
+| updated_at | datetime | Last preference update timestamp |
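+
+The single-row pattern means consumers always read the row with `id=1` (creating it with defaults if absent), and the two status thresholds drive the OK/Pending/Missing classification. A minimal sketch (the real logic lives in the settings router and snapshot service):
+
+```python
+from datetime import datetime, timedelta
+
+from backend.database import get_db_session
+from backend.models import UserPreferences
+
+
+def get_preferences() -> UserPreferences:
+    """Fetch the single preferences row (id=1), creating it with defaults if missing."""
+    db = get_db_session()
+    try:
+        prefs = db.query(UserPreferences).filter_by(id=1).first()
+        if prefs is None:
+            prefs = UserPreferences(id=1)  # model defaults fill the remaining fields
+            db.add(prefs)
+            db.commit()
+            db.refresh(prefs)
+        return prefs
+    finally:
+        db.close()
+
+
+def classify(last_seen: datetime, prefs: UserPreferences) -> str:
+    """Map a deployed unit's last check-in age onto OK / Pending / Missing."""
+    age = datetime.utcnow() - last_seen
+    if age <= timedelta(hours=prefs.status_ok_threshold_hours):
+        return "OK"
+    if age <= timedelta(hours=prefs.status_pending_threshold_hours):
+        return "Pending"
+    return "Missing"
+```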
+
+## Project Structure
+
+```
+seismo-fleet-manager/
+├── backend/
+│ ├── main.py # FastAPI app entry point
+│ ├── database.py # SQLAlchemy database configuration
+│ ├── models.py # Database models (RosterUnit, Emitter, IgnoredUnit, UserPreferences)
+│ ├── routes.py # Legacy API endpoints + Series 3/4 heartbeat endpoints
+│ ├── routers/ # Modular API routers
+│ │ ├── roster.py # Fleet status endpoints
+│ │ ├── roster_edit.py # Roster management & CSV import
+│ │ ├── units.py # Unit detail endpoints
+│ │ ├── photos.py # Photo management
+│ │ ├── dashboard.py # Dashboard partials
+│ │ ├── dashboard_tabs.py # Dashboard tab endpoints
+│ │ └── settings.py # Settings, preferences, and data management
+│ ├── services/
+│ │ ├── snapshot.py # Fleet status snapshot logic
+│ │ ├── database_backup.py # Database backup and restore service
+│ │ └── backup_scheduler.py # Automatic backup scheduler
+│ ├── migrate_add_device_types.py # SQLite migration for v0.2 schema
+│ ├── migrate_add_user_preferences.py # SQLite migration for v0.3 schema
+│ └── static/ # Static assets (CSS, etc.)
+├── create_test_db.py # Generate a sample SQLite DB with mixed devices
+├── templates/ # Jinja2 HTML templates
+│ ├── base.html # Base layout with sidebar
+│ ├── dashboard.html # Main dashboard
+│ ├── roster.html # Fleet roster table
+│ ├── unit_detail.html # Unit detail page
+│ ├── settings.html # Roster manager UI
+│ └── partials/ # HTMX partial templates
+│ ├── roster_table.html
+│ ├── retired_table.html
+│ ├── ignored_table.html
+│ └── unknown_emitters.html
+├── data/ # SQLite database & photos (persisted)
+│ └── backups/ # Database snapshots directory
+├── scripts/
+│ └── clone_db_to_dev.py # Remote database cloning utility
+├── docs/
+│ └── DATABASE_MANAGEMENT.md # Database backup/restore guide
+├── requirements.txt # Python dependencies
+├── Dockerfile # Docker container definition
+├── docker-compose.yml # Docker Compose configuration
+├── CHANGELOG.md # Version history
+├── FRONTEND_README.md # Frontend documentation
+└── README.md # This file
+```
+
+## Docker Commands
+
+**Build the image:**
+```bash
+docker compose build
+```
+
+**Start in foreground:**
+```bash
+docker compose up
+```
+
+**Start in background:**
+```bash
+docker compose up -d
+```
+
+**View logs:**
+```bash
+docker compose logs -f seismo-backend
+```
+
+**Restart service:**
+```bash
+docker compose restart
+```
+
+**Rebuild and restart:**
+```bash
+docker compose up -d --build
+```
+
+**Stop and remove containers:**
+```bash
+docker compose down
+```
+
+**Remove containers and volumes:**
+```bash
+docker compose down -v
+```
+
+## Release Highlights
+
+### v0.4.0 — 2025-12-16
+- **Database Management System**: Complete backup and restore functionality with manual snapshots, restore operations, and upload/download capabilities
+- **Remote Database Cloning**: New `clone_db_to_dev.py` script for copying production database to remote dev servers over WAN
+- **Automatic Backup Scheduler**: Background service for scheduled backups with configurable retention management
+- **Database Tab**: New dedicated tab in Settings for all database operations with real-time statistics
+- **Settings Reorganization**: Improved tab structure - renamed "Data Management" to "Roster Management", moved CSV Replace Mode, created Database tab
+- **Comprehensive Documentation**: New `docs/DATABASE_MANAGEMENT.md` with complete guide to backup/restore workflows, API reference, and best practices
+
+### v0.3.3 — 2025-12-12
+- **Improved Mobile Navigation**: Hamburger menu moved to bottom nav bar (no more floating button covering content)
+- **Better Status Visibility**: Larger status dots (16px) in dashboard fleet overview for easier at-a-glance status checks
+- **Cleaner Roster Cards**: Location navigation links moved to detail modal only, reducing clutter in card view
+
+### v0.3.2 — 2025-12-12
+- **Progressive Web App (PWA)**: Complete mobile optimization with offline support, installable as standalone app
+- **Mobile-First UI**: Hamburger menu, bottom navigation bar, card-based roster view optimized for touch
+- **Tap-to-Navigate**: Location links open in user's preferred navigation app (Google Maps, Apple Maps, Waze)
+- **Offline Editing**: Service worker + IndexedDB for offline operation with automatic sync when online
+- **Unit Detail Modals**: Bottom sheet modals for quick unit info access with full edit capabilities
+- **Hard Reload Utility**: "Clear Cache & Reload" button to force fresh assets (helpful for development)
+
+### v0.3.1 — 2025-12-12
+- **Dashboard Alerts**: Only Missing units show in notifications (Pending units no longer alert)
+- **Status Fixes**: Fixed "Unknown" status issues in mobile card views and detail modals
+- **Backend Improvements**: Safer data access with `.get()` defaults to prevent errors
+
+### v0.3.0 — 2025-12-09
+- **Series 4 Support**: New `/api/series4/heartbeat` endpoint with auto-detection for Micromate units (UM##### pattern)
+- **Settings Redesign**: Completely redesigned Settings page with 4-tab interface (General, Data Management, Advanced, Danger Zone)
+- **User Preferences**: Backend storage for timezone, theme, auto-refresh interval, calibration defaults, and status thresholds
+- **Development Labels**: Visual indicators to distinguish dev from production environments
+- **Timezone Support**: Comprehensive timezone handling with human-readable timestamps site-wide
+- **Quality of Life**: Relative timestamps, status icons for accessibility, breadcrumb navigation, copy-to-clipboard, search functionality
+
+### v0.2.1 — 2025-12-03
+- Added the `/settings` roster manager with CSV export/import, live stats, and danger-zone table reset actions
+- Deployed/Benched/Retired/Ignored tabs now have dedicated HTMX partials, sorting, and inline actions
+- Unit detail pages expose device-type specific metadata (calibration windows, modem pairing, IP/phone fields)
+- Snapshot summary and dashboard counts now focus on deployed units and include address/coordinate data
+
+### v0.2.0 — 2025-12-03
+- Introduced device-type aware roster schema (seismograph vs modem) plus migration + `create_test_db.py` helper
+- Added Ignore list model/endpoints to quarantine noisy emitters directly from the roster
+- Roster page gained Add Unit + CSV Import modals, HTMX-driven updates, and unknown emitter callouts
+- Snapshot service now returns active/benched/retired/unknown buckets containing richer metadata
+
+### v0.1.1 — 2025-12-02
+- **Roster Editing API**: Full CRUD operations for managing your fleet roster
+- **CSV Import**: Bulk upload roster data from CSV files
+- **Enhanced Data Model**: Added project_id and location fields to roster
+- **Bug Fixes**: Improved database session management and error handling
+
+See [CHANGELOG.md](CHANGELOG.md) for the full release notes.
+
+## Future Enhancements
+
+- Email/SMS alerts for missing units
+- Historical data tracking and reporting
+- Multi-user authentication
+- PostgreSQL support for larger deployments
+- Advanced filtering and search
+- Export roster to various formats
+
+## License
+
+MIT
+
+## Version
+
+**Current: 0.4.0** — Database management system with backup/restore and remote cloning (2025-12-16)
+
+Previous: 0.3.3 — Mobile navigation improvements and better status visibility (2025-12-12)
+
+0.3.2 — Progressive Web App with mobile optimization (2025-12-12)
+
+0.3.1 — Dashboard alerts and status fixes (2025-12-12)
+
+0.3.0 — Series 4 support, settings redesign, user preferences (2025-12-09)
+
+0.2.1 — Settings & roster manager refresh (2025-12-03)
+
+0.2.0 — Device-type aware roster + ignore list (2025-12-03)
+
+0.1.1 — Roster Management & CSV Import (2025-12-02)
+
+0.1.0 — Initial Release (2024-11-20)
diff --git a/backend/database.py b/backend/database.py
new file mode 100644
index 0000000..7889459
--- /dev/null
+++ b/backend/database.py
@@ -0,0 +1,31 @@
+from sqlalchemy import create_engine
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import sessionmaker
+import os
+
+# Ensure data directory exists
+os.makedirs("data", exist_ok=True)
+
+SQLALCHEMY_DATABASE_URL = "sqlite:///./data/seismo_fleet.db"
+
+engine = create_engine(
+ SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
+)
+
+SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
+
+Base = declarative_base()
+
+
+def get_db():
+ """Dependency for database sessions"""
+ db = SessionLocal()
+ try:
+ yield db
+ finally:
+ db.close()
+
+
+def get_db_session():
+ """Get a database session directly (not as a dependency)"""
+ return SessionLocal()
diff --git a/backend/main.py b/backend/main.py
new file mode 100644
index 0000000..ed55a49
--- /dev/null
+++ b/backend/main.py
@@ -0,0 +1,373 @@
+import os
+from fastapi import FastAPI, Request, Depends
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.staticfiles import StaticFiles
+from fastapi.templating import Jinja2Templates
+from fastapi.responses import HTMLResponse, FileResponse, JSONResponse
+from sqlalchemy.orm import Session
+from typing import List, Dict
+from pydantic import BaseModel
+
+from backend.database import engine, Base, get_db
+from backend.routers import roster, units, photos, roster_edit, dashboard, dashboard_tabs, activity, slmm, slm_ui
+from backend.services.snapshot import emit_status_snapshot
+from backend.models import IgnoredUnit
+
+# Create database tables
+Base.metadata.create_all(bind=engine)
+
+# Read environment (development or production)
+ENVIRONMENT = os.getenv("ENVIRONMENT", "production")
+
+# Initialize FastAPI app
+VERSION = "0.4.0"
+app = FastAPI(
+ title="Seismo Fleet Manager",
+ description="Backend API for managing seismograph fleet status",
+ version=VERSION
+)
+
+# Configure CORS
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=["*"],
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
+
+# Mount static files
+app.mount("/static", StaticFiles(directory="backend/static"), name="static")
+
+# Setup Jinja2 templates
+templates = Jinja2Templates(directory="templates")
+
+# Add custom context processor to inject environment variable into all templates
+@app.middleware("http")
+async def add_environment_to_context(request: Request, call_next):
+ """Middleware to add environment variable to request state"""
+ request.state.environment = ENVIRONMENT
+ response = await call_next(request)
+ return response
+
+# Override TemplateResponse to include environment and version in context
+original_template_response = templates.TemplateResponse
+def custom_template_response(name, context=None, *args, **kwargs):
+ if context is None:
+ context = {}
+ context["environment"] = ENVIRONMENT
+ context["version"] = VERSION
+ return original_template_response(name, context, *args, **kwargs)
+templates.TemplateResponse = custom_template_response
+
+# Include API routers
+app.include_router(roster.router)
+app.include_router(units.router)
+app.include_router(photos.router)
+app.include_router(roster_edit.router)
+app.include_router(dashboard.router)
+app.include_router(dashboard_tabs.router)
+app.include_router(activity.router)
+app.include_router(slmm.router)
+app.include_router(slm_ui.router)
+
+from backend.routers import settings
+app.include_router(settings.router)
+
+
+
+# Legacy routes from the original backend
+from backend import routes as legacy_routes
+app.include_router(legacy_routes.router)
+
+
+# HTML page routes
+@app.get("/", response_class=HTMLResponse)
+async def dashboard(request: Request):
+ """Dashboard home page"""
+ return templates.TemplateResponse("dashboard.html", {"request": request})
+
+
+@app.get("/roster", response_class=HTMLResponse)
+async def roster_page(request: Request):
+ """Fleet roster page"""
+ return templates.TemplateResponse("roster.html", {"request": request})
+
+
+@app.get("/unit/{unit_id}", response_class=HTMLResponse)
+async def unit_detail_page(request: Request, unit_id: str):
+ """Unit detail page"""
+ return templates.TemplateResponse("unit_detail.html", {
+ "request": request,
+ "unit_id": unit_id
+ })
+
+
+@app.get("/settings", response_class=HTMLResponse)
+async def settings_page(request: Request):
+ """Settings page for roster management"""
+ return templates.TemplateResponse("settings.html", {"request": request})
+
+
+# ===== PWA ROUTES =====
+
+@app.get("/sw.js")
+async def service_worker():
+ """Serve service worker with proper headers for PWA"""
+ return FileResponse(
+ "backend/static/sw.js",
+ media_type="application/javascript",
+ headers={
+ "Service-Worker-Allowed": "/",
+ "Cache-Control": "no-cache"
+ }
+ )
+
+
+@app.get("/offline-db.js")
+async def offline_db_script():
+ """Serve offline database script"""
+ return FileResponse(
+ "backend/static/offline-db.js",
+ media_type="application/javascript",
+ headers={"Cache-Control": "no-cache"}
+ )
+
+
+# Pydantic model for sync edits
+class EditItem(BaseModel):
+ id: int
+ unitId: str
+ changes: Dict
+ timestamp: int
+
+
+class SyncEditsRequest(BaseModel):
+ edits: List[EditItem]
+
+
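+# Example payload for /api/sync-edits (illustrative IDs and values):
+# {"edits": [{"id": 1, "unitId": "BE1234", "changes": {"note": "moved to rooftop", "deployed": "true"},
+#             "timestamp": 1734300000}]}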
+@app.post("/api/sync-edits")
+async def sync_edits(request: SyncEditsRequest, db: Session = Depends(get_db)):
+ """Process offline edit queue and sync to database"""
+ from backend.models import RosterUnit
+
+ results = []
+ synced_ids = []
+
+ for edit in request.edits:
+ try:
+ # Find the unit
+ unit = db.query(RosterUnit).filter_by(id=edit.unitId).first()
+
+ if not unit:
+ results.append({
+ "id": edit.id,
+ "status": "error",
+ "reason": f"Unit {edit.unitId} not found"
+ })
+ continue
+
+ # Apply changes
+ for key, value in edit.changes.items():
+ if hasattr(unit, key):
+ # Handle boolean conversions
+ if key in ['deployed', 'retired']:
+ setattr(unit, key, value in ['true', True, 'True', '1', 1])
+ else:
+ setattr(unit, key, value if value != '' else None)
+
+ db.commit()
+
+ results.append({
+ "id": edit.id,
+ "status": "success"
+ })
+ synced_ids.append(edit.id)
+
+ except Exception as e:
+ db.rollback()
+ results.append({
+ "id": edit.id,
+ "status": "error",
+ "reason": str(e)
+ })
+
+ synced_count = len(synced_ids)
+
+ return JSONResponse({
+ "synced": synced_count,
+ "total": len(request.edits),
+ "synced_ids": synced_ids,
+ "results": results
+ })
+
+
+@app.get("/partials/roster-deployed", response_class=HTMLResponse)
+async def roster_deployed_partial(request: Request):
+ """Partial template for deployed units tab"""
+ from datetime import datetime
+ snapshot = emit_status_snapshot()
+
+ units_list = []
+ for unit_id, unit_data in snapshot["active"].items():
+ units_list.append({
+ "id": unit_id,
+ "status": unit_data.get("status", "Unknown"),
+ "age": unit_data.get("age", "N/A"),
+ "last_seen": unit_data.get("last", "Never"),
+ "deployed": unit_data.get("deployed", False),
+ "note": unit_data.get("note", ""),
+ "device_type": unit_data.get("device_type", "seismograph"),
+ "address": unit_data.get("address", ""),
+ "coordinates": unit_data.get("coordinates", ""),
+ "project_id": unit_data.get("project_id", ""),
+ "last_calibrated": unit_data.get("last_calibrated"),
+ "next_calibration_due": unit_data.get("next_calibration_due"),
+ "deployed_with_modem_id": unit_data.get("deployed_with_modem_id"),
+ "ip_address": unit_data.get("ip_address"),
+ "phone_number": unit_data.get("phone_number"),
+ "hardware_model": unit_data.get("hardware_model"),
+ })
+
+ # Sort by status priority (Missing > Pending > OK) then by ID
+ status_priority = {"Missing": 0, "Pending": 1, "OK": 2}
+ units_list.sort(key=lambda x: (status_priority.get(x["status"], 3), x["id"]))
+
+ return templates.TemplateResponse("partials/roster_table.html", {
+ "request": request,
+ "units": units_list,
+ "timestamp": datetime.now().strftime("%H:%M:%S")
+ })
+
+
+@app.get("/partials/roster-benched", response_class=HTMLResponse)
+async def roster_benched_partial(request: Request):
+ """Partial template for benched units tab"""
+ from datetime import datetime
+ snapshot = emit_status_snapshot()
+
+ units_list = []
+ for unit_id, unit_data in snapshot["benched"].items():
+ units_list.append({
+ "id": unit_id,
+ "status": unit_data.get("status", "N/A"),
+ "age": unit_data.get("age", "N/A"),
+ "last_seen": unit_data.get("last", "Never"),
+ "deployed": unit_data.get("deployed", False),
+ "note": unit_data.get("note", ""),
+ "device_type": unit_data.get("device_type", "seismograph"),
+ "address": unit_data.get("address", ""),
+ "coordinates": unit_data.get("coordinates", ""),
+ "project_id": unit_data.get("project_id", ""),
+ "last_calibrated": unit_data.get("last_calibrated"),
+ "next_calibration_due": unit_data.get("next_calibration_due"),
+ "deployed_with_modem_id": unit_data.get("deployed_with_modem_id"),
+ "ip_address": unit_data.get("ip_address"),
+ "phone_number": unit_data.get("phone_number"),
+ "hardware_model": unit_data.get("hardware_model"),
+ })
+
+ # Sort by ID
+ units_list.sort(key=lambda x: x["id"])
+
+ return templates.TemplateResponse("partials/roster_table.html", {
+ "request": request,
+ "units": units_list,
+ "timestamp": datetime.now().strftime("%H:%M:%S")
+ })
+
+
+@app.get("/partials/roster-retired", response_class=HTMLResponse)
+async def roster_retired_partial(request: Request):
+ """Partial template for retired units tab"""
+ from datetime import datetime
+ snapshot = emit_status_snapshot()
+
+ units_list = []
+ for unit_id, unit_data in snapshot["retired"].items():
+ units_list.append({
+ "id": unit_id,
+ "status": unit_data["status"],
+ "age": unit_data["age"],
+ "last_seen": unit_data["last"],
+ "deployed": unit_data["deployed"],
+ "note": unit_data.get("note", ""),
+ "device_type": unit_data.get("device_type", "seismograph"),
+ "last_calibrated": unit_data.get("last_calibrated"),
+ "next_calibration_due": unit_data.get("next_calibration_due"),
+ "deployed_with_modem_id": unit_data.get("deployed_with_modem_id"),
+ "ip_address": unit_data.get("ip_address"),
+ "phone_number": unit_data.get("phone_number"),
+ "hardware_model": unit_data.get("hardware_model"),
+ })
+
+ # Sort by ID
+ units_list.sort(key=lambda x: x["id"])
+
+ return templates.TemplateResponse("partials/retired_table.html", {
+ "request": request,
+ "units": units_list,
+ "timestamp": datetime.now().strftime("%H:%M:%S")
+ })
+
+
+@app.get("/partials/roster-ignored", response_class=HTMLResponse)
+async def roster_ignored_partial(request: Request, db: Session = Depends(get_db)):
+ """Partial template for ignored units tab"""
+ from datetime import datetime
+
+ ignored = db.query(IgnoredUnit).all()
+ ignored_list = []
+ for unit in ignored:
+ ignored_list.append({
+ "id": unit.id,
+ "reason": unit.reason or "",
+ "ignored_at": unit.ignored_at.strftime("%Y-%m-%d %H:%M:%S") if unit.ignored_at else "Unknown"
+ })
+
+ # Sort by ID
+ ignored_list.sort(key=lambda x: x["id"])
+
+ return templates.TemplateResponse("partials/ignored_table.html", {
+ "request": request,
+ "ignored_units": ignored_list,
+ "timestamp": datetime.now().strftime("%H:%M:%S")
+ })
+
+
+@app.get("/partials/unknown-emitters", response_class=HTMLResponse)
+async def unknown_emitters_partial(request: Request):
+ """Partial template for unknown emitters (HTMX)"""
+ snapshot = emit_status_snapshot()
+
+ unknown_list = []
+ for unit_id, unit_data in snapshot.get("unknown", {}).items():
+ unknown_list.append({
+ "id": unit_id,
+ "status": unit_data["status"],
+ "age": unit_data["age"],
+ "fname": unit_data.get("fname", ""),
+ })
+
+ # Sort by ID
+ unknown_list.sort(key=lambda x: x["id"])
+
+ return templates.TemplateResponse("partials/unknown_emitters.html", {
+ "request": request,
+ "unknown_units": unknown_list
+ })
+
+
+@app.get("/health")
+def health_check():
+ """Health check endpoint"""
+ return {
+ "message": f"Seismo Fleet Manager v{VERSION}",
+ "status": "running",
+ "version": VERSION
+ }
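+# Quick smoke test once the server is up (illustrative): curl http://localhost:8001/health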
+
+
+if __name__ == "__main__":
+ import uvicorn
+ uvicorn.run(app, host="0.0.0.0", port=8001)
diff --git a/backend/migrate_add_device_types.py b/backend/migrate_add_device_types.py
new file mode 100644
index 0000000..f923f34
--- /dev/null
+++ b/backend/migrate_add_device_types.py
@@ -0,0 +1,84 @@
+"""
+Migration script to add device type support to the roster table.
+
+This adds columns for:
+- device_type (seismograph/modem discriminator)
+- Seismograph-specific fields (calibration dates, modem pairing)
+- Modem-specific fields (IP address, phone number, hardware model)
+
+Run this script once to migrate an existing database.
+"""
+
+import sqlite3
+import os
+
+# Database path
+DB_PATH = "./data/seismo_fleet.db"
+
+def migrate_database():
+ """Add new columns to the roster table"""
+
+ if not os.path.exists(DB_PATH):
+ print(f"Database not found at {DB_PATH}")
+ print("The database will be created automatically when you run the application.")
+ return
+
+ print(f"Migrating database: {DB_PATH}")
+
+ conn = sqlite3.connect(DB_PATH)
+ cursor = conn.cursor()
+
+ # Check if device_type column already exists
+ cursor.execute("PRAGMA table_info(roster)")
+ columns = [col[1] for col in cursor.fetchall()]
+
+ if "device_type" in columns:
+ print("Migration already applied - device_type column exists")
+ conn.close()
+ return
+
+ print("Adding new columns to roster table...")
+
+ try:
+ # Add device type discriminator
+ cursor.execute("ALTER TABLE roster ADD COLUMN device_type TEXT DEFAULT 'seismograph'")
+ print(" ✓ Added device_type column")
+
+ # Add seismograph-specific fields
+ cursor.execute("ALTER TABLE roster ADD COLUMN last_calibrated DATE")
+ print(" ✓ Added last_calibrated column")
+
+ cursor.execute("ALTER TABLE roster ADD COLUMN next_calibration_due DATE")
+ print(" ✓ Added next_calibration_due column")
+
+ cursor.execute("ALTER TABLE roster ADD COLUMN deployed_with_modem_id TEXT")
+ print(" ✓ Added deployed_with_modem_id column")
+
+ # Add modem-specific fields
+ cursor.execute("ALTER TABLE roster ADD COLUMN ip_address TEXT")
+ print(" ✓ Added ip_address column")
+
+ cursor.execute("ALTER TABLE roster ADD COLUMN phone_number TEXT")
+ print(" ✓ Added phone_number column")
+
+ cursor.execute("ALTER TABLE roster ADD COLUMN hardware_model TEXT")
+ print(" ✓ Added hardware_model column")
+
+ # Set all existing units to seismograph type
+ cursor.execute("UPDATE roster SET device_type = 'seismograph' WHERE device_type IS NULL")
+ print(" ✓ Set existing units to seismograph type")
+
+ conn.commit()
+ print("\nMigration completed successfully!")
+
+ except sqlite3.Error as e:
+ print(f"\nError during migration: {e}")
+ conn.rollback()
+ raise
+
+ finally:
+ conn.close()
+
+
+if __name__ == "__main__":
+ migrate_database()
diff --git a/backend/migrate_add_slm_fields.py b/backend/migrate_add_slm_fields.py
new file mode 100644
index 0000000..1c1b50e
--- /dev/null
+++ b/backend/migrate_add_slm_fields.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+"""
+Database migration: Add sound level meter fields to roster table.
+
+Adds columns for sound_level_meter device type support.
+"""
+
+import sqlite3
+from pathlib import Path
+
+def migrate():
+ """Add SLM fields to roster table if they don't exist."""
+
+ # Try multiple possible database locations
+ possible_paths = [
+ Path("data/seismo_fleet.db"),
+ Path("data/sfm.db"),
+ Path("data/seismo.db"),
+ ]
+
+ db_path = None
+ for path in possible_paths:
+ if path.exists():
+ db_path = path
+ break
+
+ if db_path is None:
+ print(f"Database not found in any of: {[str(p) for p in possible_paths]}")
+ print("Creating database with models.py will include new fields automatically.")
+ return
+
+ print(f"Using database: {db_path}")
+
+ conn = sqlite3.connect(db_path)
+ cursor = conn.cursor()
+
+ # Check if columns already exist
+ cursor.execute("PRAGMA table_info(roster)")
+ existing_columns = {row[1] for row in cursor.fetchall()}
+
+ new_columns = {
+ "slm_host": "TEXT",
+ "slm_tcp_port": "INTEGER",
+ "slm_model": "TEXT",
+ "slm_serial_number": "TEXT",
+ "slm_frequency_weighting": "TEXT",
+ "slm_time_weighting": "TEXT",
+ "slm_measurement_range": "TEXT",
+ "slm_last_check": "DATETIME",
+ }
+
+ migrations_applied = []
+
+ for column_name, column_type in new_columns.items():
+ if column_name not in existing_columns:
+ try:
+ cursor.execute(f"ALTER TABLE roster ADD COLUMN {column_name} {column_type}")
+ migrations_applied.append(column_name)
+ print(f"✓ Added column: {column_name} ({column_type})")
+ except sqlite3.OperationalError as e:
+ print(f"✗ Failed to add column {column_name}: {e}")
+ else:
+ print(f"○ Column already exists: {column_name}")
+
+ conn.commit()
+ conn.close()
+
+ if migrations_applied:
+ print(f"\n✓ Migration complete! Added {len(migrations_applied)} new columns.")
+ else:
+ print("\n○ No migration needed - all columns already exist.")
+
+ print("\nSound level meter fields are now available in the roster table.")
+ print("You can now set device_type='sound_level_meter' for SLM devices.")
+
+
+if __name__ == "__main__":
+ migrate()
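+
+# To verify the result (assuming the sqlite3 CLI is available; adjust the path if your DB lives elsewhere):
+#   sqlite3 data/seismo_fleet.db "PRAGMA table_info(roster);"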
diff --git a/backend/migrate_add_unit_history.py b/backend/migrate_add_unit_history.py
new file mode 100644
index 0000000..15cdaad
--- /dev/null
+++ b/backend/migrate_add_unit_history.py
@@ -0,0 +1,78 @@
+"""
+Migration script to add unit history timeline support.
+
+This creates the unit_history table to track all changes to units:
+- Note changes (archived old notes, new notes)
+- Deployment status changes (deployed/benched)
+- Retired status changes
+- Other field changes
+
+Run this script once to migrate an existing database.
+"""
+
+import sqlite3
+import os
+
+# Database path
+DB_PATH = "./data/seismo_fleet.db"
+
+def migrate_database():
+ """Create the unit_history table"""
+
+ if not os.path.exists(DB_PATH):
+ print(f"Database not found at {DB_PATH}")
+ print("The database will be created automatically when you run the application.")
+ return
+
+ print(f"Migrating database: {DB_PATH}")
+
+ conn = sqlite3.connect(DB_PATH)
+ cursor = conn.cursor()
+
+ # Check if unit_history table already exists
+ cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='unit_history'")
+ if cursor.fetchone():
+ print("Migration already applied - unit_history table exists")
+ conn.close()
+ return
+
+ print("Creating unit_history table...")
+
+ try:
+ cursor.execute("""
+ CREATE TABLE unit_history (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ unit_id TEXT NOT NULL,
+ change_type TEXT NOT NULL,
+ field_name TEXT,
+ old_value TEXT,
+ new_value TEXT,
+ changed_at TIMESTAMP NOT NULL,
+ source TEXT DEFAULT 'manual',
+ notes TEXT
+ )
+ """)
+ print(" ✓ Created unit_history table")
+
+ # Create indexes for better query performance
+ cursor.execute("CREATE INDEX idx_unit_history_unit_id ON unit_history(unit_id)")
+ print(" ✓ Created index on unit_id")
+
+ cursor.execute("CREATE INDEX idx_unit_history_changed_at ON unit_history(changed_at)")
+ print(" ✓ Created index on changed_at")
+
+ conn.commit()
+ print("\nMigration completed successfully!")
+ print("Units will now track their complete history of changes.")
+
+ except sqlite3.Error as e:
+ print(f"\nError during migration: {e}")
+ conn.rollback()
+ raise
+
+ finally:
+ conn.close()
+
+
+if __name__ == "__main__":
+ migrate_database()
diff --git a/backend/migrate_add_user_preferences.py b/backend/migrate_add_user_preferences.py
new file mode 100644
index 0000000..73a4e9c
--- /dev/null
+++ b/backend/migrate_add_user_preferences.py
@@ -0,0 +1,80 @@
+"""
+Migration script to add user_preferences table.
+
+This creates a new table for storing persistent user preferences:
+- Display settings (timezone, theme, date format)
+- Auto-refresh configuration
+- Calibration defaults
+- Status threshold customization
+
+Run this script once to migrate an existing database.
+"""
+
+import sqlite3
+import os
+
+# Database path
+DB_PATH = "./data/seismo_fleet.db"
+
+def migrate_database():
+ """Create user_preferences table"""
+
+ if not os.path.exists(DB_PATH):
+ print(f"Database not found at {DB_PATH}")
+ print("The database will be created automatically when you run the application.")
+ return
+
+ print(f"Migrating database: {DB_PATH}")
+
+ conn = sqlite3.connect(DB_PATH)
+ cursor = conn.cursor()
+
+ # Check if user_preferences table already exists
+ cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='user_preferences'")
+ table_exists = cursor.fetchone()
+
+ if table_exists:
+ print("Migration already applied - user_preferences table exists")
+ conn.close()
+ return
+
+ print("Creating user_preferences table...")
+
+ try:
+ cursor.execute("""
+ CREATE TABLE user_preferences (
+ id INTEGER PRIMARY KEY DEFAULT 1,
+ timezone TEXT DEFAULT 'America/New_York',
+ theme TEXT DEFAULT 'auto',
+ auto_refresh_interval INTEGER DEFAULT 10,
+ date_format TEXT DEFAULT 'MM/DD/YYYY',
+ table_rows_per_page INTEGER DEFAULT 25,
+ calibration_interval_days INTEGER DEFAULT 365,
+ calibration_warning_days INTEGER DEFAULT 30,
+ status_ok_threshold_hours INTEGER DEFAULT 12,
+ status_pending_threshold_hours INTEGER DEFAULT 24,
+ updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+ )
+ """)
+ print(" ✓ Created user_preferences table")
+
+ # Insert default preferences
+ cursor.execute("""
+ INSERT INTO user_preferences (id) VALUES (1)
+ """)
+ print(" ✓ Inserted default preferences")
+
+ conn.commit()
+ print("\nMigration completed successfully!")
+
+ except sqlite3.Error as e:
+ print(f"\nError during migration: {e}")
+ conn.rollback()
+ raise
+
+ finally:
+ conn.close()
+
+
+if __name__ == "__main__":
+ migrate_database()
diff --git a/backend/models.py b/backend/models.py
new file mode 100644
index 0000000..e80c49c
--- /dev/null
+++ b/backend/models.py
@@ -0,0 +1,107 @@
+from sqlalchemy import Column, String, DateTime, Boolean, Text, Date, Integer
+from datetime import datetime
+from backend.database import Base
+
+
+class Emitter(Base):
+ __tablename__ = "emitters"
+
+ id = Column(String, primary_key=True, index=True)
+ unit_type = Column(String, nullable=False)
+ last_seen = Column(DateTime, default=datetime.utcnow)
+ last_file = Column(String, nullable=False)
+ status = Column(String, nullable=False)
+ notes = Column(String, nullable=True)
+
+
+class RosterUnit(Base):
+ """
+ Roster table: represents our *intended assignment* of a unit.
+ This is editable from the GUI.
+
+ Supports multiple device types (seismograph, modem, sound_level_meter) with type-specific fields.
+ """
+ __tablename__ = "roster"
+
+ # Core fields (all device types)
+ id = Column(String, primary_key=True, index=True)
+ unit_type = Column(String, default="series3") # Backward compatibility
+ device_type = Column(String, default="seismograph") # "seismograph" | "modem" | "sound_level_meter"
+ deployed = Column(Boolean, default=True)
+ retired = Column(Boolean, default=False)
+ note = Column(String, nullable=True)
+ project_id = Column(String, nullable=True)
+ location = Column(String, nullable=True) # Legacy field - use address/coordinates instead
+ address = Column(String, nullable=True) # Human-readable address
+ coordinates = Column(String, nullable=True) # Lat,Lon format: "34.0522,-118.2437"
+ last_updated = Column(DateTime, default=datetime.utcnow)
+
+ # Seismograph-specific fields (nullable for modems and SLMs)
+ last_calibrated = Column(Date, nullable=True)
+ next_calibration_due = Column(Date, nullable=True)
+ deployed_with_modem_id = Column(String, nullable=True) # FK to another RosterUnit
+
+ # Modem-specific fields (nullable for seismographs and SLMs)
+ ip_address = Column(String, nullable=True)
+ phone_number = Column(String, nullable=True)
+ hardware_model = Column(String, nullable=True)
+
+ # Sound Level Meter-specific fields (nullable for seismographs and modems)
+ slm_host = Column(String, nullable=True) # Device IP or hostname
+ slm_tcp_port = Column(Integer, nullable=True) # TCP control port (default 2255)
+ slm_model = Column(String, nullable=True) # NL-43, NL-53, etc.
+ slm_serial_number = Column(String, nullable=True) # Device serial number
+ slm_frequency_weighting = Column(String, nullable=True) # A, C, Z
+ slm_time_weighting = Column(String, nullable=True) # F (Fast), S (Slow), I (Impulse)
+ slm_measurement_range = Column(String, nullable=True) # e.g., "30-130 dB"
+ slm_last_check = Column(DateTime, nullable=True) # Last communication check
+
+
+class IgnoredUnit(Base):
+ """
+ Ignored units: units that report but should be filtered out from unknown emitters.
+ Used to suppress noise from old projects.
+ """
+ __tablename__ = "ignored_units"
+
+ id = Column(String, primary_key=True, index=True)
+ reason = Column(String, nullable=True)
+ ignored_at = Column(DateTime, default=datetime.utcnow)
+
+
+class UnitHistory(Base):
+ """
+ Unit history: complete timeline of changes to each unit.
+ Tracks note changes, status changes, deployment/benched events, and more.
+ """
+ __tablename__ = "unit_history"
+
+ id = Column(Integer, primary_key=True, autoincrement=True)
+ unit_id = Column(String, nullable=False, index=True) # FK to RosterUnit.id
+ change_type = Column(String, nullable=False) # note_change, deployed_change, retired_change, etc.
+ field_name = Column(String, nullable=True) # Which field changed
+ old_value = Column(Text, nullable=True) # Previous value
+ new_value = Column(Text, nullable=True) # New value
+ changed_at = Column(DateTime, default=datetime.utcnow, nullable=False, index=True)
+ source = Column(String, default="manual") # manual, csv_import, telemetry, offline_sync
+ notes = Column(Text, nullable=True) # Optional reason/context for the change
+
+
+class UserPreferences(Base):
+ """
+ User preferences: persistent storage for application settings.
+ Single-row table (id=1) to store global user preferences.
+ """
+ __tablename__ = "user_preferences"
+
+ id = Column(Integer, primary_key=True, default=1)
+ timezone = Column(String, default="America/New_York")
+ theme = Column(String, default="auto") # auto, light, dark
+ auto_refresh_interval = Column(Integer, default=10) # seconds
+ date_format = Column(String, default="MM/DD/YYYY")
+ table_rows_per_page = Column(Integer, default=25)
+ calibration_interval_days = Column(Integer, default=365)
+ calibration_warning_days = Column(Integer, default=30)
+ status_ok_threshold_hours = Column(Integer, default=12)
+ status_pending_threshold_hours = Column(Integer, default=24)
+ updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
diff --git a/backend/routers/activity.py b/backend/routers/activity.py
new file mode 100644
index 0000000..b881a8e
--- /dev/null
+++ b/backend/routers/activity.py
@@ -0,0 +1,146 @@
+from fastapi import APIRouter, Depends
+from sqlalchemy.orm import Session
+from sqlalchemy import desc
+from pathlib import Path
+from datetime import datetime, timedelta, timezone
+from typing import List, Dict, Any, Optional
+from backend.database import get_db
+from backend.models import UnitHistory, Emitter, RosterUnit
+
+router = APIRouter(prefix="/api", tags=["activity"])
+
+PHOTOS_BASE_DIR = Path("data/photos")
+
+
+@router.get("/recent-activity")
+def get_recent_activity(limit: int = 20, db: Session = Depends(get_db)):
+ """
+ Get recent activity feed combining unit history changes and photo uploads.
+ Returns a unified timeline of events sorted by timestamp (newest first).
+ """
+ activities = []
+
+ # Get recent history entries
+ history_entries = db.query(UnitHistory)\
+ .order_by(desc(UnitHistory.changed_at))\
+ .limit(limit * 2)\
+ .all() # Get more than needed to mix with photos
+
+ for entry in history_entries:
+ activity = {
+ "type": "history",
+ "timestamp": entry.changed_at.isoformat(),
+ "timestamp_unix": entry.changed_at.timestamp(),
+ "unit_id": entry.unit_id,
+ "change_type": entry.change_type,
+ "field_name": entry.field_name,
+ "old_value": entry.old_value,
+ "new_value": entry.new_value,
+ "source": entry.source,
+ "notes": entry.notes
+ }
+ activities.append(activity)
+
+ # Get recent photos
+ if PHOTOS_BASE_DIR.exists():
+ image_extensions = {".jpg", ".jpeg", ".png", ".gif", ".webp"}
+ photo_activities = []
+
+ for unit_dir in PHOTOS_BASE_DIR.iterdir():
+ if not unit_dir.is_dir():
+ continue
+
+ unit_id = unit_dir.name
+
+ for file_path in unit_dir.iterdir():
+ if file_path.is_file() and file_path.suffix.lower() in image_extensions:
+ modified_time = file_path.stat().st_mtime
+ photo_activities.append({
+ "type": "photo",
+ "timestamp": datetime.fromtimestamp(modified_time).isoformat(),
+ "timestamp_unix": modified_time,
+ "unit_id": unit_id,
+ "filename": file_path.name,
+ "photo_url": f"/api/unit/{unit_id}/photo/{file_path.name}"
+ })
+
+ activities.extend(photo_activities)
+
+ # Sort all activities by timestamp (newest first)
+ activities.sort(key=lambda x: x["timestamp_unix"], reverse=True)
+
+ # Limit to requested number
+ activities = activities[:limit]
+
+ return {
+ "activities": activities,
+ "total": len(activities)
+ }
+
+
+@router.get("/recent-callins")
+def get_recent_callins(hours: int = 6, limit: Optional[int] = None, db: Session = Depends(get_db)):
+ """
+ Get recent unit call-ins (units that have reported recently).
+ Returns units sorted by most recent last_seen timestamp.
+
+ Args:
+ hours: Look back this many hours (default: 6)
+ limit: Maximum number of results (default: None = all)
+ """
+ # Calculate the time threshold
+ time_threshold = datetime.now(timezone.utc) - timedelta(hours=hours)
+
+ # Query emitters with recent activity, joined with roster info
+ recent_emitters = db.query(Emitter)\
+ .filter(Emitter.last_seen >= time_threshold)\
+ .order_by(desc(Emitter.last_seen))\
+ .all()
+
+ # Get roster info for all units
+ roster_dict = {r.id: r for r in db.query(RosterUnit).all()}
+
+ call_ins = []
+ for emitter in recent_emitters:
+ roster_unit = roster_dict.get(emitter.id)
+
+ # Calculate time since last seen
+ last_seen_utc = emitter.last_seen.replace(tzinfo=timezone.utc) if emitter.last_seen.tzinfo is None else emitter.last_seen
+ time_diff = datetime.now(timezone.utc) - last_seen_utc
+
+ # Format time ago
+ if time_diff.total_seconds() < 60:
+ time_ago = "just now"
+ elif time_diff.total_seconds() < 3600:
+ minutes = int(time_diff.total_seconds() / 60)
+ time_ago = f"{minutes}m ago"
+ else:
+ hours_ago = time_diff.total_seconds() / 3600
+ if hours_ago < 24:
+ time_ago = f"{int(hours_ago)}h {int((hours_ago % 1) * 60)}m ago"
+ else:
+ days = int(hours_ago / 24)
+ time_ago = f"{days}d ago"
+
+ call_in = {
+ "unit_id": emitter.id,
+ "last_seen": emitter.last_seen.isoformat(),
+ "time_ago": time_ago,
+ "status": emitter.status,
+ "device_type": roster_unit.device_type if roster_unit else "seismograph",
+ "deployed": roster_unit.deployed if roster_unit else False,
+ "note": roster_unit.note if roster_unit and roster_unit.note else "",
+ "location": roster_unit.address if roster_unit and roster_unit.address else (roster_unit.location if roster_unit else "")
+ }
+ call_ins.append(call_in)
+
+ # Apply limit if specified
+ if limit:
+ call_ins = call_ins[:limit]
+
+ return {
+ "call_ins": call_ins,
+ "total": len(call_ins),
+ "hours": hours,
+ "time_threshold": time_threshold.isoformat()
+ }
diff --git a/backend/routers/dashboard.py b/backend/routers/dashboard.py
new file mode 100644
index 0000000..525edec
--- /dev/null
+++ b/backend/routers/dashboard.py
@@ -0,0 +1,25 @@
+from fastapi import APIRouter, Request, Depends
+from fastapi.templating import Jinja2Templates
+
+from backend.services.snapshot import emit_status_snapshot
+
+router = APIRouter()
+templates = Jinja2Templates(directory="templates")
+
+
+@router.get("/dashboard/active")
+def dashboard_active(request: Request):
+ snapshot = emit_status_snapshot()
+ return templates.TemplateResponse(
+ "partials/active_table.html",
+ {"request": request, "units": snapshot["active"]}
+ )
+
+
+@router.get("/dashboard/benched")
+def dashboard_benched(request: Request):
+ snapshot = emit_status_snapshot()
+ return templates.TemplateResponse(
+ "partials/benched_table.html",
+ {"request": request, "units": snapshot["benched"]}
+ )
diff --git a/backend/routers/dashboard_tabs.py b/backend/routers/dashboard_tabs.py
new file mode 100644
index 0000000..607ead2
--- /dev/null
+++ b/backend/routers/dashboard_tabs.py
@@ -0,0 +1,34 @@
+# backend/routers/dashboard_tabs.py
+from fastapi import APIRouter, Depends
+from sqlalchemy.orm import Session
+
+from backend.database import get_db
+from backend.services.snapshot import emit_status_snapshot
+
+router = APIRouter(prefix="/dashboard", tags=["dashboard-tabs"])
+
+@router.get("/active")
+def get_active_units(db: Session = Depends(get_db)):
+ """
+ Return only ACTIVE (deployed) units for dashboard table swap.
+ """
+ snap = emit_status_snapshot()
+ units = {
+ uid: u
+ for uid, u in snap["units"].items()
+ if u["deployed"] is True
+ }
+ return {"units": units}
+
+@router.get("/benched")
+def get_benched_units(db: Session = Depends(get_db)):
+ """
+ Return only BENCHED (not deployed) units for dashboard table swap.
+ """
+ snap = emit_status_snapshot()
+ units = {
+ uid: u
+ for uid, u in snap["units"].items()
+ if u["deployed"] is False
+ }
+ return {"units": units}
diff --git a/backend/routers/photos.py b/backend/routers/photos.py
new file mode 100644
index 0000000..6039973
--- /dev/null
+++ b/backend/routers/photos.py
@@ -0,0 +1,242 @@
+from fastapi import APIRouter, HTTPException, UploadFile, File, Depends
+from fastapi.responses import FileResponse, JSONResponse
+from pathlib import Path
+from typing import List, Optional
+from datetime import datetime
+import os
+import shutil
+from PIL import Image
+from PIL.ExifTags import TAGS, GPSTAGS
+from sqlalchemy.orm import Session
+from backend.database import get_db
+from backend.models import RosterUnit
+
+router = APIRouter(prefix="/api", tags=["photos"])
+
+PHOTOS_BASE_DIR = Path("data/photos")
+
+
+def extract_exif_data(image_path: Path) -> dict:
+ """
+ Extract EXIF metadata from an image file.
+ Returns dict with timestamp, GPS coordinates, and other metadata.
+ """
+ try:
+ image = Image.open(image_path)
+ exif_data = image._getexif()
+
+ if not exif_data:
+ return {}
+
+ metadata = {}
+
+ # Extract standard EXIF tags
+ for tag_id, value in exif_data.items():
+ tag = TAGS.get(tag_id, tag_id)
+
+ # Extract datetime
+ if tag == "DateTime" or tag == "DateTimeOriginal":
+ try:
+ metadata["timestamp"] = datetime.strptime(str(value), "%Y:%m:%d %H:%M:%S")
+ except:
+ pass
+
+ # Extract GPS data
+ if tag == "GPSInfo":
+ gps_data = {}
+ for gps_tag_id in value:
+ gps_tag = GPSTAGS.get(gps_tag_id, gps_tag_id)
+ gps_data[gps_tag] = value[gps_tag_id]
+
+ # Convert GPS data to decimal degrees
+ lat = gps_data.get("GPSLatitude")
+ lat_ref = gps_data.get("GPSLatitudeRef")
+ lon = gps_data.get("GPSLongitude")
+ lon_ref = gps_data.get("GPSLongitudeRef")
+
+ if lat and lon and lat_ref and lon_ref:
+ # Convert to decimal degrees
+ lat_decimal = convert_to_degrees(lat)
+ if lat_ref == "S":
+ lat_decimal = -lat_decimal
+
+ lon_decimal = convert_to_degrees(lon)
+ if lon_ref == "W":
+ lon_decimal = -lon_decimal
+
+ metadata["latitude"] = lat_decimal
+ metadata["longitude"] = lon_decimal
+ metadata["coordinates"] = f"{lat_decimal},{lon_decimal}"
+
+ return metadata
+ except Exception as e:
+ print(f"Error extracting EXIF data: {e}")
+ return {}
+
+
+def convert_to_degrees(value):
+ """
+ Convert GPS coordinates from degrees/minutes/seconds to decimal degrees.
+ """
+ d, m, s = value
+ return float(d) + (float(m) / 60.0) + (float(s) / 3600.0)
+
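+# Worked example (illustrative): convert_to_degrees((34, 3, 8.0)) -> 34 + 3/60 + 8/3600, roughly 34.0522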
+
+@router.post("/unit/{unit_id}/upload-photo")
+async def upload_photo(
+ unit_id: str,
+ photo: UploadFile = File(...),
+ auto_populate_coords: bool = True,
+ db: Session = Depends(get_db)
+):
+ """
+ Upload a photo for a unit and extract EXIF metadata.
+ If GPS data exists and auto_populate_coords is True, update the unit's coordinates.
+ """
+ # Validate file type
+ allowed_extensions = {".jpg", ".jpeg", ".png", ".gif", ".webp"}
+ file_ext = Path(photo.filename).suffix.lower()
+
+ if file_ext not in allowed_extensions:
+ raise HTTPException(
+ status_code=400,
+ detail=f"Invalid file type. Allowed: {', '.join(allowed_extensions)}"
+ )
+
+ # Create photos directory for this unit
+ unit_photo_dir = PHOTOS_BASE_DIR / unit_id
+ unit_photo_dir.mkdir(parents=True, exist_ok=True)
+
+ # Generate filename with timestamp to avoid collisions
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+ filename = f"{timestamp}_{photo.filename}"
+ file_path = unit_photo_dir / filename
+
+ # Save the file
+ try:
+ with open(file_path, "wb") as buffer:
+ shutil.copyfileobj(photo.file, buffer)
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=f"Failed to save photo: {str(e)}")
+
+ # Extract EXIF metadata
+ metadata = extract_exif_data(file_path)
+
+ # Update unit coordinates if GPS data exists and auto_populate_coords is True
+ coordinates_updated = False
+ if auto_populate_coords and "coordinates" in metadata:
+ roster_unit = db.query(RosterUnit).filter(RosterUnit.id == unit_id).first()
+
+ if roster_unit:
+ roster_unit.coordinates = metadata["coordinates"]
+ roster_unit.last_updated = datetime.utcnow()
+ db.commit()
+ coordinates_updated = True
+
+ return JSONResponse(content={
+ "success": True,
+ "filename": filename,
+ "file_path": f"/api/unit/{unit_id}/photo/{filename}",
+ "metadata": {
+ "timestamp": metadata.get("timestamp").isoformat() if metadata.get("timestamp") else None,
+ "latitude": metadata.get("latitude"),
+ "longitude": metadata.get("longitude"),
+ "coordinates": metadata.get("coordinates")
+ },
+ "coordinates_updated": coordinates_updated
+ })
+
+
+@router.get("/unit/{unit_id}/photos")
+def get_unit_photos(unit_id: str):
+ """
+    Reads data/photos/<unit_id>/ and returns the list of image filenames.
+    The primary photo is the most recently modified file.
+ """
+ unit_photo_dir = PHOTOS_BASE_DIR / unit_id
+
+ if not unit_photo_dir.exists():
+ # Return empty list if no photos directory exists
+ return {
+ "unit_id": unit_id,
+ "photos": [],
+ "primary_photo": None
+ }
+
+ # Get all image files
+ image_extensions = {".jpg", ".jpeg", ".png", ".gif", ".webp"}
+ photos = []
+
+ for file_path in unit_photo_dir.iterdir():
+ if file_path.is_file() and file_path.suffix.lower() in image_extensions:
+ photos.append({
+ "filename": file_path.name,
+ "path": f"/api/unit/{unit_id}/photo/{file_path.name}",
+ "modified": file_path.stat().st_mtime
+ })
+
+ # Sort by modification time (most recent first)
+ photos.sort(key=lambda x: x["modified"], reverse=True)
+
+ # Primary photo is the most recent
+ primary_photo = photos[0]["filename"] if photos else None
+
+ return {
+ "unit_id": unit_id,
+ "photos": [p["filename"] for p in photos],
+ "primary_photo": primary_photo,
+ "photo_urls": [p["path"] for p in photos]
+ }
+
+
+@router.get("/recent-photos")
+def get_recent_photos(limit: int = 12):
+ """
+ Get the most recently uploaded photos across all units.
+ Returns photos sorted by modification time (newest first).
+ """
+ if not PHOTOS_BASE_DIR.exists():
+ return {"photos": []}
+
+ all_photos = []
+ image_extensions = {".jpg", ".jpeg", ".png", ".gif", ".webp"}
+
+ # Scan all unit directories
+ for unit_dir in PHOTOS_BASE_DIR.iterdir():
+ if not unit_dir.is_dir():
+ continue
+
+ unit_id = unit_dir.name
+
+ # Get all photos in this unit's directory
+ for file_path in unit_dir.iterdir():
+ if file_path.is_file() and file_path.suffix.lower() in image_extensions:
+ all_photos.append({
+ "unit_id": unit_id,
+ "filename": file_path.name,
+ "path": f"/api/unit/{unit_id}/photo/{file_path.name}",
+ "modified": file_path.stat().st_mtime,
+ "modified_iso": datetime.fromtimestamp(file_path.stat().st_mtime).isoformat()
+ })
+
+ # Sort by modification time (most recent first) and limit
+ all_photos.sort(key=lambda x: x["modified"], reverse=True)
+ recent_photos = all_photos[:limit]
+
+ return {
+ "photos": recent_photos,
+ "total": len(all_photos)
+ }
+
+
+@router.get("/unit/{unit_id}/photo/{filename}")
+def get_photo(unit_id: str, filename: str):
+ """
+ Serves a specific photo file.
+ """
+ file_path = PHOTOS_BASE_DIR / unit_id / filename
+
+ if not file_path.exists() or not file_path.is_file():
+ raise HTTPException(status_code=404, detail="Photo not found")
+
+ return FileResponse(file_path)
diff --git a/backend/routers/roster.py b/backend/routers/roster.py
new file mode 100644
index 0000000..d2792e1
--- /dev/null
+++ b/backend/routers/roster.py
@@ -0,0 +1,46 @@
+from fastapi import APIRouter, Depends
+from sqlalchemy.orm import Session
+
+from backend.database import get_db
+from backend.services.snapshot import emit_status_snapshot
+
+router = APIRouter(prefix="/api", tags=["roster"])
+
+
+@router.get("/status-snapshot")
+def get_status_snapshot(db: Session = Depends(get_db)):
+ """
+ Calls emit_status_snapshot() to get current fleet status.
+ This will be replaced with real Series3 emitter logic later.
+ """
+ return emit_status_snapshot()
+
+
+@router.get("/roster")
+def get_roster(db: Session = Depends(get_db)):
+ """
+ Returns list of units with their metadata and status.
+ Uses mock data for now.
+ """
+ snapshot = emit_status_snapshot()
+ units_list = []
+
+ for unit_id, unit_data in snapshot["units"].items():
+ units_list.append({
+ "id": unit_id,
+ "status": unit_data["status"],
+ "age": unit_data["age"],
+ "last_seen": unit_data["last"],
+ "deployed": unit_data["deployed"],
+ "note": unit_data.get("note", ""),
+ "last_file": unit_data.get("fname", "")
+ })
+
+ # Sort by status priority (Missing > Pending > OK) then by ID
+ status_priority = {"Missing": 0, "Pending": 1, "OK": 2}
+ units_list.sort(key=lambda x: (status_priority.get(x["status"], 3), x["id"]))
+
+ return {"units": units_list}
diff --git a/backend/routers/roster_edit.py b/backend/routers/roster_edit.py
new file mode 100644
index 0000000..a495885
--- /dev/null
+++ b/backend/routers/roster_edit.py
@@ -0,0 +1,529 @@
+from fastapi import APIRouter, Depends, HTTPException, Form, UploadFile, File
+from sqlalchemy.orm import Session
+from datetime import datetime, date
+import csv
+import io
+
+from backend.database import get_db
+from backend.models import RosterUnit, IgnoredUnit, Emitter, UnitHistory
+
+router = APIRouter(prefix="/api/roster", tags=["roster-edit"])
+
+
+def record_history(db: Session, unit_id: str, change_type: str, field_name: str = None,
+ old_value: str = None, new_value: str = None, source: str = "manual", notes: str = None):
+ """Helper function to record a change in unit history"""
+ history_entry = UnitHistory(
+ unit_id=unit_id,
+ change_type=change_type,
+ field_name=field_name,
+ old_value=old_value,
+ new_value=new_value,
+ changed_at=datetime.utcnow(),
+ source=source,
+ notes=notes
+ )
+ db.add(history_entry)
+ # Note: caller is responsible for db.commit()
+
+
+def get_or_create_roster_unit(db: Session, unit_id: str):
+ unit = db.query(RosterUnit).filter(RosterUnit.id == unit_id).first()
+ if not unit:
+ unit = RosterUnit(id=unit_id)
+ db.add(unit)
+ db.commit()
+ db.refresh(unit)
+ return unit
+
+
+@router.post("/add")
+def add_roster_unit(
+ id: str = Form(...),
+ device_type: str = Form("seismograph"),
+ unit_type: str = Form("series3"),
+ deployed: bool = Form(False),
+ retired: bool = Form(False),
+ note: str = Form(""),
+ project_id: str = Form(None),
+ location: str = Form(None),
+ address: str = Form(None),
+ coordinates: str = Form(None),
+ # Seismograph-specific fields
+ last_calibrated: str = Form(None),
+ next_calibration_due: str = Form(None),
+ deployed_with_modem_id: str = Form(None),
+ # Modem-specific fields
+ ip_address: str = Form(None),
+ phone_number: str = Form(None),
+ hardware_model: str = Form(None),
+ db: Session = Depends(get_db)
+):
+ if db.query(RosterUnit).filter(RosterUnit.id == id).first():
+ raise HTTPException(status_code=400, detail="Unit already exists")
+
+ # Parse date fields if provided
+ last_cal_date = None
+ if last_calibrated:
+ try:
+ last_cal_date = datetime.strptime(last_calibrated, "%Y-%m-%d").date()
+ except ValueError:
+ raise HTTPException(status_code=400, detail="Invalid last_calibrated date format. Use YYYY-MM-DD")
+
+ next_cal_date = None
+ if next_calibration_due:
+ try:
+ next_cal_date = datetime.strptime(next_calibration_due, "%Y-%m-%d").date()
+ except ValueError:
+ raise HTTPException(status_code=400, detail="Invalid next_calibration_due date format. Use YYYY-MM-DD")
+
+ unit = RosterUnit(
+ id=id,
+ device_type=device_type,
+ unit_type=unit_type,
+ deployed=deployed,
+ retired=retired,
+ note=note,
+ project_id=project_id,
+ location=location,
+ address=address,
+ coordinates=coordinates,
+ last_updated=datetime.utcnow(),
+ # Seismograph-specific fields
+ last_calibrated=last_cal_date,
+ next_calibration_due=next_cal_date,
+ deployed_with_modem_id=deployed_with_modem_id if deployed_with_modem_id else None,
+ # Modem-specific fields
+ ip_address=ip_address if ip_address else None,
+ phone_number=phone_number if phone_number else None,
+ hardware_model=hardware_model if hardware_model else None,
+ )
+ db.add(unit)
+ db.commit()
+ return {"message": "Unit added", "id": id, "device_type": device_type}
+
+
+@router.get("/{unit_id}")
+def get_roster_unit(unit_id: str, db: Session = Depends(get_db)):
+ """Get a single roster unit by ID"""
+ unit = db.query(RosterUnit).filter(RosterUnit.id == unit_id).first()
+ if not unit:
+ raise HTTPException(status_code=404, detail="Unit not found")
+
+ return {
+ "id": unit.id,
+ "device_type": unit.device_type or "seismograph",
+ "unit_type": unit.unit_type,
+ "deployed": unit.deployed,
+ "retired": unit.retired,
+ "note": unit.note or "",
+ "project_id": unit.project_id or "",
+ "location": unit.location or "",
+ "address": unit.address or "",
+ "coordinates": unit.coordinates or "",
+ "last_calibrated": unit.last_calibrated.isoformat() if unit.last_calibrated else "",
+ "next_calibration_due": unit.next_calibration_due.isoformat() if unit.next_calibration_due else "",
+ "deployed_with_modem_id": unit.deployed_with_modem_id or "",
+ "ip_address": unit.ip_address or "",
+ "phone_number": unit.phone_number or "",
+ "hardware_model": unit.hardware_model or "",
+ }
+
+
+@router.post("/edit/{unit_id}")
+def edit_roster_unit(
+ unit_id: str,
+ device_type: str = Form("seismograph"),
+ unit_type: str = Form("series3"),
+ deployed: bool = Form(False),
+ retired: bool = Form(False),
+ note: str = Form(""),
+ project_id: str = Form(None),
+ location: str = Form(None),
+ address: str = Form(None),
+ coordinates: str = Form(None),
+ # Seismograph-specific fields
+ last_calibrated: str = Form(None),
+ next_calibration_due: str = Form(None),
+ deployed_with_modem_id: str = Form(None),
+ # Modem-specific fields
+ ip_address: str = Form(None),
+ phone_number: str = Form(None),
+ hardware_model: str = Form(None),
+ db: Session = Depends(get_db)
+):
+ unit = db.query(RosterUnit).filter(RosterUnit.id == unit_id).first()
+ if not unit:
+ raise HTTPException(status_code=404, detail="Unit not found")
+
+ # Parse date fields if provided
+ last_cal_date = None
+ if last_calibrated:
+ try:
+ last_cal_date = datetime.strptime(last_calibrated, "%Y-%m-%d").date()
+ except ValueError:
+ raise HTTPException(status_code=400, detail="Invalid last_calibrated date format. Use YYYY-MM-DD")
+
+ next_cal_date = None
+ if next_calibration_due:
+ try:
+ next_cal_date = datetime.strptime(next_calibration_due, "%Y-%m-%d").date()
+ except ValueError:
+ raise HTTPException(status_code=400, detail="Invalid next_calibration_due date format. Use YYYY-MM-DD")
+
+ # Track changes for history
+ old_note = unit.note
+ old_deployed = unit.deployed
+ old_retired = unit.retired
+
+ # Update all fields
+ unit.device_type = device_type
+ unit.unit_type = unit_type
+ unit.deployed = deployed
+ unit.retired = retired
+ unit.note = note
+ unit.project_id = project_id
+ unit.location = location
+ unit.address = address
+ unit.coordinates = coordinates
+ unit.last_updated = datetime.utcnow()
+
+ # Seismograph-specific fields
+ unit.last_calibrated = last_cal_date
+ unit.next_calibration_due = next_cal_date
+ unit.deployed_with_modem_id = deployed_with_modem_id if deployed_with_modem_id else None
+
+ # Modem-specific fields
+ unit.ip_address = ip_address if ip_address else None
+ unit.phone_number = phone_number if phone_number else None
+ unit.hardware_model = hardware_model if hardware_model else None
+
+ # Record history entries for changed fields
+ if old_note != note:
+ record_history(db, unit_id, "note_change", "note", old_note, note, "manual")
+
+ if old_deployed != deployed:
+ status_text = "deployed" if deployed else "benched"
+ old_status_text = "deployed" if old_deployed else "benched"
+ record_history(db, unit_id, "deployed_change", "deployed", old_status_text, status_text, "manual")
+
+ if old_retired != retired:
+ status_text = "retired" if retired else "active"
+ old_status_text = "retired" if old_retired else "active"
+ record_history(db, unit_id, "retired_change", "retired", old_status_text, status_text, "manual")
+
+ db.commit()
+ return {"message": "Unit updated", "id": unit_id, "device_type": device_type}
+
+
+@router.post("/set-deployed/{unit_id}")
+def set_deployed(unit_id: str, deployed: bool = Form(...), db: Session = Depends(get_db)):
+ unit = get_or_create_roster_unit(db, unit_id)
+ old_deployed = unit.deployed
+ unit.deployed = deployed
+ unit.last_updated = datetime.utcnow()
+
+ # Record history entry for deployed status change
+ if old_deployed != deployed:
+ status_text = "deployed" if deployed else "benched"
+ old_status_text = "deployed" if old_deployed else "benched"
+ record_history(
+ db=db,
+ unit_id=unit_id,
+ change_type="deployed_change",
+ field_name="deployed",
+ old_value=old_status_text,
+ new_value=status_text,
+ source="manual"
+ )
+
+ db.commit()
+ return {"message": "Updated", "id": unit_id, "deployed": deployed}
+
+
+@router.post("/set-retired/{unit_id}")
+def set_retired(unit_id: str, retired: bool = Form(...), db: Session = Depends(get_db)):
+ unit = get_or_create_roster_unit(db, unit_id)
+ old_retired = unit.retired
+ unit.retired = retired
+ unit.last_updated = datetime.utcnow()
+
+ # Record history entry for retired status change
+ if old_retired != retired:
+ status_text = "retired" if retired else "active"
+ old_status_text = "retired" if old_retired else "active"
+ record_history(
+ db=db,
+ unit_id=unit_id,
+ change_type="retired_change",
+ field_name="retired",
+ old_value=old_status_text,
+ new_value=status_text,
+ source="manual"
+ )
+
+ db.commit()
+ return {"message": "Updated", "id": unit_id, "retired": retired}
+
+
+@router.delete("/{unit_id}")
+def delete_roster_unit(unit_id: str, db: Session = Depends(get_db)):
+ """
+ Permanently delete a unit from the database.
+ Checks roster, emitters, and ignored_units tables and deletes from any table where the unit exists.
+ """
+ deleted = False
+
+ # Try to delete from roster table
+ roster_unit = db.query(RosterUnit).filter(RosterUnit.id == unit_id).first()
+ if roster_unit:
+ db.delete(roster_unit)
+ deleted = True
+
+ # Try to delete from emitters table
+ emitter = db.query(Emitter).filter(Emitter.id == unit_id).first()
+ if emitter:
+ db.delete(emitter)
+ deleted = True
+
+ # Try to delete from ignored_units table
+ ignored_unit = db.query(IgnoredUnit).filter(IgnoredUnit.id == unit_id).first()
+ if ignored_unit:
+ db.delete(ignored_unit)
+ deleted = True
+
+ # If not found in any table, return error
+ if not deleted:
+ raise HTTPException(status_code=404, detail="Unit not found")
+
+ db.commit()
+ return {"message": "Unit deleted", "id": unit_id}
+
+
+@router.post("/set-note/{unit_id}")
+def set_note(unit_id: str, note: str = Form(""), db: Session = Depends(get_db)):
+ unit = get_or_create_roster_unit(db, unit_id)
+ old_note = unit.note
+ unit.note = note
+ unit.last_updated = datetime.utcnow()
+
+ # Record history entry for note change
+ if old_note != note:
+ record_history(
+ db=db,
+ unit_id=unit_id,
+ change_type="note_change",
+ field_name="note",
+ old_value=old_note,
+ new_value=note,
+ source="manual"
+ )
+
+ db.commit()
+ return {"message": "Updated", "id": unit_id, "note": note}
+
+
+@router.post("/import-csv")
+async def import_csv(
+ file: UploadFile = File(...),
+ update_existing: bool = Form(True),
+ db: Session = Depends(get_db)
+):
+ """
+ Import roster units from CSV file.
+
+ Expected CSV columns (unit_id is required, others are optional):
+ - unit_id: Unique identifier for the unit
+ - unit_type: Type of unit (default: "series3")
+ - deployed: Boolean for deployment status (default: False)
+ - retired: Boolean for retirement status (default: False)
+ - note: Notes about the unit
+ - project_id: Project identifier
+ - location: Location description
+
+ Args:
+ file: CSV file upload
+ update_existing: If True, update existing units; if False, skip them
+ """
+
+ if not file.filename.endswith('.csv'):
+ raise HTTPException(status_code=400, detail="File must be a CSV")
+
+ # Read file content
+ contents = await file.read()
+ csv_text = contents.decode('utf-8')
+ csv_reader = csv.DictReader(io.StringIO(csv_text))
+
+ results = {
+ "added": [],
+ "updated": [],
+ "skipped": [],
+ "errors": []
+ }
+
+ for row_num, row in enumerate(csv_reader, start=2): # Start at 2 to account for header
+ try:
+ # Validate required field
+ unit_id = row.get('unit_id', '').strip()
+ if not unit_id:
+ results["errors"].append({
+ "row": row_num,
+ "error": "Missing required field: unit_id"
+ })
+ continue
+
+ # Check if unit exists
+ existing_unit = db.query(RosterUnit).filter(RosterUnit.id == unit_id).first()
+
+ if existing_unit:
+ if not update_existing:
+ results["skipped"].append(unit_id)
+ continue
+
+ # Update existing unit
+ existing_unit.unit_type = row.get('unit_type', existing_unit.unit_type or 'series3')
+ existing_unit.deployed = row.get('deployed', '').lower() in ('true', '1', 'yes') if row.get('deployed') else existing_unit.deployed
+ existing_unit.retired = row.get('retired', '').lower() in ('true', '1', 'yes') if row.get('retired') else existing_unit.retired
+ existing_unit.note = row.get('note', existing_unit.note or '')
+ existing_unit.project_id = row.get('project_id', existing_unit.project_id)
+ existing_unit.location = row.get('location', existing_unit.location)
+ existing_unit.address = row.get('address', existing_unit.address)
+ existing_unit.coordinates = row.get('coordinates', existing_unit.coordinates)
+ existing_unit.last_updated = datetime.utcnow()
+
+ results["updated"].append(unit_id)
+ else:
+ # Create new unit
+ new_unit = RosterUnit(
+ id=unit_id,
+ unit_type=row.get('unit_type', 'series3'),
+ deployed=row.get('deployed', '').lower() in ('true', '1', 'yes'),
+ retired=row.get('retired', '').lower() in ('true', '1', 'yes'),
+ note=row.get('note', ''),
+ project_id=row.get('project_id'),
+ location=row.get('location'),
+ address=row.get('address'),
+ coordinates=row.get('coordinates'),
+ last_updated=datetime.utcnow()
+ )
+ db.add(new_unit)
+ results["added"].append(unit_id)
+
+ except Exception as e:
+ results["errors"].append({
+ "row": row_num,
+ "unit_id": row.get('unit_id', 'unknown'),
+ "error": str(e)
+ })
+
+ # Commit all changes
+ try:
+ db.commit()
+ except Exception as e:
+ db.rollback()
+ raise HTTPException(status_code=500, detail=f"Database error: {str(e)}")
+
+ return {
+ "message": "CSV import completed",
+ "summary": {
+ "added": len(results["added"]),
+ "updated": len(results["updated"]),
+ "skipped": len(results["skipped"]),
+ "errors": len(results["errors"])
+ },
+ "details": results
+ }
+
+
+@router.post("/ignore/{unit_id}")
+def ignore_unit(unit_id: str, reason: str = Form(""), db: Session = Depends(get_db)):
+ """
+ Add a unit to the ignore list to suppress it from unknown emitters.
+ """
+ # Check if already ignored
+ if db.query(IgnoredUnit).filter(IgnoredUnit.id == unit_id).first():
+ raise HTTPException(status_code=400, detail="Unit already ignored")
+
+ ignored = IgnoredUnit(
+ id=unit_id,
+ reason=reason,
+ ignored_at=datetime.utcnow()
+ )
+ db.add(ignored)
+ db.commit()
+ return {"message": "Unit ignored", "id": unit_id}
+
+
+@router.delete("/ignore/{unit_id}")
+def unignore_unit(unit_id: str, db: Session = Depends(get_db)):
+ """
+ Remove a unit from the ignore list.
+ """
+ ignored = db.query(IgnoredUnit).filter(IgnoredUnit.id == unit_id).first()
+ if not ignored:
+ raise HTTPException(status_code=404, detail="Unit not in ignore list")
+
+ db.delete(ignored)
+ db.commit()
+ return {"message": "Unit unignored", "id": unit_id}
+
+
+@router.get("/ignored")
+def list_ignored_units(db: Session = Depends(get_db)):
+ """
+ Get list of all ignored units.
+ """
+ ignored_units = db.query(IgnoredUnit).all()
+ return {
+ "ignored": [
+ {
+ "id": unit.id,
+ "reason": unit.reason,
+ "ignored_at": unit.ignored_at.isoformat()
+ }
+ for unit in ignored_units
+ ]
+ }
+
+
+@router.get("/history/{unit_id}")
+def get_unit_history(unit_id: str, db: Session = Depends(get_db)):
+ """
+ Get complete history timeline for a unit.
+ Returns all historical changes ordered by most recent first.
+ """
+ history_entries = db.query(UnitHistory).filter(
+ UnitHistory.unit_id == unit_id
+ ).order_by(UnitHistory.changed_at.desc()).all()
+
+ return {
+ "unit_id": unit_id,
+ "history": [
+ {
+ "id": entry.id,
+ "change_type": entry.change_type,
+ "field_name": entry.field_name,
+ "old_value": entry.old_value,
+ "new_value": entry.new_value,
+ "changed_at": entry.changed_at.isoformat(),
+ "source": entry.source,
+ "notes": entry.notes
+ }
+ for entry in history_entries
+ ]
+ }
+
+
+@router.delete("/history/{history_id}")
+def delete_history_entry(history_id: int, db: Session = Depends(get_db)):
+ """
+ Delete a specific history entry by ID.
+ Allows manual cleanup of old history entries.
+ """
+ history_entry = db.query(UnitHistory).filter(UnitHistory.id == history_id).first()
+ if not history_entry:
+ raise HTTPException(status_code=404, detail="History entry not found")
+
+ db.delete(history_entry)
+ db.commit()
+ return {"message": "History entry deleted", "id": history_id}
diff --git a/backend/routers/settings.py b/backend/routers/settings.py
new file mode 100644
index 0000000..bb14357
--- /dev/null
+++ b/backend/routers/settings.py
@@ -0,0 +1,479 @@
+from fastapi import APIRouter, Depends, HTTPException, UploadFile, File
+from fastapi.responses import StreamingResponse, FileResponse
+from sqlalchemy.orm import Session
+from datetime import datetime, date
+from pydantic import BaseModel
+from typing import Optional
+import csv
+import io
+import shutil
+from pathlib import Path
+
+from backend.database import get_db
+from backend.models import RosterUnit, Emitter, IgnoredUnit, UserPreferences
+from backend.services.database_backup import DatabaseBackupService
+
+router = APIRouter(prefix="/api/settings", tags=["settings"])
+
+
+@router.get("/export-csv")
+def export_roster_csv(db: Session = Depends(get_db)):
+ """Export all roster units to CSV"""
+ units = db.query(RosterUnit).all()
+
+ # Create CSV in memory
+ output = io.StringIO()
+ fieldnames = [
+ 'unit_id', 'unit_type', 'device_type', 'deployed', 'retired',
+ 'note', 'project_id', 'location', 'address', 'coordinates',
+ 'last_calibrated', 'next_calibration_due', 'deployed_with_modem_id',
+ 'ip_address', 'phone_number', 'hardware_model'
+ ]
+
+ writer = csv.DictWriter(output, fieldnames=fieldnames)
+ writer.writeheader()
+
+ for unit in units:
+ writer.writerow({
+ 'unit_id': unit.id,
+ 'unit_type': unit.unit_type or '',
+ 'device_type': unit.device_type or 'seismograph',
+ 'deployed': 'true' if unit.deployed else 'false',
+ 'retired': 'true' if unit.retired else 'false',
+ 'note': unit.note or '',
+ 'project_id': unit.project_id or '',
+ 'location': unit.location or '',
+ 'address': unit.address or '',
+ 'coordinates': unit.coordinates or '',
+ 'last_calibrated': unit.last_calibrated.strftime('%Y-%m-%d') if unit.last_calibrated else '',
+ 'next_calibration_due': unit.next_calibration_due.strftime('%Y-%m-%d') if unit.next_calibration_due else '',
+ 'deployed_with_modem_id': unit.deployed_with_modem_id or '',
+ 'ip_address': unit.ip_address or '',
+ 'phone_number': unit.phone_number or '',
+ 'hardware_model': unit.hardware_model or ''
+ })
+
+ output.seek(0)
+ filename = f"roster_export_{date.today().isoformat()}.csv"
+
+ return StreamingResponse(
+ io.BytesIO(output.getvalue().encode('utf-8')),
+ media_type="text/csv",
+ headers={"Content-Disposition": f"attachment; filename={filename}"}
+ )
+
+
+@router.get("/stats")
+def get_table_stats(db: Session = Depends(get_db)):
+ """Get counts for all tables"""
+ roster_count = db.query(RosterUnit).count()
+ emitters_count = db.query(Emitter).count()
+ ignored_count = db.query(IgnoredUnit).count()
+
+ return {
+ "roster": roster_count,
+ "emitters": emitters_count,
+ "ignored": ignored_count,
+ "total": roster_count + emitters_count + ignored_count
+ }
+
+
+@router.get("/roster-units")
+def get_all_roster_units(db: Session = Depends(get_db)):
+ """Get all roster units for management table"""
+ units = db.query(RosterUnit).order_by(RosterUnit.id).all()
+
+ return [{
+ "id": unit.id,
+ "device_type": unit.device_type or "seismograph",
+ "unit_type": unit.unit_type or "series3",
+ "deployed": unit.deployed,
+ "retired": unit.retired,
+ "note": unit.note or "",
+ "project_id": unit.project_id or "",
+ "location": unit.location or "",
+ "address": unit.address or "",
+ "coordinates": unit.coordinates or "",
+ "last_calibrated": unit.last_calibrated.isoformat() if unit.last_calibrated else None,
+ "next_calibration_due": unit.next_calibration_due.isoformat() if unit.next_calibration_due else None,
+ "deployed_with_modem_id": unit.deployed_with_modem_id or "",
+ "ip_address": unit.ip_address or "",
+ "phone_number": unit.phone_number or "",
+ "hardware_model": unit.hardware_model or "",
+ "slm_host": unit.slm_host or "",
+ "slm_tcp_port": unit.slm_tcp_port,
+ "slm_model": unit.slm_model or "",
+ "slm_serial_number": unit.slm_serial_number or "",
+ "slm_frequency_weighting": unit.slm_frequency_weighting or "",
+ "slm_time_weighting": unit.slm_time_weighting or "",
+ "slm_measurement_range": unit.slm_measurement_range or "",
+ "slm_last_check": unit.slm_last_check.isoformat() if unit.slm_last_check else None,
+ "last_updated": unit.last_updated.isoformat() if unit.last_updated else None
+ } for unit in units]
+
+
+def parse_date(date_str):
+ """Helper function to parse date strings"""
+ if not date_str or not date_str.strip():
+ return None
+ try:
+ return datetime.strptime(date_str.strip(), "%Y-%m-%d").date()
+ except ValueError:
+ return None
+
+
+@router.post("/import-csv-replace")
+async def import_csv_replace(
+ file: UploadFile = File(...),
+ db: Session = Depends(get_db)
+):
+ """
+ Replace all roster data with CSV import (atomic transaction).
+ Clears roster table first, then imports all rows from CSV.
+ """
+
+ if not file.filename.endswith('.csv'):
+ raise HTTPException(status_code=400, detail="File must be a CSV")
+
+ # Read and parse CSV
+ contents = await file.read()
+ csv_text = contents.decode('utf-8')
+ csv_reader = csv.DictReader(io.StringIO(csv_text))
+
+ # Parse all rows FIRST (fail fast before deletion)
+ parsed_units = []
+ for row_num, row in enumerate(csv_reader, start=2):
+ unit_id = row.get('unit_id', '').strip()
+ if not unit_id:
+ raise HTTPException(
+ status_code=400,
+ detail=f"Row {row_num}: Missing required field unit_id"
+ )
+
+ # Parse and validate dates
+ last_cal_date = parse_date(row.get('last_calibrated'))
+ next_cal_date = parse_date(row.get('next_calibration_due'))
+
+ parsed_units.append({
+ 'id': unit_id,
+ 'unit_type': row.get('unit_type', 'series3'),
+ 'device_type': row.get('device_type', 'seismograph'),
+ 'deployed': row.get('deployed', '').lower() in ('true', '1', 'yes'),
+ 'retired': row.get('retired', '').lower() in ('true', '1', 'yes'),
+ 'note': row.get('note', ''),
+ 'project_id': row.get('project_id') or None,
+ 'location': row.get('location') or None,
+ 'address': row.get('address') or None,
+ 'coordinates': row.get('coordinates') or None,
+ 'last_calibrated': last_cal_date,
+ 'next_calibration_due': next_cal_date,
+ 'deployed_with_modem_id': row.get('deployed_with_modem_id') or None,
+ 'ip_address': row.get('ip_address') or None,
+ 'phone_number': row.get('phone_number') or None,
+ 'hardware_model': row.get('hardware_model') or None,
+ })
+
+ # Atomic transaction: delete all, then insert all
+ try:
+ deleted_count = db.query(RosterUnit).delete()
+
+ for unit_data in parsed_units:
+ new_unit = RosterUnit(**unit_data, last_updated=datetime.utcnow())
+ db.add(new_unit)
+
+ db.commit()
+
+ return {
+ "message": "Roster replaced successfully",
+ "deleted": deleted_count,
+ "added": len(parsed_units)
+ }
+
+ except Exception as e:
+ db.rollback()
+ raise HTTPException(status_code=500, detail=f"Import failed: {str(e)}")
+
+
+@router.post("/clear-all")
+def clear_all_data(db: Session = Depends(get_db)):
+ """Clear all tables (roster, emitters, ignored)"""
+ try:
+ roster_count = db.query(RosterUnit).delete()
+ emitters_count = db.query(Emitter).delete()
+ ignored_count = db.query(IgnoredUnit).delete()
+
+ db.commit()
+
+ return {
+ "message": "All data cleared",
+ "deleted": {
+ "roster": roster_count,
+ "emitters": emitters_count,
+ "ignored": ignored_count,
+ "total": roster_count + emitters_count + ignored_count
+ }
+ }
+ except Exception as e:
+ db.rollback()
+ raise HTTPException(status_code=500, detail=f"Clear failed: {str(e)}")
+
+
+@router.post("/clear-roster")
+def clear_roster(db: Session = Depends(get_db)):
+ """Clear roster table only"""
+ try:
+ count = db.query(RosterUnit).delete()
+ db.commit()
+ return {"message": "Roster cleared", "deleted": count}
+ except Exception as e:
+ db.rollback()
+ raise HTTPException(status_code=500, detail=f"Clear failed: {str(e)}")
+
+
+@router.post("/clear-emitters")
+def clear_emitters(db: Session = Depends(get_db)):
+ """Clear emitters table only"""
+ try:
+ count = db.query(Emitter).delete()
+ db.commit()
+ return {"message": "Emitters cleared", "deleted": count}
+ except Exception as e:
+ db.rollback()
+ raise HTTPException(status_code=500, detail=f"Clear failed: {str(e)}")
+
+
+@router.post("/clear-ignored")
+def clear_ignored(db: Session = Depends(get_db)):
+ """Clear ignored units table only"""
+ try:
+ count = db.query(IgnoredUnit).delete()
+ db.commit()
+ return {"message": "Ignored units cleared", "deleted": count}
+ except Exception as e:
+ db.rollback()
+ raise HTTPException(status_code=500, detail=f"Clear failed: {str(e)}")
+
+
+# User Preferences Endpoints
+
+class PreferencesUpdate(BaseModel):
+ """Schema for updating user preferences (all fields optional)"""
+ timezone: Optional[str] = None
+ theme: Optional[str] = None
+ auto_refresh_interval: Optional[int] = None
+ date_format: Optional[str] = None
+ table_rows_per_page: Optional[int] = None
+ calibration_interval_days: Optional[int] = None
+ calibration_warning_days: Optional[int] = None
+ status_ok_threshold_hours: Optional[int] = None
+ status_pending_threshold_hours: Optional[int] = None
+
+
+@router.get("/preferences")
+def get_preferences(db: Session = Depends(get_db)):
+ """
+ Get user preferences. Creates default preferences if none exist.
+ """
+ prefs = db.query(UserPreferences).filter(UserPreferences.id == 1).first()
+
+ if not prefs:
+ # Create default preferences
+ prefs = UserPreferences(id=1)
+ db.add(prefs)
+ db.commit()
+ db.refresh(prefs)
+
+ return {
+ "timezone": prefs.timezone,
+ "theme": prefs.theme,
+ "auto_refresh_interval": prefs.auto_refresh_interval,
+ "date_format": prefs.date_format,
+ "table_rows_per_page": prefs.table_rows_per_page,
+ "calibration_interval_days": prefs.calibration_interval_days,
+ "calibration_warning_days": prefs.calibration_warning_days,
+ "status_ok_threshold_hours": prefs.status_ok_threshold_hours,
+ "status_pending_threshold_hours": prefs.status_pending_threshold_hours,
+ "updated_at": prefs.updated_at.isoformat() if prefs.updated_at else None
+ }
+
+
+@router.put("/preferences")
+def update_preferences(
+ updates: PreferencesUpdate,
+ db: Session = Depends(get_db)
+):
+ """
+ Update user preferences. Accepts partial updates.
+ Creates default preferences if none exist.
+ """
+ prefs = db.query(UserPreferences).filter(UserPreferences.id == 1).first()
+
+ if not prefs:
+ # Create default preferences
+ prefs = UserPreferences(id=1)
+ db.add(prefs)
+
+ # Update only provided fields
+ update_data = updates.dict(exclude_unset=True)
+ for field, value in update_data.items():
+ setattr(prefs, field, value)
+
+ prefs.updated_at = datetime.utcnow()
+
+ db.commit()
+ db.refresh(prefs)
+
+ return {
+ "message": "Preferences updated successfully",
+ "timezone": prefs.timezone,
+ "theme": prefs.theme,
+ "auto_refresh_interval": prefs.auto_refresh_interval,
+ "date_format": prefs.date_format,
+ "table_rows_per_page": prefs.table_rows_per_page,
+ "calibration_interval_days": prefs.calibration_interval_days,
+ "calibration_warning_days": prefs.calibration_warning_days,
+ "status_ok_threshold_hours": prefs.status_ok_threshold_hours,
+ "status_pending_threshold_hours": prefs.status_pending_threshold_hours,
+ "updated_at": prefs.updated_at.isoformat() if prefs.updated_at else None
+ }
+
+
+# Database Management Endpoints
+
+backup_service = DatabaseBackupService()
+
+
+@router.get("/database/stats")
+def get_database_stats():
+ """Get current database statistics"""
+ try:
+ stats = backup_service.get_database_stats()
+ return stats
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=f"Failed to get database stats: {str(e)}")
+
+
+@router.post("/database/snapshot")
+def create_database_snapshot(description: Optional[str] = None):
+ """Create a full database snapshot"""
+ try:
+ snapshot = backup_service.create_snapshot(description=description)
+ return {
+ "message": "Snapshot created successfully",
+ "snapshot": snapshot
+ }
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=f"Snapshot creation failed: {str(e)}")
+
+
+@router.get("/database/snapshots")
+def list_database_snapshots():
+ """List all available database snapshots"""
+ try:
+ snapshots = backup_service.list_snapshots()
+ return {
+ "snapshots": snapshots,
+ "count": len(snapshots)
+ }
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=f"Failed to list snapshots: {str(e)}")
+
+
+@router.get("/database/snapshot/{filename}")
+def download_snapshot(filename: str):
+ """Download a specific snapshot file"""
+ try:
+ snapshot_path = backup_service.download_snapshot(filename)
+ return FileResponse(
+ path=str(snapshot_path),
+ filename=filename,
+ media_type="application/x-sqlite3"
+ )
+ except FileNotFoundError:
+ raise HTTPException(status_code=404, detail=f"Snapshot {filename} not found")
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=f"Download failed: {str(e)}")
+
+
+@router.delete("/database/snapshot/{filename}")
+def delete_database_snapshot(filename: str):
+ """Delete a specific snapshot"""
+ try:
+ backup_service.delete_snapshot(filename)
+ return {
+ "message": f"Snapshot {filename} deleted successfully",
+ "filename": filename
+ }
+ except FileNotFoundError:
+ raise HTTPException(status_code=404, detail=f"Snapshot {filename} not found")
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=f"Delete failed: {str(e)}")
+
+
+class RestoreRequest(BaseModel):
+ """Schema for restore request"""
+ filename: str
+ create_backup: bool = True
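+    # Example request body (filename is illustrative, matching the snapshot_<timestamp>.db
+    # naming used by create_snapshot): {"filename": "snapshot_20251216_120000.db", "create_backup": true}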
+
+
+@router.post("/database/restore")
+def restore_database(request: RestoreRequest, db: Session = Depends(get_db)):
+ """Restore database from a snapshot"""
+ try:
+ # Close the database connection before restoring
+ db.close()
+
+ result = backup_service.restore_snapshot(
+ filename=request.filename,
+ create_backup_before_restore=request.create_backup
+ )
+
+ return result
+ except FileNotFoundError:
+ raise HTTPException(status_code=404, detail=f"Snapshot {request.filename} not found")
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=f"Restore failed: {str(e)}")
+
+
+@router.post("/database/upload-snapshot")
+async def upload_snapshot(file: UploadFile = File(...)):
+ """Upload a snapshot file to the backups directory"""
+ if not file.filename.endswith('.db'):
+ raise HTTPException(status_code=400, detail="File must be a .db file")
+
+ try:
+ # Save uploaded file to backups directory
+ backups_dir = Path("./data/backups")
+ backups_dir.mkdir(parents=True, exist_ok=True)
+
+ timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
+ uploaded_filename = f"snapshot_uploaded_{timestamp}.db"
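+        # The snapshot_ prefix matters: list_snapshots() globs snapshot_*.db, so uploaded
+        # files show up alongside locally created snapshots.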
+ file_path = backups_dir / uploaded_filename
+
+ # Save file
+ with open(file_path, "wb") as buffer:
+ shutil.copyfileobj(file.file, buffer)
+
+ # Create metadata
+ metadata = {
+ "filename": uploaded_filename,
+ "created_at": timestamp,
+ "created_at_iso": datetime.utcnow().isoformat(),
+ "description": f"Uploaded: {file.filename}",
+ "size_bytes": file_path.stat().st_size,
+ "size_mb": round(file_path.stat().st_size / (1024 * 1024), 2),
+ "type": "uploaded"
+ }
+
+ metadata_path = backups_dir / f"{uploaded_filename}.meta.json"
+ with open(metadata_path, 'w') as f:
+ json.dump(metadata, f, indent=2)
+
+ return {
+ "message": "Snapshot uploaded successfully",
+ "snapshot": metadata
+ }
+
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=f"Upload failed: {str(e)}")
diff --git a/backend/routers/slm_ui.py b/backend/routers/slm_ui.py
new file mode 100644
index 0000000..d0945f6
--- /dev/null
+++ b/backend/routers/slm_ui.py
@@ -0,0 +1,123 @@
+"""
+Sound Level Meter UI Router
+
+Provides endpoints for SLM dashboard cards, detail pages, and real-time data.
+"""
+
+from fastapi import APIRouter, Depends, HTTPException, Request
+from fastapi.responses import HTMLResponse
+from fastapi.templating import Jinja2Templates
+from sqlalchemy.orm import Session
+from datetime import datetime
+import httpx
+import logging
+import os
+
+from backend.database import get_db
+from backend.models import RosterUnit
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(prefix="/slm", tags=["slm-ui"])
+templates = Jinja2Templates(directory="templates")
+
+SLMM_BASE_URL = os.getenv("SLMM_BASE_URL", "http://172.19.0.1:8100")
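+# Default assumes SLMM is reachable on the Docker bridge network; override with the
+# SLMM_BASE_URL environment variable when SLMM runs elsewhere.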
+
+
+@router.get("/{unit_id}", response_class=HTMLResponse)
+async def slm_detail_page(request: Request, unit_id: str, db: Session = Depends(get_db)):
+ """Sound level meter detail page with controls."""
+
+ # Get roster unit
+ unit = db.query(RosterUnit).filter_by(id=unit_id).first()
+ if not unit or unit.device_type != "sound_level_meter":
+ raise HTTPException(status_code=404, detail="Sound level meter not found")
+
+ return templates.TemplateResponse("slm_detail.html", {
+ "request": request,
+ "unit": unit,
+ "unit_id": unit_id
+ })
+
+
+@router.get("/api/{unit_id}/summary")
+async def get_slm_summary(unit_id: str, db: Session = Depends(get_db)):
+ """Get SLM summary data for dashboard card."""
+
+ # Get roster unit
+ unit = db.query(RosterUnit).filter_by(id=unit_id).first()
+ if not unit or unit.device_type != "sound_level_meter":
+ raise HTTPException(status_code=404, detail="Sound level meter not found")
+
+ # Try to get live status from SLMM
+ status_data = None
+ try:
+ async with httpx.AsyncClient(timeout=3.0) as client:
+ response = await client.get(f"{SLMM_BASE_URL}/api/nl43/{unit_id}/status")
+ if response.status_code == 200:
+ status_data = response.json().get("data")
+ except Exception as e:
+ logger.warning(f"Failed to get SLM status for {unit_id}: {e}")
+
+ return {
+ "unit_id": unit_id,
+ "device_type": "sound_level_meter",
+ "deployed": unit.deployed,
+ "model": unit.slm_model or "NL-43",
+ "location": unit.address or unit.location,
+ "coordinates": unit.coordinates,
+ "note": unit.note,
+ "status": status_data,
+ "last_check": unit.slm_last_check.isoformat() if unit.slm_last_check else None,
+ }
+
+
+@router.get("/partials/{unit_id}/card", response_class=HTMLResponse)
+async def slm_dashboard_card(request: Request, unit_id: str, db: Session = Depends(get_db)):
+ """Render SLM dashboard card partial."""
+
+ summary = await get_slm_summary(unit_id, db)
+
+ return templates.TemplateResponse("partials/slm_card.html", {
+ "request": request,
+ "slm": summary
+ })
+
+
+@router.get("/partials/{unit_id}/controls", response_class=HTMLResponse)
+async def slm_controls_partial(request: Request, unit_id: str, db: Session = Depends(get_db)):
+ """Render SLM control panel partial."""
+
+ unit = db.query(RosterUnit).filter_by(id=unit_id).first()
+ if not unit or unit.device_type != "sound_level_meter":
+ raise HTTPException(status_code=404, detail="Sound level meter not found")
+
+ # Get current status from SLMM
+ measurement_state = None
+ battery_level = None
+ try:
+ async with httpx.AsyncClient(timeout=3.0) as client:
+ # Get measurement state
+ state_response = await client.get(
+ f"{SLMM_BASE_URL}/api/nl43/{unit_id}/measurement-state"
+ )
+ if state_response.status_code == 200:
+ measurement_state = state_response.json().get("measurement_state")
+
+ # Get battery level
+ battery_response = await client.get(
+ f"{SLMM_BASE_URL}/api/nl43/{unit_id}/battery"
+ )
+ if battery_response.status_code == 200:
+ battery_level = battery_response.json().get("battery_level")
+ except Exception as e:
+ logger.warning(f"Failed to get SLM control data for {unit_id}: {e}")
+
+ return templates.TemplateResponse("partials/slm_controls.html", {
+ "request": request,
+ "unit_id": unit_id,
+ "unit": unit,
+ "measurement_state": measurement_state,
+ "battery_level": battery_level,
+ "is_measuring": measurement_state == "Start"
+ })
diff --git a/backend/routers/slmm.py b/backend/routers/slmm.py
new file mode 100644
index 0000000..b075637
--- /dev/null
+++ b/backend/routers/slmm.py
@@ -0,0 +1,130 @@
+"""
+SLMM (Sound Level Meter Manager) Proxy Router
+
+Proxies requests from SFM to the standalone SLMM backend service.
+SLMM runs on port 8100 and handles NL43/NL53 sound level meter communication.
+"""
+
+from fastapi import APIRouter, HTTPException, Request, Response
+from fastapi.responses import StreamingResponse
+import httpx
+import logging
+import os
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(prefix="/api/slmm", tags=["slmm"])
+
+# SLMM backend URL - configurable via environment variable
+SLMM_BASE_URL = os.getenv("SLMM_BASE_URL", "http://localhost:8100")
+
+
+@router.get("/health")
+async def check_slmm_health():
+ """
+ Check if the SLMM backend service is reachable and healthy.
+ """
+ try:
+ async with httpx.AsyncClient(timeout=5.0) as client:
+ response = await client.get(f"{SLMM_BASE_URL}/health")
+
+ if response.status_code == 200:
+ data = response.json()
+ return {
+ "status": "ok",
+ "slmm_status": "connected",
+ "slmm_url": SLMM_BASE_URL,
+ "slmm_version": data.get("version", "unknown"),
+ "slmm_response": data
+ }
+ else:
+ return {
+ "status": "degraded",
+ "slmm_status": "error",
+ "slmm_url": SLMM_BASE_URL,
+ "detail": f"SLMM returned status {response.status_code}"
+ }
+
+ except httpx.ConnectError:
+ return {
+ "status": "error",
+ "slmm_status": "unreachable",
+ "slmm_url": SLMM_BASE_URL,
+ "detail": "Cannot connect to SLMM backend. Is it running?"
+ }
+ except Exception as e:
+ return {
+ "status": "error",
+ "slmm_status": "error",
+ "slmm_url": SLMM_BASE_URL,
+ "detail": str(e)
+ }
+
+
+@router.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE", "PATCH"])
+async def proxy_to_slmm(path: str, request: Request):
+ """
+ Proxy all requests to the SLMM backend service.
+
+ This allows SFM to act as a unified frontend for all device types,
+ while SLMM remains a standalone backend service.
+ """
+ # Build target URL
+ target_url = f"{SLMM_BASE_URL}/api/nl43/{path}"
+
+ # Get query parameters
+ query_params = dict(request.query_params)
+
+ # Get request body if present
+ body = None
+ if request.method in ["POST", "PUT", "PATCH"]:
+ try:
+ body = await request.body()
+ except Exception as e:
+ logger.error(f"Failed to read request body: {e}")
+ body = None
+
+ # Get headers (exclude host and other proxy-specific headers)
+ headers = dict(request.headers)
+ headers_to_exclude = ["host", "content-length", "transfer-encoding", "connection"]
+ proxy_headers = {k: v for k, v in headers.items() if k.lower() not in headers_to_exclude}
+
+ logger.info(f"Proxying {request.method} request to SLMM: {target_url}")
+
+ try:
+ async with httpx.AsyncClient(timeout=30.0) as client:
+ # Forward the request to SLMM
+ response = await client.request(
+ method=request.method,
+ url=target_url,
+ params=query_params,
+ headers=proxy_headers,
+ content=body
+ )
+
+ # Return the response from SLMM
+ return Response(
+ content=response.content,
+ status_code=response.status_code,
+ headers=dict(response.headers),
+ media_type=response.headers.get("content-type")
+ )
+
+ except httpx.ConnectError:
+ logger.error(f"Failed to connect to SLMM backend at {SLMM_BASE_URL}")
+ raise HTTPException(
+ status_code=503,
+ detail=f"SLMM backend service unavailable. Is SLMM running on {SLMM_BASE_URL}?"
+ )
+ except httpx.TimeoutException:
+ logger.error(f"Timeout connecting to SLMM backend at {SLMM_BASE_URL}")
+ raise HTTPException(
+ status_code=504,
+ detail="SLMM backend timeout"
+ )
+ except Exception as e:
+ logger.error(f"Error proxying to SLMM: {e}")
+ raise HTTPException(
+ status_code=500,
+ detail=f"Failed to proxy request to SLMM: {str(e)}"
+ )
diff --git a/backend/routers/units.py b/backend/routers/units.py
new file mode 100644
index 0000000..654fa2c
--- /dev/null
+++ b/backend/routers/units.py
@@ -0,0 +1,44 @@
+from fastapi import APIRouter, Depends, HTTPException
+from sqlalchemy.orm import Session
+
+from backend.database import get_db
+from backend.services.snapshot import emit_status_snapshot
+
+router = APIRouter(prefix="/api", tags=["units"])
+
+
+@router.get("/unit/{unit_id}")
+def get_unit_detail(unit_id: str, db: Session = Depends(get_db)):
+ """
+ Returns detailed data for a single unit.
+ """
+ snapshot = emit_status_snapshot()
+
+ if unit_id not in snapshot["units"]:
+ raise HTTPException(status_code=404, detail=f"Unit {unit_id} not found")
+
+ unit_data = snapshot["units"][unit_id]
+
+ # Mock coordinates for now (will be replaced with real data)
+ mock_coords = {
+ "BE1234": {"lat": 37.7749, "lon": -122.4194, "location": "San Francisco, CA"},
+ "BE5678": {"lat": 34.0522, "lon": -118.2437, "location": "Los Angeles, CA"},
+ "BE9012": {"lat": 40.7128, "lon": -74.0060, "location": "New York, NY"},
+ "BE3456": {"lat": 41.8781, "lon": -87.6298, "location": "Chicago, IL"},
+ "BE7890": {"lat": 29.7604, "lon": -95.3698, "location": "Houston, TX"},
+ }
+
+ coords = mock_coords.get(unit_id, {"lat": 39.8283, "lon": -98.5795, "location": "Unknown"})
+
+ return {
+ "id": unit_id,
+ "status": unit_data["status"],
+ "age": unit_data["age"],
+ "last_seen": unit_data["last"],
+ "last_file": unit_data.get("fname", ""),
+ "deployed": unit_data["deployed"],
+ "note": unit_data.get("note", ""),
+ "coordinates": coords
+ }
diff --git a/backend/routes.py b/backend/routes.py
new file mode 100644
index 0000000..2c6cd8f
--- /dev/null
+++ b/backend/routes.py
@@ -0,0 +1,286 @@
+from fastapi import APIRouter, Depends, HTTPException, Request
+from sqlalchemy.orm import Session
+from pydantic import BaseModel
+from datetime import datetime
+from typing import Optional, List
+
+from backend.database import get_db
+from backend.models import Emitter
+
+router = APIRouter()
+
+
+# Helper function to detect unit type from unit ID
+def detect_unit_type(unit_id: str) -> str:
+ """
+ Automatically detect if a unit is Series 3 or Series 4 based on ID pattern.
+
+ Series 4 (Micromate) units have IDs starting with "UM" followed by digits (e.g., UM11719)
+ Series 3 units typically have other patterns
+
+ Returns:
+ "series4" if the unit ID matches Micromate pattern (UM#####)
+ "series3" otherwise
+ """
+ if not unit_id:
+ return "unknown"
+
+ # Series 4 (Micromate) pattern: UM followed by digits
+ if unit_id.upper().startswith("UM") and len(unit_id) > 2:
+ # Check if remaining characters after "UM" are digits
+ rest = unit_id[2:]
+ if rest.isdigit():
+ return "series4"
+
+ # Default to series3 for other patterns
+ return "series3"
+
+
+# Pydantic schemas for request/response validation
+class EmitterReport(BaseModel):
+ unit: str
+ unit_type: str
+ timestamp: str
+ file: str
+ status: str
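+    # Example body (illustrative values): {"unit": "BE1234", "unit_type": "series3",
+    #   "timestamp": "2025-12-04T19:30:42Z", "file": "latest.csv", "status": "OK"}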
+
+
+class EmitterResponse(BaseModel):
+ id: str
+ unit_type: str
+ last_seen: datetime
+ last_file: str
+ status: str
+ notes: Optional[str] = None
+
+ class Config:
+ from_attributes = True
+
+
+@router.post("/emitters/report", status_code=200)
+def report_emitter(report: EmitterReport, db: Session = Depends(get_db)):
+ """
+ Endpoint for emitters to report their status.
+ Creates a new emitter if it doesn't exist, or updates an existing one.
+ """
+ try:
+ # Parse the timestamp
+ timestamp = datetime.fromisoformat(report.timestamp.replace('Z', '+00:00'))
+ except ValueError:
+ raise HTTPException(status_code=400, detail="Invalid timestamp format")
+
+ # Check if emitter already exists
+ emitter = db.query(Emitter).filter(Emitter.id == report.unit).first()
+
+ if emitter:
+ # Update existing emitter
+ emitter.unit_type = report.unit_type
+ emitter.last_seen = timestamp
+ emitter.last_file = report.file
+ emitter.status = report.status
+ else:
+ # Create new emitter
+ emitter = Emitter(
+ id=report.unit,
+ unit_type=report.unit_type,
+ last_seen=timestamp,
+ last_file=report.file,
+ status=report.status
+ )
+ db.add(emitter)
+
+ db.commit()
+ db.refresh(emitter)
+
+ return {
+ "message": "Emitter report received",
+ "unit": emitter.id,
+ "status": emitter.status
+ }
+
+
+@router.get("/fleet/status", response_model=List[EmitterResponse])
+def get_fleet_status(db: Session = Depends(get_db)):
+ """
+ Returns a list of all emitters and their current status.
+ """
+ emitters = db.query(Emitter).all()
+ return emitters
+
+# series3v1.1 Standardized Heartbeat Schema (multi-unit)
+
+@router.post("/api/series3/heartbeat", status_code=200)
+async def series3_heartbeat(request: Request, db: Session = Depends(get_db)):
+ """
+ Accepts a full telemetry payload from the Series3 emitter.
+ Updates or inserts each unit into the database.
+ """
+ payload = await request.json()
+
+ source = payload.get("source_id")
+ units = payload.get("units", [])
+
+ print("\n=== Series 3 Heartbeat ===")
+ print("Source:", source)
+ print("Units received:", len(units))
+ print("==========================\n")
+
+ results = []
+
+ for u in units:
+ uid = u.get("unit_id")
+ last_event_time = u.get("last_event_time")
+ event_meta = u.get("event_metadata", {})
+ age_minutes = u.get("age_minutes")
+
+ try:
+ if last_event_time:
+ ts = datetime.fromisoformat(last_event_time.replace("Z", "+00:00"))
+ else:
+ ts = None
+        except (ValueError, AttributeError):
+ ts = None
+
+ # Pull from DB
+ emitter = db.query(Emitter).filter(Emitter.id == uid).first()
+
+ # File name (from event_metadata)
+ last_file = event_meta.get("file_name")
+ status = "Unknown"
+
+ # Determine status based on age
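+        # OK within 12 hours, Pending between 12 and 24 hours, Missing beyond 24 hours
+        # or when no age was reported.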
+ if age_minutes is None:
+ status = "Missing"
+ elif age_minutes > 24 * 60:
+ status = "Missing"
+ elif age_minutes > 12 * 60:
+ status = "Pending"
+ else:
+ status = "OK"
+
+ if emitter:
+ # Update existing
+ emitter.last_seen = ts
+ emitter.last_file = last_file
+ emitter.status = status
+ # Update unit_type if it was incorrectly classified
+ detected_type = detect_unit_type(uid)
+ if emitter.unit_type != detected_type:
+ emitter.unit_type = detected_type
+ else:
+ # Insert new - auto-detect unit type from ID
+ detected_type = detect_unit_type(uid)
+ emitter = Emitter(
+ id=uid,
+ unit_type=detected_type,
+ last_seen=ts,
+ last_file=last_file,
+ status=status
+ )
+ db.add(emitter)
+
+ results.append({"unit": uid, "status": status})
+
+ db.commit()
+
+ return {
+ "message": "Heartbeat processed",
+ "source": source,
+ "units_processed": len(results),
+ "results": results
+ }
+
+
+# series4 (Micromate) Standardized Heartbeat Schema
+@router.post("/api/series4/heartbeat", status_code=200)
+async def series4_heartbeat(request: Request, db: Session = Depends(get_db)):
+ """
+ Accepts a full telemetry payload from the Series4 (Micromate) emitter.
+ Updates or inserts each unit into the database.
+
+ Expected payload:
+ {
+ "source": "series4_emitter",
+ "generated_at": "2025-12-04T20:01:00",
+ "units": [
+ {
+ "unit_id": "UM11719",
+ "type": "micromate",
+ "project_hint": "Clearwater - ECMS 57940",
+ "last_call": "2025-12-04T19:30:42",
+ "status": "OK",
+ "age_days": 0.04,
+ "age_hours": 0.9,
+ "mlg_path": "C:\\THORDATA\\..."
+ }
+ ]
+ }
+ """
+ payload = await request.json()
+
+ source = payload.get("source", "series4_emitter")
+ units = payload.get("units", [])
+
+ print("\n=== Series 4 Heartbeat ===")
+ print("Source:", source)
+ print("Units received:", len(units))
+ print("==========================\n")
+
+ results = []
+
+ for u in units:
+ uid = u.get("unit_id")
+ last_call_str = u.get("last_call")
+ status = u.get("status", "Unknown")
+ mlg_path = u.get("mlg_path")
+ project_hint = u.get("project_hint")
+
+ # Parse last_call timestamp
+ try:
+ if last_call_str:
+ ts = datetime.fromisoformat(last_call_str.replace("Z", "+00:00"))
+ else:
+ ts = None
+        except (ValueError, AttributeError):
+ ts = None
+
+ # Pull from DB
+ emitter = db.query(Emitter).filter(Emitter.id == uid).first()
+
+ if emitter:
+ # Update existing
+ emitter.last_seen = ts
+ emitter.last_file = mlg_path
+ emitter.status = status
+ # Update unit_type if it was incorrectly classified
+ detected_type = detect_unit_type(uid)
+ if emitter.unit_type != detected_type:
+ emitter.unit_type = detected_type
+ # Optionally update notes with project hint if it exists
+ if project_hint and not emitter.notes:
+ emitter.notes = f"Project: {project_hint}"
+ else:
+ # Insert new - auto-detect unit type from ID
+ detected_type = detect_unit_type(uid)
+ notes = f"Project: {project_hint}" if project_hint else None
+ emitter = Emitter(
+ id=uid,
+ unit_type=detected_type,
+ last_seen=ts,
+ last_file=mlg_path,
+ status=status,
+ notes=notes
+ )
+ db.add(emitter)
+
+ results.append({"unit": uid, "status": status})
+
+ db.commit()
+
+ return {
+ "message": "Heartbeat processed",
+ "source": source,
+ "units_processed": len(results),
+ "results": results
+ }
diff --git a/backend/services/backup_scheduler.py b/backend/services/backup_scheduler.py
new file mode 100644
index 0000000..15168cc
--- /dev/null
+++ b/backend/services/backup_scheduler.py
@@ -0,0 +1,145 @@
+"""
+Automatic Database Backup Scheduler
+Handles scheduled automatic backups of the database
+"""
+
+import schedule
+import time
+import threading
+from datetime import datetime
+from typing import Optional
+import logging
+
+from backend.services.database_backup import DatabaseBackupService
+
+logger = logging.getLogger(__name__)
+
+
+class BackupScheduler:
+ """Manages automatic database backups on a schedule"""
+
+ def __init__(self, db_path: str = "./data/seismo_fleet.db", backups_dir: str = "./data/backups"):
+ self.backup_service = DatabaseBackupService(db_path=db_path, backups_dir=backups_dir)
+ self.scheduler_thread: Optional[threading.Thread] = None
+ self.is_running = False
+
+ # Default settings
+ self.backup_interval_hours = 24 # Daily backups
+ self.keep_count = 10 # Keep last 10 backups
+ self.enabled = False
+
+ def configure(self, interval_hours: int = 24, keep_count: int = 10, enabled: bool = True):
+ """
+ Configure backup scheduler settings
+
+ Args:
+ interval_hours: Hours between automatic backups
+ keep_count: Number of backups to retain
+ enabled: Whether automatic backups are enabled
+ """
+ self.backup_interval_hours = interval_hours
+ self.keep_count = keep_count
+ self.enabled = enabled
+
+ logger.info(f"Backup scheduler configured: interval={interval_hours}h, keep={keep_count}, enabled={enabled}")
+
+ def create_automatic_backup(self):
+ """Create an automatic backup and cleanup old ones"""
+ if not self.enabled:
+ logger.info("Automatic backups are disabled, skipping")
+ return
+
+ try:
+ timestamp = datetime.utcnow().strftime("%Y-%m-%d %H:%M UTC")
+ description = f"Automatic backup - {timestamp}"
+
+ logger.info("Creating automatic backup...")
+ snapshot = self.backup_service.create_snapshot(description=description)
+
+ logger.info(f"Automatic backup created: {snapshot['filename']} ({snapshot['size_mb']} MB)")
+
+ # Cleanup old backups
+ cleanup_result = self.backup_service.cleanup_old_snapshots(keep_count=self.keep_count)
+ if cleanup_result['deleted'] > 0:
+ logger.info(f"Cleaned up {cleanup_result['deleted']} old snapshots")
+
+ return snapshot
+
+ except Exception as e:
+ logger.error(f"Automatic backup failed: {str(e)}")
+ return None
+
+ def start(self):
+ """Start the backup scheduler in a background thread"""
+ if self.is_running:
+ logger.warning("Backup scheduler is already running")
+ return
+
+ if not self.enabled:
+ logger.info("Backup scheduler is disabled, not starting")
+ return
+
+ logger.info(f"Starting backup scheduler (every {self.backup_interval_hours} hours)")
+
+ # Clear any existing scheduled jobs
+ schedule.clear()
+
+ # Schedule the backup job
+ schedule.every(self.backup_interval_hours).hours.do(self.create_automatic_backup)
+
+ # Also run immediately on startup
+ self.create_automatic_backup()
+
+ # Start the scheduler thread
+ self.is_running = True
+ self.scheduler_thread = threading.Thread(target=self._run_scheduler, daemon=True)
+ self.scheduler_thread.start()
+
+ logger.info("Backup scheduler started successfully")
+
+ def _run_scheduler(self):
+ """Internal method to run the scheduler loop"""
+ while self.is_running:
+ schedule.run_pending()
+ time.sleep(60) # Check every minute
+
+ def stop(self):
+ """Stop the backup scheduler"""
+ if not self.is_running:
+ logger.warning("Backup scheduler is not running")
+ return
+
+ logger.info("Stopping backup scheduler...")
+ self.is_running = False
+ schedule.clear()
+
+ if self.scheduler_thread:
+ self.scheduler_thread.join(timeout=5)
+
+ logger.info("Backup scheduler stopped")
+
+ def get_status(self) -> dict:
+ """Get current scheduler status"""
+ next_run = None
+ if self.is_running and schedule.jobs:
+ next_run = schedule.jobs[0].next_run.isoformat() if schedule.jobs[0].next_run else None
+
+ return {
+ "enabled": self.enabled,
+ "running": self.is_running,
+ "interval_hours": self.backup_interval_hours,
+ "keep_count": self.keep_count,
+ "next_run": next_run
+ }
+
+
+# Global scheduler instance
+_scheduler_instance: Optional[BackupScheduler] = None
+
+
+def get_backup_scheduler() -> BackupScheduler:
+ """Get or create the global backup scheduler instance"""
+ global _scheduler_instance
+ if _scheduler_instance is None:
+ _scheduler_instance = BackupScheduler()
+ return _scheduler_instance
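+
+
+# Typical wiring at app startup (a sketch; the actual startup hook lives in the FastAPI app):
+#   scheduler = get_backup_scheduler()
+#   scheduler.configure(interval_hours=24, keep_count=10, enabled=True)
+#   scheduler.start()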
diff --git a/backend/services/database_backup.py b/backend/services/database_backup.py
new file mode 100644
index 0000000..2858fd2
--- /dev/null
+++ b/backend/services/database_backup.py
@@ -0,0 +1,192 @@
+"""
+Database Backup and Restore Service
+Handles full database snapshots, restoration, and remote synchronization
+"""
+
+import os
+import shutil
+import sqlite3
+from datetime import datetime
+from pathlib import Path
+from typing import List, Dict, Optional
+import json
+
+
+class DatabaseBackupService:
+ """Manages database backup operations"""
+
+ def __init__(self, db_path: str = "./data/seismo_fleet.db", backups_dir: str = "./data/backups"):
+ self.db_path = Path(db_path)
+ self.backups_dir = Path(backups_dir)
+ self.backups_dir.mkdir(parents=True, exist_ok=True)
+
+ def create_snapshot(self, description: Optional[str] = None) -> Dict:
+ """
+ Create a full database snapshot using SQLite backup API
+ Returns snapshot metadata
+ """
+ if not self.db_path.exists():
+ raise FileNotFoundError(f"Database not found at {self.db_path}")
+
+ # Generate snapshot filename with timestamp
+ timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
+ snapshot_name = f"snapshot_{timestamp}.db"
+ snapshot_path = self.backups_dir / snapshot_name
+
+ # Get database size before backup
+ db_size = self.db_path.stat().st_size
+
+ try:
+ # Use SQLite backup API for safe backup (handles concurrent access)
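+            # (sqlite3.Connection.backup copies pages through SQLite itself, so the snapshot
+            # stays consistent even if another connection writes during the copy.)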
+ source_conn = sqlite3.connect(str(self.db_path))
+ dest_conn = sqlite3.connect(str(snapshot_path))
+
+ # Perform the backup
+ with dest_conn:
+ source_conn.backup(dest_conn)
+
+ source_conn.close()
+ dest_conn.close()
+
+ # Create metadata
+ metadata = {
+ "filename": snapshot_name,
+ "created_at": timestamp,
+ "created_at_iso": datetime.utcnow().isoformat(),
+ "description": description or "Manual snapshot",
+ "size_bytes": snapshot_path.stat().st_size,
+ "size_mb": round(snapshot_path.stat().st_size / (1024 * 1024), 2),
+ "original_db_size_bytes": db_size,
+ "type": "manual"
+ }
+
+ # Save metadata as JSON sidecar file
+ metadata_path = self.backups_dir / f"{snapshot_name}.meta.json"
+ with open(metadata_path, 'w') as f:
+ json.dump(metadata, f, indent=2)
+
+ return metadata
+
+ except Exception as e:
+ # Clean up partial snapshot if it exists
+ if snapshot_path.exists():
+ snapshot_path.unlink()
+ raise Exception(f"Snapshot creation failed: {str(e)}")
+
+ def list_snapshots(self) -> List[Dict]:
+ """
+ List all available snapshots with metadata
+ Returns list sorted by creation date (newest first)
+ """
+ snapshots = []
+
+ for db_file in sorted(self.backups_dir.glob("snapshot_*.db"), reverse=True):
+ metadata_file = self.backups_dir / f"{db_file.name}.meta.json"
+
+ if metadata_file.exists():
+ with open(metadata_file, 'r') as f:
+ metadata = json.load(f)
+ else:
+ # Fallback for legacy snapshots without metadata
+ stat_info = db_file.stat()
+ metadata = {
+ "filename": db_file.name,
+ "created_at": datetime.fromtimestamp(stat_info.st_mtime).strftime("%Y%m%d_%H%M%S"),
+ "created_at_iso": datetime.fromtimestamp(stat_info.st_mtime).isoformat(),
+ "description": "Legacy snapshot",
+ "size_bytes": stat_info.st_size,
+ "size_mb": round(stat_info.st_size / (1024 * 1024), 2),
+ "type": "manual"
+ }
+
+ snapshots.append(metadata)
+
+ return snapshots
+
+ def delete_snapshot(self, filename: str) -> bool:
+ """Delete a snapshot and its metadata"""
+ snapshot_path = self.backups_dir / filename
+ metadata_path = self.backups_dir / f"{filename}.meta.json"
+
+ if not snapshot_path.exists():
+ raise FileNotFoundError(f"Snapshot {filename} not found")
+
+ snapshot_path.unlink()
+ if metadata_path.exists():
+ metadata_path.unlink()
+
+ return True
+
+ def restore_snapshot(self, filename: str, create_backup_before_restore: bool = True) -> Dict:
+ """
+ Restore database from a snapshot
+ Creates a safety backup before restoring if requested
+ """
+ snapshot_path = self.backups_dir / filename
+
+ if not snapshot_path.exists():
+ raise FileNotFoundError(f"Snapshot {filename} not found")
+
+ if not self.db_path.exists():
+ raise FileNotFoundError(f"Database not found at {self.db_path}")
+
+ backup_info = None
+
+ # Create safety backup before restore
+ if create_backup_before_restore:
+ backup_info = self.create_snapshot(description="Auto-backup before restore")
+
+ try:
+ # Replace database file
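+            # Plain file copy; callers should close open DB connections first (the
+            # /database/restore endpoint closes its session before invoking this).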
+ shutil.copy2(str(snapshot_path), str(self.db_path))
+
+ return {
+ "message": "Database restored successfully",
+ "restored_from": filename,
+ "restored_at": datetime.utcnow().isoformat(),
+ "backup_created": backup_info["filename"] if backup_info else None
+ }
+
+ except Exception as e:
+ raise Exception(f"Restore failed: {str(e)}")
+
+ def get_database_stats(self) -> Dict:
+ """Get statistics about the current database"""
+ if not self.db_path.exists():
+ return {"error": "Database not found"}
+
+ conn = sqlite3.connect(str(self.db_path))
+ cursor = conn.cursor()
+
+ # Get table counts
+ cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%'")
+ tables = cursor.fetchall()
+
+ table_stats = {}
+ total_rows = 0
+
+ for (table_name,) in tables:
+ cursor.execute(f"SELECT COUNT(*) FROM {table_name}")
+ count = cursor.fetchone()[0]
+ table_stats[table_name] = count
+ total_rows += count
+
+ conn.close()
+
+ db_size = self.db_path.stat().st_size
+
+ return {
+ "database_path": str(self.db_path),
+ "size_bytes": db_size,
+ "size_mb": round(db_size / (1024 * 1024), 2),
+ "total_rows": total_rows,
+ "tables": table_stats,
+ "last_modified": datetime.fromtimestamp(self.db_path.stat().st_mtime).isoformat()
+ }
+
+ def download_snapshot(self, filename: str) -> Path:
+ """Get the file path for downloading a snapshot"""
+ snapshot_path = self.backups_dir / filename
+ if not snapshot_path.exists():
+ raise FileNotFoundError(f"Snapshot {filename} not found")
+ return snapshot_path
diff --git a/backend/services/snapshot.py b/backend/services/snapshot.py
new file mode 100644
index 0000000..7c737b7
--- /dev/null
+++ b/backend/services/snapshot.py
@@ -0,0 +1,154 @@
+from datetime import datetime, timezone
+from sqlalchemy.orm import Session
+
+from backend.database import get_db_session
+from backend.models import Emitter, RosterUnit, IgnoredUnit
+
+
+def ensure_utc(dt):
+ if dt is None:
+ return None
+ if dt.tzinfo is None:
+ return dt.replace(tzinfo=timezone.utc)
+ return dt.astimezone(timezone.utc)
+
+
+def format_age(last_seen):
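+    # Renders elapsed time since last_seen, e.g. 90 minutes ago -> "1h 30m".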
+ if not last_seen:
+ return "N/A"
+ last_seen = ensure_utc(last_seen)
+ now = datetime.now(timezone.utc)
+ diff = now - last_seen
+ hours = diff.total_seconds() // 3600
+ mins = (diff.total_seconds() % 3600) // 60
+ return f"{int(hours)}h {int(mins)}m"
+
+
+def emit_status_snapshot():
+ """
+ Merge roster (what we *intend*) with emitter data (what is *actually happening*).
+ """
+
+ db = get_db_session()
+ try:
+ roster = {r.id: r for r in db.query(RosterUnit).all()}
+ emitters = {e.id: e for e in db.query(Emitter).all()}
+ ignored = {i.id for i in db.query(IgnoredUnit).all()}
+
+ units = {}
+
+ # --- Merge roster entries first ---
+ for unit_id, r in roster.items():
+ e = emitters.get(unit_id)
+ if r.retired:
+ # Retired units get separated later
+ status = "Retired"
+ age = "N/A"
+ last_seen = None
+ fname = ""
+ else:
+ if e:
+ status = e.status
+ last_seen = ensure_utc(e.last_seen)
+ age = format_age(last_seen)
+ fname = e.last_file
+ else:
+ # Rostered but no emitter data
+ status = "Missing"
+ last_seen = None
+ age = "N/A"
+ fname = ""
+
+ units[unit_id] = {
+ "id": unit_id,
+ "status": status,
+ "age": age,
+ "last": last_seen.isoformat() if last_seen else None,
+ "fname": fname,
+ "deployed": r.deployed,
+ "note": r.note or "",
+ "retired": r.retired,
+ # Device type and type-specific fields
+ "device_type": r.device_type or "seismograph",
+ "last_calibrated": r.last_calibrated.isoformat() if r.last_calibrated else None,
+ "next_calibration_due": r.next_calibration_due.isoformat() if r.next_calibration_due else None,
+ "deployed_with_modem_id": r.deployed_with_modem_id,
+ "ip_address": r.ip_address,
+ "phone_number": r.phone_number,
+ "hardware_model": r.hardware_model,
+ # Location for mapping
+ "location": r.location or "",
+ "address": r.address or "",
+ "coordinates": r.coordinates or "",
+ }
+
+ # --- Add unexpected emitter-only units ---
+ for unit_id, e in emitters.items():
+ if unit_id not in roster:
+ last_seen = ensure_utc(e.last_seen)
+ units[unit_id] = {
+ "id": unit_id,
+ "status": e.status,
+ "age": format_age(last_seen),
+ "last": last_seen.isoformat(),
+ "fname": e.last_file,
+ "deployed": False, # default
+ "note": "",
+ "retired": False,
+ # Device type and type-specific fields (defaults for unknown units)
+ "device_type": "seismograph", # default
+ "last_calibrated": None,
+ "next_calibration_due": None,
+ "deployed_with_modem_id": None,
+ "ip_address": None,
+ "phone_number": None,
+ "hardware_model": None,
+ # Location fields
+ "location": "",
+ "address": "",
+ "coordinates": "",
+ }
+
+ # Separate buckets for UI
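+        # active = deployed & not retired, benched = not deployed & not retired,
+        # unknown = reporting emitters that are not in the roster; ignored IDs are excluded
+        # from these buckets but remain in the full units map.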
+ active_units = {
+ uid: u for uid, u in units.items()
+ if not u["retired"] and u["deployed"] and uid not in ignored
+ }
+
+ benched_units = {
+ uid: u for uid, u in units.items()
+ if not u["retired"] and not u["deployed"] and uid not in ignored
+ }
+
+ retired_units = {
+ uid: u for uid, u in units.items()
+ if u["retired"]
+ }
+
+ # Unknown units - emitters that aren't in the roster and aren't ignored
+ unknown_units = {
+ uid: u for uid, u in units.items()
+ if uid not in roster and uid not in ignored
+ }
+
+ return {
+ "timestamp": datetime.utcnow().isoformat(),
+ "units": units,
+ "active": active_units,
+ "benched": benched_units,
+ "retired": retired_units,
+ "unknown": unknown_units,
+ "summary": {
+ "total": len(active_units) + len(benched_units),
+ "active": len(active_units),
+ "benched": len(benched_units),
+ "retired": len(retired_units),
+ "unknown": len(unknown_units),
+ # Status counts only for deployed units (active_units)
+ "ok": sum(1 for u in active_units.values() if u["status"] == "OK"),
+ "pending": sum(1 for u in active_units.values() if u["status"] == "Pending"),
+ "missing": sum(1 for u in active_units.values() if u["status"] == "Missing"),
+ }
+ }
+ finally:
+ db.close()
diff --git a/backend/static/icons/ICON_GENERATION_INSTRUCTIONS.md b/backend/static/icons/ICON_GENERATION_INSTRUCTIONS.md
new file mode 100644
index 0000000..a62823f
--- /dev/null
+++ b/backend/static/icons/ICON_GENERATION_INSTRUCTIONS.md
@@ -0,0 +1,78 @@
+# PWA Icon Generation Instructions
+
+The PWA manifest requires 8 icon sizes for full compatibility across devices.
+
+## Required Icon Sizes
+
+- 72x72px
+- 96x96px
+- 128x128px
+- 144x144px
+- 152x152px
+- 192x192px
+- 384x384px
+- 512x512px (maskable)
+
+## Design Guidelines
+
+**Background:** Navy blue (#142a66)
+**Icon/Logo:** Orange (#f48b1c)
+**Style:** Simple, recognizable design that works at small sizes
+
+## Quick Generation Methods
+
+### Option 1: Online PWA Icon Generator
+
+1. Visit: https://www.pwabuilder.com/imageGenerator
+2. Upload a 512x512px source image
+3. Download the generated icon pack
+4. Copy PNG files to this directory
+
+### Option 2: ImageMagick (Command Line)
+
+If you have a 512x512px source image called `source-icon.png`:
+
+```bash
+# From the icons directory
+for size in 72 96 128 144 152 192 384 512; do
+ convert source-icon.png -resize ${size}x${size} icon-${size}.png
+done
+```
+
+### Option 3: Photoshop/GIMP
+
+1. Create a 512x512px canvas
+2. Add your design (navy background + orange icon)
+3. Save/Export for each required size
+4. Name files as: icon-72.png, icon-96.png, etc.
+
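+### Option 4: Python / Pillow (sketch)
+
+If you would rather stay in Python, the resize loop below does the same job as the
+ImageMagick command. It assumes Pillow is installed (`pip install Pillow`) and that a
+512x512px `source-icon.png` sits in this directory.
+
+```python
+# generate_icons.py -- resize source-icon.png into the 8 required PWA sizes
+from PIL import Image
+
+SIZES = [72, 96, 128, 144, 152, 192, 384, 512]
+
+with Image.open("source-icon.png") as src:
+    for size in SIZES:
+        # LANCZOS resampling gives the cleanest downscaling for small icons
+        src.resize((size, size), Image.LANCZOS).save(f"icon-{size}.png")
+```
+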
+## Temporary Placeholder
+
+For testing, you can use a simple colored square:
+
+```bash
+# Generate simple colored placeholder icons
+for size in 72 96 128 144 152 192 384 512; do
+ convert -size ${size}x${size} xc:#142a66 \
+ -gravity center \
+ -fill '#f48b1c' \
+ -pointsize $((size / 2)) \
+ -annotate +0+0 'SFM' \
+ icon-${size}.png
+done
+```
+
+## Verification
+
+After generating icons, verify:
+- All 8 sizes exist in this directory
+- Files are named exactly: icon-72.png, icon-96.png, etc.
+- Images have a transparent or navy background
+- Logo/text is clearly visible at the smallest size (72px)
+
+## Testing PWA Installation
+
+1. Open SFM in Chrome on Android or Safari on iOS
+2. Look for "Install App" or "Add to Home Screen" prompt
+3. Check that the correct icon appears in the install dialog
+4. After installation, verify icon on home screen
diff --git a/backend/static/icons/icon-128.png b/backend/static/icons/icon-128.png
new file mode 100644
index 0000000..83af799
Binary files /dev/null and b/backend/static/icons/icon-128.png differ
diff --git a/backend/static/icons/icon-128.png.svg b/backend/static/icons/icon-128.png.svg
new file mode 100644
index 0000000..e812a63
--- /dev/null
+++ b/backend/static/icons/icon-128.png.svg
@@ -0,0 +1,4 @@
+
diff --git a/backend/static/icons/icon-144.png b/backend/static/icons/icon-144.png
new file mode 100644
index 0000000..d8d90b5
Binary files /dev/null and b/backend/static/icons/icon-144.png differ
diff --git a/backend/static/icons/icon-144.png.svg b/backend/static/icons/icon-144.png.svg
new file mode 100644
index 0000000..5de8b3a
--- /dev/null
+++ b/backend/static/icons/icon-144.png.svg
@@ -0,0 +1,4 @@
+
diff --git a/backend/static/icons/icon-152.png b/backend/static/icons/icon-152.png
new file mode 100644
index 0000000..9ef75af
Binary files /dev/null and b/backend/static/icons/icon-152.png differ
diff --git a/backend/static/icons/icon-152.png.svg b/backend/static/icons/icon-152.png.svg
new file mode 100644
index 0000000..a3f0850
--- /dev/null
+++ b/backend/static/icons/icon-152.png.svg
@@ -0,0 +1,4 @@
+
diff --git a/backend/static/icons/icon-192.png b/backend/static/icons/icon-192.png
new file mode 100644
index 0000000..3290b47
Binary files /dev/null and b/backend/static/icons/icon-192.png differ
diff --git a/backend/static/icons/icon-192.png.svg b/backend/static/icons/icon-192.png.svg
new file mode 100644
index 0000000..ef79877
--- /dev/null
+++ b/backend/static/icons/icon-192.png.svg
@@ -0,0 +1,4 @@
+
diff --git a/backend/static/icons/icon-384.png b/backend/static/icons/icon-384.png
new file mode 100644
index 0000000..2cf0aef
Binary files /dev/null and b/backend/static/icons/icon-384.png differ
diff --git a/backend/static/icons/icon-384.png.svg b/backend/static/icons/icon-384.png.svg
new file mode 100644
index 0000000..f71f324
--- /dev/null
+++ b/backend/static/icons/icon-384.png.svg
@@ -0,0 +1,4 @@
+
diff --git a/backend/static/icons/icon-512.png b/backend/static/icons/icon-512.png
new file mode 100644
index 0000000..b2c82dd
Binary files /dev/null and b/backend/static/icons/icon-512.png differ
diff --git a/backend/static/icons/icon-512.png.svg b/backend/static/icons/icon-512.png.svg
new file mode 100644
index 0000000..39b3068
--- /dev/null
+++ b/backend/static/icons/icon-512.png.svg
@@ -0,0 +1,4 @@
+
diff --git a/backend/static/icons/icon-72.png b/backend/static/icons/icon-72.png
new file mode 100644
index 0000000..d0d0359
Binary files /dev/null and b/backend/static/icons/icon-72.png differ
diff --git a/backend/static/icons/icon-72.png.svg b/backend/static/icons/icon-72.png.svg
new file mode 100644
index 0000000..5ebfd03
--- /dev/null
+++ b/backend/static/icons/icon-72.png.svg
@@ -0,0 +1,4 @@
+
diff --git a/backend/static/icons/icon-96.png b/backend/static/icons/icon-96.png
new file mode 100644
index 0000000..cbcff51
Binary files /dev/null and b/backend/static/icons/icon-96.png differ
diff --git a/backend/static/icons/icon-96.png.svg b/backend/static/icons/icon-96.png.svg
new file mode 100644
index 0000000..1217879
--- /dev/null
+++ b/backend/static/icons/icon-96.png.svg
@@ -0,0 +1,4 @@
+
diff --git a/backend/static/manifest.json b/backend/static/manifest.json
new file mode 100644
index 0000000..8d0c879
--- /dev/null
+++ b/backend/static/manifest.json
@@ -0,0 +1,78 @@
+{
+ "name": "Seismo Fleet Manager",
+ "short_name": "SFM",
+ "description": "Real-time seismograph and modem fleet monitoring and management",
+ "start_url": "/",
+ "display": "standalone",
+ "orientation": "portrait",
+ "background_color": "#142a66",
+ "theme_color": "#f48b1c",
+ "icons": [
+ {
+ "src": "/static/icons/icon-72.png",
+ "sizes": "72x72",
+ "type": "image/png"
+ },
+ {
+ "src": "/static/icons/icon-96.png",
+ "sizes": "96x96",
+ "type": "image/png"
+ },
+ {
+ "src": "/static/icons/icon-128.png",
+ "sizes": "128x128",
+ "type": "image/png"
+ },
+ {
+ "src": "/static/icons/icon-144.png",
+ "sizes": "144x144",
+ "type": "image/png"
+ },
+ {
+ "src": "/static/icons/icon-152.png",
+ "sizes": "152x152",
+ "type": "image/png"
+ },
+ {
+ "src": "/static/icons/icon-192.png",
+ "sizes": "192x192",
+ "type": "image/png"
+ },
+ {
+ "src": "/static/icons/icon-384.png",
+ "sizes": "384x384",
+ "type": "image/png"
+ },
+ {
+ "src": "/static/icons/icon-512.png",
+ "sizes": "512x512",
+ "type": "image/png",
+ "purpose": "any maskable"
+ }
+ ],
+ "screenshots": [
+ {
+ "src": "/static/screenshots/dashboard.png",
+ "type": "image/png",
+ "sizes": "540x720",
+ "form_factor": "narrow"
+ }
+ ],
+ "categories": ["utilities", "productivity"],
+ "shortcuts": [
+ {
+ "name": "Dashboard",
+ "short_name": "Dashboard",
+ "description": "View fleet status dashboard",
+ "url": "/",
+ "icons": [{ "src": "/static/icons/icon-192.png", "sizes": "192x192" }]
+ },
+ {
+ "name": "Fleet Roster",
+ "short_name": "Roster",
+ "description": "View and manage fleet roster",
+ "url": "/roster",
+ "icons": [{ "src": "/static/icons/icon-192.png", "sizes": "192x192" }]
+ }
+ ]
+}
diff --git a/backend/static/mobile.css b/backend/static/mobile.css
new file mode 100644
index 0000000..fce8491
--- /dev/null
+++ b/backend/static/mobile.css
@@ -0,0 +1,612 @@
+/* Mobile-specific styles for Seismo Fleet Manager */
+/* Touch-optimized, portrait-first design */
+
+/* ===== MOBILE TOUCH TARGETS ===== */
+@media (max-width: 767px) {
+ /* Buttons - 44x44px minimum (iOS standard) */
+ .btn, button:not(.tab-button), .button, a.button {
+ min-width: 44px;
+ min-height: 44px;
+ padding: 12px 16px;
+ }
+
+ /* Icon-only buttons */
+ .icon-button, .btn-icon {
+ width: 44px;
+ height: 44px;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ padding: 0;
+ }
+
+ /* Form inputs - 48px height, 16px font prevents iOS zoom */
+ input:not([type="checkbox"]):not([type="radio"]),
+ select,
+ textarea {
+ min-height: 48px;
+ font-size: 16px !important;
+ padding: 12px 16px;
+ }
+
+ /* Checkboxes and radio buttons - larger touch targets */
+ input[type="checkbox"],
+ input[type="radio"] {
+ width: 24px;
+ height: 24px;
+ min-height: 24px;
+ }
+
+ /* Bottom nav buttons - 56px industry standard */
+ .bottom-nav button {
+ min-height: 56px;
+ padding: 8px;
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ justify-content: center;
+ gap: 4px;
+ }
+
+ /* Increase spacing between clickable elements */
+ .btn + .btn,
+ button + button {
+ margin-left: 8px;
+ }
+}
+
+/* ===== HAMBURGER MENU ===== */
+.hamburger-btn {
+ position: fixed;
+ top: 1rem;
+ left: 1rem;
+ z-index: 50;
+ width: 44px;
+ height: 44px;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ background-color: white;
+ border: 1px solid #e5e7eb;
+ border-radius: 0.5rem;
+ box-shadow: 0 1px 3px 0 rgb(0 0 0 / 0.1);
+ transition: all 0.2s;
+}
+
+.hamburger-btn:active {
+ transform: scale(0.95);
+}
+
+.dark .hamburger-btn {
+ background-color: #1e293b;
+ border-color: #374151;
+}
+
+/* Hamburger icon */
+.hamburger-icon {
+ width: 24px;
+ height: 24px;
+ display: flex;
+ flex-direction: column;
+ justify-content: space-around;
+}
+
+.hamburger-line {
+ width: 100%;
+ height: 2px;
+ background-color: #374151;
+ transition: all 0.3s;
+}
+
+.dark .hamburger-line {
+ background-color: #e5e7eb;
+}
+
+/* Hamburger animation when menu open */
+.menu-open .hamburger-line:nth-child(1) {
+ transform: translateY(8px) rotate(45deg);
+}
+
+.menu-open .hamburger-line:nth-child(2) {
+ opacity: 0;
+}
+
+.menu-open .hamburger-line:nth-child(3) {
+ transform: translateY(-8px) rotate(-45deg);
+}
+
+/* ===== SIDEBAR (RESPONSIVE) ===== */
+.sidebar {
+ position: fixed;
+ left: 0;
+ top: 0;
+ width: 16rem; /* 256px */
+ height: 100vh;
+ z-index: 40;
+ transition: transform 0.3s ease-in-out;
+}
+
+@media (max-width: 767px) {
+ .sidebar {
+ transform: translateX(-100%);
+ }
+
+ .sidebar.open {
+ transform: translateX(0);
+ }
+}
+
+@media (min-width: 768px) {
+ .sidebar {
+ transform: translateX(0) !important;
+ }
+}
+
+/* ===== BACKDROP ===== */
+.backdrop {
+ position: fixed;
+ inset: 0;
+ background-color: rgba(0, 0, 0, 0.5);
+ z-index: 30;
+ opacity: 0;
+ transition: opacity 0.3s ease-in-out;
+ pointer-events: none;
+}
+
+.backdrop.show {
+ opacity: 1;
+ pointer-events: auto;
+}
+
+@media (min-width: 768px) {
+ .backdrop {
+ display: none;
+ }
+}
+
+/* ===== BOTTOM NAVIGATION ===== */
+.bottom-nav {
+ position: fixed;
+ bottom: 0;
+ left: 0;
+ right: 0;
+ height: 4rem;
+ background-color: white;
+ border-top: 1px solid #e5e7eb;
+ z-index: 20;
+ box-shadow: 0 -1px 3px 0 rgb(0 0 0 / 0.1);
+}
+
+.dark .bottom-nav {
+ background-color: #1e293b;
+ border-top-color: #374151;
+}
+
+.bottom-nav-btn {
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ justify-content: center;
+ gap: 4px;
+ color: #6b7280;
+ transition: all 0.2s;
+ border: none;
+ background: none;
+ cursor: pointer;
+ width: 100%;
+ height: 100%;
+}
+
+.bottom-nav-btn:active {
+ transform: scale(0.95);
+ background-color: #f3f4f6;
+}
+
+.dark .bottom-nav-btn:active {
+ background-color: #374151;
+}
+
+.bottom-nav-btn.active {
+ color: #f48b1c; /* seismo-orange */
+}
+
+.bottom-nav-btn svg {
+ width: 24px;
+ height: 24px;
+}
+
+.bottom-nav-btn span {
+ font-size: 11px;
+ font-weight: 500;
+}
+
+@media (min-width: 768px) {
+ .bottom-nav {
+ display: none;
+ }
+}
+
+/* ===== MAIN CONTENT ADJUSTMENTS ===== */
+.main-content {
+ margin-left: 0;
+ padding-bottom: 5rem; /* 80px for bottom nav */
+ min-height: 100vh;
+}
+
+@media (min-width: 768px) {
+ .main-content {
+ margin-left: 16rem; /* 256px sidebar width */
+ padding-bottom: 0;
+ }
+}
+
+/* ===== MOBILE ROSTER CARDS ===== */
+.unit-card {
+ background-color: white;
+ border-radius: 0.5rem;
+ box-shadow: 0 1px 3px 0 rgb(0 0 0 / 0.1);
+ padding: 1rem;
+ cursor: pointer;
+ transition: transform 0.2s, box-shadow 0.2s;
+ -webkit-tap-highlight-color: transparent;
+}
+
+.unit-card:active {
+ transform: scale(0.98);
+}
+
+.dark .unit-card {
+ background-color: #1e293b;
+}
+
+.unit-card:hover {
+ box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1);
+}
+
+/* ===== UNIT DETAIL MODAL (BOTTOM SHEET) ===== */
+.unit-modal {
+ position: fixed;
+ inset: 0;
+ z-index: 50;
+ display: flex;
+ align-items: flex-end;
+ justify-content: center;
+ pointer-events: none;
+ opacity: 0;
+ transition: opacity 0.3s ease-in-out;
+}
+
+.unit-modal.show {
+ pointer-events: auto;
+ opacity: 1;
+}
+
+.unit-modal-backdrop {
+ position: absolute;
+ inset: 0;
+ background-color: rgba(0, 0, 0, 0.5);
+}
+
+.unit-modal-content {
+ position: relative;
+ width: 100%;
+ max-height: 85vh;
+ background-color: white;
+ border-top-left-radius: 1rem;
+ border-top-right-radius: 1rem;
+ box-shadow: 0 -4px 6px -1px rgb(0 0 0 / 0.1);
+ overflow-y: auto;
+ transform: translateY(100%);
+ transition: transform 0.3s ease-out;
+}
+
+.unit-modal.show .unit-modal-content {
+ transform: translateY(0);
+}
+
+.dark .unit-modal-content {
+ background-color: #1e293b;
+}
+
+@media (min-width: 768px) {
+ .unit-modal {
+ align-items: center;
+ }
+
+ .unit-modal-content {
+ max-width: 42rem; /* 672px */
+ border-radius: 0.75rem;
+ transform: translateY(20px);
+ opacity: 0;
+ }
+
+ .unit-modal.show .unit-modal-content {
+ transform: translateY(0);
+ opacity: 1;
+ }
+}
+
+/* Modal handle bar (mobile only) */
+.modal-handle {
+ height: 4px;
+ width: 3rem;
+ background-color: #d1d5db;
+ border-radius: 9999px;
+ margin: 0.75rem auto 1rem;
+}
+
+@media (min-width: 768px) {
+ .modal-handle {
+ display: none;
+ }
+}
+
+/* ===== OFFLINE INDICATOR ===== */
+.offline-indicator {
+ position: fixed;
+ top: 0;
+ left: 0;
+ right: 0;
+ background-color: #eab308; /* yellow-500 */
+ color: white;
+ text-align: center;
+ padding: 0.5rem;
+ font-size: 0.875rem;
+ font-weight: 500;
+ z-index: 50;
+ transform: translateY(-100%);
+ transition: transform 0.3s ease-in-out;
+}
+
+.offline-indicator.show {
+ transform: translateY(0);
+}
+
+/* ===== SYNC TOAST ===== */
+.sync-toast {
+ position: fixed;
+ bottom: 6rem; /* Above bottom nav */
+ left: 1rem;
+ right: 1rem;
+ background-color: #22c55e; /* green-500 */
+ color: white;
+ padding: 0.75rem 1rem;
+ border-radius: 0.5rem;
+ box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1);
+ z-index: 50;
+ opacity: 0;
+ transform: translateY(20px);
+ transition: opacity 0.3s, transform 0.3s;
+ pointer-events: none;
+}
+
+.sync-toast.show {
+ opacity: 1;
+ transform: translateY(0);
+ pointer-events: auto;
+}
+
+@media (min-width: 768px) {
+ .sync-toast {
+ bottom: 1rem;
+ left: auto;
+ right: 1rem;
+ max-width: 20rem;
+ }
+}
+
+/* ===== MOBILE SEARCH BAR (STICKY) ===== */
+@media (max-width: 767px) {
+ .mobile-search-sticky {
+ position: sticky;
+ top: 0;
+ z-index: 10;
+ background-color: #f3f4f6;
+ margin: -1rem -1rem 1rem -1rem;
+ padding: 0.5rem 1rem;
+ }
+
+ .dark .mobile-search-sticky {
+ background-color: #111827;
+ }
+}
+
+@media (min-width: 768px) {
+ .mobile-search-sticky {
+ position: static;
+ background-color: transparent;
+ margin: 0;
+ padding: 0;
+ }
+}
+
+/* ===== STATUS BADGES ===== */
+.status-dot {
+ width: 1rem;
+ height: 1rem;
+ border-radius: 9999px;
+ flex-shrink: 0;
+}
+
+.status-badge {
+ padding: 0.25rem 0.75rem;
+ border-radius: 9999px;
+ font-size: 0.75rem;
+ font-weight: 500;
+}
+
+/* ===== DEVICE TYPE BADGES ===== */
+.device-badge {
+ padding: 0.25rem 0.5rem;
+ border-radius: 9999px;
+ font-size: 0.75rem;
+ font-weight: 500;
+ display: inline-block;
+}
+
+/* ===== MOBILE MAP HEIGHT ===== */
+@media (max-width: 767px) {
+ #fleet-map {
+ height: 16rem !important; /* 256px on mobile */
+ }
+
+ #unit-map {
+ height: 16rem !important; /* 256px on mobile */
+ }
+}
+
+/* ===== MAP OVERLAP FIX ===== */
+/* Prevent map and controls from overlapping UI elements on mobile */
+@media (max-width: 767px) {
+ /* Constrain leaflet container to prevent overflow */
+ .leaflet-container {
+ max-width: 100%;
+ overflow: hidden;
+ }
+
+ /* Override Leaflet's default high z-index values */
+  /* Bottom nav is z-20, backdrop is z-30, sidebar is z-40, so the map must stay below all of them */
+ .leaflet-pane,
+ .leaflet-tile-pane,
+ .leaflet-overlay-pane,
+ .leaflet-shadow-pane,
+ .leaflet-marker-pane,
+ .leaflet-tooltip-pane,
+ .leaflet-popup-pane {
+ z-index: 1 !important;
+ }
+
+ /* Map controls should also be below navigation elements */
+ .leaflet-control-container,
+ .leaflet-top,
+ .leaflet-bottom,
+ .leaflet-left,
+ .leaflet-right {
+ z-index: 1 !important;
+ }
+
+ .leaflet-control {
+ z-index: 1 !important;
+ }
+
+ /* When sidebar is open, hide all Leaflet controls (zoom, attribution, etc) */
+ body.menu-open .leaflet-control-container {
+ opacity: 0;
+ pointer-events: none;
+ transition: opacity 0.3s ease-in-out;
+ }
+
+ /* Ensure map tiles are non-interactive when sidebar is open */
+ body.menu-open #fleet-map,
+ body.menu-open #unit-map {
+ pointer-events: none;
+ }
+}
+
+/* ===== PENDING SYNC BADGE ===== */
+.pending-sync-badge {
+ display: inline-flex;
+ align-items: center;
+ gap: 0.25rem;
+ padding: 0.125rem 0.5rem;
+ background-color: #fef3c7; /* amber-100 */
+ color: #92400e; /* amber-800 */
+ border-radius: 9999px;
+ font-size: 0.75rem;
+ font-weight: 500;
+}
+
+.dark .pending-sync-badge {
+ background-color: #78350f;
+ color: #fef3c7;
+}
+
+.pending-sync-badge::before {
+ content: "⏳";
+ font-size: 0.875rem;
+}
+
+/* ===== MOBILE-SPECIFIC UTILITY CLASSES ===== */
+@media (max-width: 767px) {
+ .mobile-text-lg {
+ font-size: 1.125rem;
+ line-height: 1.75rem;
+ }
+
+ .mobile-text-xl {
+ font-size: 1.25rem;
+ line-height: 1.75rem;
+ }
+
+ .mobile-p-4 {
+ padding: 1rem;
+ }
+
+ .mobile-mb-4 {
+ margin-bottom: 1rem;
+ }
+}
+
+/* ===== ACCESSIBILITY ===== */
+/* Improve focus visibility on mobile */
+@media (max-width: 767px) {
+ button:focus-visible,
+ a:focus-visible,
+ input:focus-visible,
+ select:focus-visible,
+ textarea:focus-visible {
+ outline: 2px solid #f48b1c;
+ outline-offset: 2px;
+ }
+}
+
+/* Prevent text selection on buttons (better mobile UX) */
+button,
+.btn,
+.button {
+ -webkit-user-select: none;
+ user-select: none;
+ -webkit-tap-highlight-color: transparent;
+}
+
+/* ===== SMOOTH SCROLLING ===== */
+html {
+ scroll-behavior: smooth;
+}
+
+/* Prevent overscroll bounce on iOS */
+body {
+ overscroll-behavior-y: none;
+}
+
+/* ===== LOADING STATES ===== */
+.loading-pulse {
+ animation: pulse 2s cubic-bezier(0.4, 0, 0.6, 1) infinite;
+}
+
+@keyframes pulse {
+ 0%, 100% {
+ opacity: 1;
+ }
+ 50% {
+ opacity: 0.5;
+ }
+}
+
+/* ===== SAFE AREA SUPPORT (iOS notch) ===== */
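+/* env(safe-area-inset-bottom) is non-zero on devices with a notch or home
+   indicator; the @supports guard leaves other browsers unaffected. */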
+@supports (padding: env(safe-area-inset-bottom)) {
+ .bottom-nav {
+ padding-bottom: env(safe-area-inset-bottom);
+ height: calc(4rem + env(safe-area-inset-bottom));
+ }
+
+ .main-content {
+ padding-bottom: calc(5rem + env(safe-area-inset-bottom));
+ }
+
+ @media (min-width: 768px) {
+ .main-content {
+ padding-bottom: 0;
+ }
+ }
+}
diff --git a/backend/static/mobile.js b/backend/static/mobile.js
new file mode 100644
index 0000000..74b2f44
--- /dev/null
+++ b/backend/static/mobile.js
@@ -0,0 +1,597 @@
+/* Mobile JavaScript for Seismo Fleet Manager */
+/* Handles hamburger menu, modals, offline sync, and mobile interactions */
+
+// ===== GLOBAL STATE =====
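+// currentUnitData caches the unit shown in the detail modal.
+// isOnline is seeded from navigator.onLine; keeping it current is assumed to be
+// handled by online/offline event listeners elsewhere in this file.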
+let currentUnitData = null;
+let isOnline = navigator.onLine;
+
+// ===== HAMBURGER MENU TOGGLE =====
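+// Toggling also sets body.menu-open, which mobile.css uses to hide Leaflet
+// controls and disable map interaction while the drawer is open.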
+function toggleMenu() {
+ const sidebar = document.getElementById('sidebar');
+ const backdrop = document.getElementById('backdrop');
+ const hamburgerBtn = document.getElementById('hamburgerBtn');
+
+ if (sidebar && backdrop) {
+ const isOpen = sidebar.classList.contains('open');
+
+ if (isOpen) {
+ // Close menu
+ sidebar.classList.remove('open');
+ backdrop.classList.remove('show');
+ hamburgerBtn?.classList.remove('menu-open');
+ document.body.style.overflow = '';
+ document.body.classList.remove('menu-open');
+ } else {
+ // Open menu
+ sidebar.classList.add('open');
+ backdrop.classList.add('show');
+ hamburgerBtn?.classList.add('menu-open');
+ document.body.style.overflow = 'hidden';
+ document.body.classList.add('menu-open');
+ }
+ }
+}
+
+// Close menu when clicking backdrop
+function closeMenuFromBackdrop() {
+ const sidebar = document.getElementById('sidebar');
+ const backdrop = document.getElementById('backdrop');
+ const hamburgerBtn = document.getElementById('hamburgerBtn');
+
+ if (sidebar && backdrop) {
+ sidebar.classList.remove('open');
+ backdrop.classList.remove('show');
+ hamburgerBtn?.classList.remove('menu-open');
+ document.body.style.overflow = '';
+ document.body.classList.remove('menu-open');
+ }
+}
+
+// Close menu when window is resized to desktop
+function handleResize() {
+ if (window.innerWidth >= 768) {
+ const sidebar = document.getElementById('sidebar');
+ const backdrop = document.getElementById('backdrop');
+ const hamburgerBtn = document.getElementById('hamburgerBtn');
+
+ if (sidebar && backdrop) {
+ sidebar.classList.remove('open');
+ backdrop.classList.remove('show');
+ hamburgerBtn?.classList.remove('menu-open');
+ document.body.style.overflow = '';
+ document.body.classList.remove('menu-open');
+ }
+ }
+}
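+
+// Illustrative wiring only; the actual registration is assumed to happen
+// elsewhere in this file or via inline handlers in the page templates:
+//   window.addEventListener('resize', handleResize);
+//   document.getElementById('backdrop')?.addEventListener('click', closeMenuFromBackdrop);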
+
+// ===== UNIT DETAIL MODAL =====
+function openUnitModal(unitId, status = null, age = null) {
+ const modal = document.getElementById('unitModal');
+ if (!modal) return;
+
+  // Store the status info passed in from the card.
+  // Accept the status only when it is a non-empty string; default the age to '--' when none is given.
+ const cardStatusInfo = (status && status !== '') ? {
+ status: status,
+ age: age || '--'
+ } : null;
+
+ console.log('openUnitModal:', { unitId, status, age, cardStatusInfo });
+
+ // Fetch unit data and populate modal
+ fetchUnitDetails(unitId).then(unit => {
+ if (unit) {
+ currentUnitData = unit;
+ // Pass the card status info to the populate function
+ populateUnitModal(unit, cardStatusInfo);
+ modal.classList.add('show');
+ document.body.style.overflow = 'hidden';
+ }
+ });
+}
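+
+// Example call from a roster card's click handler (values are illustrative):
+//   openUnitModal('UNIT-001', 'OK', '2h');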
+
+function closeUnitModal(event) {
+  // Ignore clicks inside the modal content unless they target a [data-close-modal] element
+ if (event && event.target.closest('.unit-modal-content') && !event.target.closest('[data-close-modal]')) {
+ return;
+ }
+
+ const modal = document.getElementById('unitModal');
+ if (modal) {
+ modal.classList.remove('show');
+ document.body.style.overflow = '';
+ currentUnitData = null;
+ }
+}
+
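+// Network-first lookup with an offline fallback. window.offlineDB is the
+// optional IndexedDB wrapper (presumably provided by a separate offline
+// storage script); when present, units are cached via saveUnit()/getUnit().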
+async function fetchUnitDetails(unitId) {
+ try {
+ // Try to fetch from network first
+ const response = await fetch(`/api/roster/${unitId}`);
+ if (response.ok) {
+ const unit = await response.json();
+
+ // Save to IndexedDB if offline support is available
+ if (window.offlineDB) {
+ await window.offlineDB.saveUnit(unit);
+ }
+
+ return unit;
+ }
+ } catch (error) {
+ console.log('Network fetch failed, trying offline storage:', error);
+
+ // Fall back to offline storage
+ if (window.offlineDB) {
+ return await window.offlineDB.getUnit(unitId);
+ }
+ }
+
+ return null;
+}
+
+function populateUnitModal(unit, cardStatusInfo = null) {
+ // Set unit ID in header
+ const modalUnitId = document.getElementById('modalUnitId');
+ if (modalUnitId) {
+ modalUnitId.textContent = unit.id;
+ }
+
+ // Populate modal content
+ const modalContent = document.getElementById('modalContent');
+ if (!modalContent) return;
+
+ // Use status from card if provided, otherwise get from snapshot or derive from unit
+ let statusInfo = cardStatusInfo || getUnitStatus(unit.id, unit);
+ console.log('populateUnitModal:', { unit, cardStatusInfo, statusInfo });
+
+ const statusColor = statusInfo.status === 'OK' ? 'green' :
+ statusInfo.status === 'Pending' ? 'yellow' :
+ statusInfo.status === 'Missing' ? 'red' : 'gray';
+
+ const statusTextColor = statusInfo.status === 'OK' ? 'text-green-600 dark:text-green-400' :
+ statusInfo.status === 'Pending' ? 'text-yellow-600 dark:text-yellow-400' :
+ statusInfo.status === 'Missing' ? 'text-red-600 dark:text-red-400' :
+ 'text-gray-600 dark:text-gray-400';
+
+ // Determine status label (show "Benched" instead of "Unknown" for non-deployed units)
+ let statusLabel = statusInfo.status;
+ if ((statusInfo.status === 'Unknown' || statusInfo.status === 'N/A') && !unit.deployed) {
+ statusLabel = 'Benched';
+ }
+
+ // Create navigation URL for location
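+  // e.g. an address of "12 Main St" (illustrative) yields
+  // https://www.google.com/maps/search/?api=1&query=12%20Main%20St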
+ const createNavUrl = (address, coordinates) => {
+ if (address) {
+ // Use address for navigation
+ const encodedAddress = encodeURIComponent(address);
+ // Universal link that works on iOS and Android
+ return `https://www.google.com/maps/search/?api=1&query=${encodedAddress}`;
+ } else if (coordinates) {
+ // Use coordinates for navigation (format: lat,lon)
+ const encodedCoords = encodeURIComponent(coordinates);
+ return `https://www.google.com/maps/search/?api=1&query=${encodedCoords}`;
+ }
+ return null;
+ };
+
+ const navUrl = createNavUrl(unit.address, unit.coordinates);
+
+ modalContent.innerHTML = `
+
+