Compare commits: v0.4.0...27eeb0fae6

132 commits (SHA1s only):

27eeb0fae6 192e15f238 49bc625c1a 95fedca8c9 e8e155556a 33e962e73d
ac48fb2977 3c4b81cf78 d135727ebd 64d4423308 4f71d528ce 4f56dea4f3
57a85f565b e6555ba924 8694282dd0 bc02dc9564 0d01715f81 b3ec249c5e
b6e74258f1 1a87ff13c9 22c62c0729 0f47b69c92 76667454b3 0e3f512203
15d962ba42 e4d1f0d684 b571dc29bc e2c841d5d7 cc94493331 5a5426cceb
66eddd6fe2 c77794787c 61c84bc71d fbf7f2a65d 202fcaf91c 3a411d0a89
0c2186f5d8 c138e8c6a0 1dd396acd8 e89a04f58c e4ef065db8 86010de60c
f89f04cd6f 67a2faa2d3 14856e61ef 2b69518b33 6070d03e83 240552751c
015ce0a254 ef8c046f31 3637cf5af8 7fde14d882 bd3d937a82 291fa8e862
8e292b1aca 7516bbea70 da4e5f66c5 dae2595303 0c4e7aa5e6 229499ccf6
fdc4adeaee b3bf91880a 17b3f91dfc 6c1d0bc467 abd059983f 0f17841218
65362bab21 dc77a362ce 28942600ab 80861997af b15d434fce 70ef43de11
7b4e12c127 24473c9ca3 caabfd0c42 ebe60d2b7d 842e9d6f61 742a98a8ed
3b29c4d645 63d9c59873 794bfc00dc 89662d2fa5 eb0a99796d b47e69e609
1cb25b6c17 e515bff1a9 f296806fd1 24da5ab79f 305540f564 639b485c28
d78bafb76e 8373cff10d 4957a08198 05482bd903 5ee6f5eb28 7ce0f6115d
6492fdff82 44d7841852 38c600aca3 eeda94926f 57be9bf1f1 8431784708
c771a86675 65ea0920db 1f3fa7a718 a9c9b1fd48 4c213c96ee ff38b74548
c8a030a3ba d8a8330427 1ef0557ccb 6c7ce5aad0 54754e2279 8787a2dbb8
7971092509 d349af9444 be83cb3fe7 e9216b9abc d93785c230 98ee9d7cea
04c66bdf9c 8a5fadb5df 893cb96e8d c30d7fac22 6d34e543fe 4d74eda65f
96cb27ef83 85b211e532 e16f61aca7 dba4ad168c e78d252cf3 ab9c650d93
```diff
@@ -1,19 +1,44 @@
+docker-compose.override.yml
+
+# Python cache / compiled
 __pycache__
 *.pyc
 *.pyo
 *.pyd
 .Python
+
+# Build artifacts
 *.so
 *.egg
 *.egg-info
 dist
 build
+
+# VCS
 .git
 .gitignore
+
+# Databases (must live in volumes)
 *.db
 *.db-journal
+
+# Environment / virtualenv
 .env
 .venv
 venv/
 ENV/
+
+# Runtime data (mounted volumes)
 data/
+data-dev/
+
+# Editors / OS junk
+.vscode/
+.idea/
+.DS_Store
+Thumbs.db
+.claude
+sfm.code-workspace
+
+# Tests (optional)
+tests/
```
.gitignore (22 changes, vendored)

```diff
@@ -1,3 +1,17 @@
+# Terra-View Specifics
+# Dev build counter (local only, never commit)
+build_number.txt
+docker-compose.override.yml
+
+# SQLite database files
+*.db
+*.db-journal
+data/
+data-dev/
+.aider*
+.aider*
+
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[codz]
@@ -206,8 +220,14 @@ marimo/_static/
 marimo/_lsp/
 __marimo__/
 
+<<<<<<< HEAD
 # Seismo Fleet Manager
 # SQLite database files
 *.db
 *.db-journal
-data/
+/data/
+/data-dev/
+.aider*
+.aider*
+=======
+>>>>>>> 0c2186f5d89d948b0357d674c0773a67a67d8027
```

Note that the second hunk commits unresolved merge-conflict markers (`<<<<<<< HEAD` ... `>>>>>>> 0c2186f5d8...`) into `.gitignore` itself, and that `.aider*` now appears twice in both sections.
CHANGELOG.md (331 changes)

```diff
@@ -1,10 +1,331 @@
 # Changelog
 
-All notable changes to Seismo Fleet Manager will be documented in this file.
+All notable changes to Terra-View will be documented in this file.
 
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.9.2] - 2026-03-27
+
+### Added
+
+- **Deployment Records**: Seismographs now track a full deployment history (location, project, dates). Each deployment is logged on the unit detail page with start/end dates, and the fleet calendar service uses this history for availability calculations.
+- **Allocated Unit Status**: New `allocated` status for units reserved for an upcoming job but not yet deployed. Allocated units appear in the dashboard summary, roster filters, and devices table with visual indicators.
+- **Project Allocation**: Units can be linked to a project via `allocated_to_project_id`. Allocation is shown on the unit detail page and in a new quick-info modal accessible from the fleet calendar and roster.
+- **Quick-Info Unit Modal**: Click any unit in the fleet calendar or roster to open a modal showing cal status, project allocation, upcoming jobs, and deployment state — without leaving the page.
+- **Cal Date in Planner**: When a unit is selected for a monitoring location slot in the Job Planner, its calibration expiry date is now shown inline so you can spot near-expiry units before committing.
+- **Inline Seismograph Editing**: Unit rows in the seismograph dashboard now support inline editing of cal date, notes, and deployment status without navigating to the full detail page.
+
+### Migration Notes
+
+Run on each database before deploying:
+
+```bash
+docker compose exec terra-view python3 backend/migrate_add_allocated.py
+docker compose exec terra-view python3 backend/migrate_add_deployment_records.py
+```
+
+---
```
```diff
+## [0.9.1] - 2026-03-20
+
+### Fixed
+
+- **Location slots not persisting**: Empty monitoring location slots (no unit assigned yet) were lost on save/reload. Added `location_slots` JSON column to `job_reservations` to store the full slot list including empty slots.
+- **Modems in Recent Alerts**: Modems no longer appear in the dashboard Recent Alerts panel — alerts are for seismographs and SLMs only. Modem status is still tracked internally via paired device inheritance.
+- **Series 4 heartbeat `source_id`**: Updated heartbeat endpoint to accept the new `source_id` field from Series 4 units with fallback to the legacy field for backwards compatibility.
+
+### Migration Notes
+
+Run on each database before deploying:
+
+```bash
+docker compose exec terra-view python3 backend/migrate_add_location_slots.py
+```
+
+---
```
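The Series 4 `source_id` fix above is described but the handler itself is not part of this diff. A minimal sketch of the fallback pattern, assuming a JSON heartbeat body; the endpoint path and the legacy key name `unit` are assumptions:

```python
# Sketch of the 0.9.1 heartbeat fallback; not the shipped handler.
from fastapi import FastAPI, Request

app = FastAPI()

@app.post("/api/heartbeat")  # hypothetical path
async def heartbeat(request: Request):
    payload = await request.json()
    # Prefer the new Series 4 field; fall back to the legacy identifier.
    unit_id = payload.get("source_id") or payload.get("unit")
    return {"received": unit_id}
```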
```diff
+## [0.9.0] - 2026-03-19
+
+### Added
+
+- **Job Planner**: Full redesign of the Fleet Calendar into a two-tab Job Planner / Calendar interface
+  - **Planner tab**: Create and manage job reservations with name, device type, dates, color, estimated units, and monitoring locations
+  - **Calendar tab**: 12-month rolling heatmap with colored job bars per day; confirmed jobs solid, planned jobs dashed
+- **Monitoring Locations**: Each job has named location slots (filled = unit assigned, empty = needs a unit); progress shown as `2/5` with colored squares that fill as units are assigned
+- **Estimated Units**: Separate planning number independent of actual location count; shown prominently on job cards
+- **Fleet Summary panel**: Unit counts as clickable filter buttons; unit list shows reservation badges with job name, dates, and color
+- **Available Units panel**: Shows units available for the job's date range when assigning
+- **Smart color picker**: 18-swatch palette + custom color wheel; new jobs auto-pick a color maximally distant in hue from existing jobs
+- **Job card progress**: `est. N · X/Y (Z more)` with filled/empty squares; amber → green when fully assigned
+- **Promote to Project**: Promote a planned job to a tracked project directly from the planner form
+- **Collapsible job details**: Name, dates, device type, color, project link, and estimated units collapse into a summary header
+- **Calendar bar tooltips**: Hover any job bar to see job name and date range
+- **Hash-based tab persistence**: `#cal` in URL restores Calendar tab on refresh; device type toggle preserves active tab
+- **Auto-scroll to today**: Switching to Calendar tab smooth-scrolls to the current month
+- **Upcoming project status**: New `upcoming` status for projects promoted from reservations
+- **Job device type**: Reservations carry a device type so they only appear on the correct calendar
+- **Project filtering by device type**: Projects only appear on the calendar matching their type (vibration → seismograph, sound → SLM, combined → both)
+- **Confirmed/Planned toggles**: Independent show/hide toggles for job bar layers on the calendar
+- **Cal expire dots toggle**: Calibration expiry dots off by default, togglable
+
+### Changed
+
+- **Renamed**: "Fleet Calendar" / "Reservation Planner" → **"Job Planner"** throughout UI and sidebar
+- **Project status dropdown**: Inline `<select>` in project header for quick status changes
+- **"All Projects" tab**: Shows everything except deleted; default view excludes archived/completed
+- **Toast notifications**: All `alert()` dialogs replaced with non-blocking toasts (green = success, red = error)
+
+### Migration Notes
+
+Run on each database before deploying:
+
+```bash
+docker compose exec terra-view python3 -c "
+import sqlite3
+conn = sqlite3.connect('/app/data/seismo_fleet.db')
+conn.execute('ALTER TABLE job_reservations ADD COLUMN estimated_units INTEGER')
+conn.commit()
+conn.close()
+"
+```
+
+---
```
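A note on the inline migration above: SQLite's `ALTER TABLE ... ADD COLUMN` fails with "duplicate column name" if the column already exists, so the one-liner is not safe to re-run. An idempotent variant (a sketch, not the shipped script; table, column, and DB path are taken from the snippet above):

```python
# Idempotent version of the 0.9.0 estimated_units migration (sketch).
import sqlite3

conn = sqlite3.connect("/app/data/seismo_fleet.db")
cols = [row[1] for row in conn.execute("PRAGMA table_info(job_reservations)")]
if "estimated_units" not in cols:
    conn.execute("ALTER TABLE job_reservations ADD COLUMN estimated_units INTEGER")
    conn.commit()
conn.close()
```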
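The smart color picker's "maximally distant in hue" rule can be made concrete. A sketch of the idea only; the actual picker code is not part of this diff, and the saturation/value choices and default color below are assumptions:

```python
# Sketch: pick a hue maximizing the minimum circular distance to existing
# job colors, per the 0.9.0 "smart color picker" bullet above.
import colorsys

def pick_distinct_color(existing_hex: list[str]) -> str:
    def hue_of(hex_color: str) -> float:
        r, g, b = (int(hex_color.lstrip("#")[i:i + 2], 16) / 255 for i in (0, 2, 4))
        return colorsys.rgb_to_hsv(r, g, b)[0]

    used = [hue_of(c) for c in existing_hex]
    if not used:
        return "#3b82f6"  # arbitrary default for the first job
    best_h, best_d = 0.0, -1.0
    for i in range(360):  # candidate hues in 1-degree steps
        h = i / 360
        d = min(min(abs(h - u), 1 - abs(h - u)) for u in used)  # circular distance
        if d > best_d:
            best_h, best_d = h, d
    r, g, b = colorsys.hsv_to_rgb(best_h, 0.7, 0.9)
    return "#{:02x}{:02x}{:02x}".format(int(r * 255), int(g * 255), int(b * 255))
```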
```diff
+## [0.8.0] - 2026-03-18
+
+### Added
+
+- **Watcher Manager**: New admin page (`/admin/watchers`) for monitoring field watcher agents
+  - Live status cards per agent showing connectivity, version, IP, last-seen age, and log tail
+  - Trigger Update button to queue a self-update on the agent's next heartbeat
+  - Expand/collapse log tail with full-log expand mode
+  - Live surgical refresh every 30 seconds via `/api/admin/watchers` — no full page reload, open logs stay open
+
+### Changed
+
+- **Watcher status logic**: Agent status now reflects whether Terra-View is hearing from the watcher (ok if seen within 60 minutes, missing otherwise) — previously reflected the worst unit status from the last heartbeat payload, which caused false alarms when units went missing
+
+### Fixed
+
+- **Watcher Manager meta row**: Dark mode background was white due to invalid `dark:bg-slate-850` Tailwind class; corrected to `dark:bg-slate-800`
+
+---
```
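The new watcher status rule reduces to a single staleness check. A sketch, assuming timezone-aware `last_seen` timestamps (the shipped logic is not shown in this diff):

```python
# Sketch of the 0.8.0 rule: ok if heard from within 60 minutes, else missing.
from datetime import datetime, timedelta, timezone

def watcher_status(last_seen: datetime) -> str:
    age = datetime.now(timezone.utc) - last_seen
    return "ok" if age <= timedelta(minutes=60) else "missing"
```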
```diff
+## [0.7.1] - 2026-03-12
+
+### Added
+
+- **"Out for Calibration" Unit Status**: New `out_for_cal` status for units currently away for calibration, with visual indicators in the roster, unit list, and seismograph stats panel
+- **Reservation Modal**: Fleet calendar reservation modal is now fully functional for creating and managing device reservations
+
+### Changed
+
+- **Retire Unit Button**: Redesigned to be more visually prominent/destructive to reduce accidental clicks
+
+### Fixed
+
+- **Migration Scripts**: Fixed database path references in several migration scripts
+- **Docker Compose**: Removed dev override file from the repository; dev environment config kept separate
+
+### Migration Notes
+
+Run the following migration script once per database before deploying:
+
+```bash
+python backend/migrate_add_out_for_calibration.py
+```
+
+---
```
```diff
+## [0.7.0] - 2026-03-07
+
+### Added
+
+- **Project Status Management**: Projects can now be placed `on_hold` or `archived`, with automatic cancellation of pending scheduled actions
+- **Hard Delete Projects**: Support for permanently deleting projects, in addition to soft-delete with auto-pruning
+- **Vibration Location Detail**: New dedicated template for vibration project location detail views
+- **Vibration Project Isolation**: Vibration projects no longer show SLM-specific project tabs
+- **Manual SD Card Data Upload**: Upload offline NRL data directly from SD card via ZIP or multi-file select
+  - Accepts `.rnd`/`.rnh` files; parses `.rnh` metadata for session start/stop times, serial number, and store name
+  - Creates `MonitoringSession` and `DataFile` records automatically; no unit assignment required
+  - Upload panel on NRL detail Data Files tab with inline feedback and auto-refresh via HTMX
+- **Standalone SLM Type**: New SLM device mode that operates without a modem (direct IP connection)
+- **NL32 Data Support**: Report generator and web viewer now support NL32 measurement data format
+- **Combined Report Wizard**: Multi-session combined Excel report generation tool
+  - Wizard UI grouped by location with period type badges (day/night)
+  - Each selected session produces one `.xlsx` in a ZIP archive
+  - Period type filtering: day sessions keep last calendar date (7AM–6:59PM); night sessions span both days (7PM–6:59AM)
+- **Combined Report Preview**: Interactive spreadsheet-style preview before generating combined reports
+- **Chart Preview**: Live chart preview in the report generator matching final report styling
+- **SLM Model Schemas**: Per-model configuration schemas for NL32, NL43, NL53 devices
+- **Data Collection Mode**: Projects now store a data collection mode field with UI controls and migration
+
+### Changed
+
+- **MonitoringSession rename**: `RecordingSession` renamed to `MonitoringSession` throughout codebase; DB table renamed from `recording_sessions` to `monitoring_sessions`
+  - Migration: `backend/migrate_rename_recording_to_monitoring_sessions.py`
+- **Combined Report Split Logic**: Separate days now generate separate `.xlsx` files; NRLs remain one per sheet
+- **Mass Upload Parsing**: Smarter file filtering — no longer imports unneeded Lp files or `.xlsx` files
+- **SLM Start Time Grace Period**: 15-minute grace window added so data starting at session start time is included
+- **NL32 Date Parsing**: Date now read from `start_time` field instead of file metadata
+- **Project Data Labels**: Improved Jinja filters and UI label clarity for project data views
+
+### Fixed
+
+- **Dev/Prod Separation**: Dev server now uses Docker Compose override; production deployment no longer affected by dev config
+- **SLM Modal**: Bench/deploy toggle now correctly shown in SLM unit modal
+- **Auto-Downloaded Files**: Files downloaded by scheduler now appear in project file listings
+- **Duplicate Download**: Removed duplicate file download that occurred following a scheduled stop
+- **SLMM Environment Variables**: `TCP_IDLE_TTL` and `TCP_MAX_AGE` now correctly passed to SLMM service via docker-compose
+
+### Technical Details
+
+- `session_label` and `period_type` stored on `monitoring_sessions` table (migration: `migrate_add_session_period_type.py`)
+- `device_model` stored on `monitoring_sessions` table (migration: `migrate_add_session_device_model.py`)
+- Upload endpoint: `POST /api/projects/{project_id}/nrl/{location_id}/upload-data`
+- ZIP filename format: `{session_label}_{project_name}_report.xlsx` (label first)
+
+### Migration Notes
+
+Run the following migration scripts once per database before deploying:
+
+```bash
+python backend/migrate_rename_recording_to_monitoring_sessions.py
+python backend/migrate_add_session_period_type.py
+python backend/migrate_add_session_device_model.py
+```
+
+---
```
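The day/night windows used by the period type filtering above reduce to a simple time-of-day test. A sketch only; the shipped filter in the report generator is not shown in this diff:

```python
# Sketch of the 0.7.0 period rule: day = 7:00 AM-6:59 PM, night = the
# complement (7:00 PM-6:59 AM, spanning midnight).
from datetime import datetime, time

def in_period(ts: datetime, period_type: str) -> bool:
    day_start, day_end = time(7, 0), time(18, 59, 59)
    if period_type == "day":
        return day_start <= ts.time() <= day_end
    # Night spans midnight, so it is everything outside the day window.
    return ts.time() > day_end or ts.time() < day_start
```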
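For the upload endpoint listed under Technical Details, a hedged client-side example. The host, project/location IDs, filenames, and the multipart field name `files` are placeholders and assumptions; only the URL pattern and the `.rnd`/`.rnh` file types come from the changelog:

```python
# Hypothetical usage of POST /api/projects/{project_id}/nrl/{location_id}/upload-data.
import requests

url = "http://localhost:8000/api/projects/PROJ-1/nrl/NRL-1/upload-data"
with open("NL43_0001.rnh", "rb") as rnh, open("NL43_0001.rnd", "rb") as rnd:
    resp = requests.post(url, files=[("files", rnh), ("files", rnd)])
print(resp.status_code, resp.json())
```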
```diff
+## [0.6.1] - 2026-02-16
+
+### Added
+
+- **One-Off Recording Schedules**: Support for scheduling single recordings with specific start and end datetimes
+- **Bidirectional Pairing Sync**: Pairing a device with a modem now automatically updates both sides, clearing stale pairings when reassigned
+- **Auto-Fill Notes from Modem**: Notes are now copied from modem to paired device when fields are empty
+- **SLMM Download Requests**: New `_download_request` method in SLMM client for binary file downloads with local save
+
+### Fixed
+
+- **Scheduler Timezone**: One-off scheduler times now use local time instead of UTC
+- **Pairing Consistency**: Old device references are properly cleared when a modem is re-paired to a new device
+
+## [0.6.0] - 2026-02-06
+
+### Added
+
+- **Calendar & Reservation Mode**: Fleet calendar view with reservation system for scheduling device deployments
+- **Device Pairing Interface**: New two-column pairing page (`/pair-devices`) for linking recorders (seismographs/SLMs) with modems
+  - Visual pairing interface with drag-and-drop style interactions
+  - Fuzzy-search modem pairing for SLMs
+  - Pairing options now accessible from modem page
+  - Improved pair status sharing across views
+- **Modem Dashboard Enhancements**:
+  - Modem model number now a dedicated configuration field with per-model options
+  - Direct link to modem login page from unit detail view
+  - Modem view converted to list format
+- **Seismograph List Improvements**:
+  - Enhanced visibility with better filtering and sorting
+  - Calibration dates now color-coded for quick status assessment
+  - User sets date of previous calibration (not expiry) for clearer workflow
+- **SLMM Device Control Lock**: Prevents command flooding to NL-43 devices
+
+### Changed
+
+- **Calibration Date UX**: Users now set the date of the previous calibration rather than upcoming expiry dates - more intuitive workflow
+- **Settings Persistence**: Settings save no longer reloads the page
+- **Tab State**: Tab state now persists in URL hash for better navigation
+- **Scheduler Management**: Schedule changes now cascade to individual events
+- **Dashboard Filtering**: Enhanced dashboard with additional filtering options and SLM status sync
+- **SLMM Polling Intervals**: Fixed and improved polling intervals for better responsiveness
+- **24-Hour Scheduler Cycle**: Improved cycle handling to prevent issues with scheduled downloads
+
+### Fixed
+
+- **SLM Modal Fields**: Modal now only contains correct device-specific fields
+- **IP Address Handling**: IP address correctly passed via modem pairing
+- **Mobile Type Display**: Fixed incorrect device type display in roster and device tables
+- **SLMM Scheduled Downloads**: Fixed issues with scheduled download operations
+
+## [0.5.1] - 2026-01-27
+
+### Added
+
+- **Dashboard Schedule View**: Today's scheduled actions now display directly on the main dashboard
+  - New "Today's Actions" panel showing upcoming and past scheduled events
+  - Schedule list partial for project-specific schedule views
+  - API endpoint for fetching today's schedule data
+- **New Branding Assets**: Complete logo rework for Terra-View
+  - New Terra-View logos for light and dark themes
+  - Retina-ready (@2x) logo variants
+  - Updated favicons (16px and 32px)
+  - Refreshed PWA icons (72px through 512px)
+
+### Changed
+
+- **Dashboard Layout**: Reorganized to include schedule information panel
+- **Base Template**: Updated to use new Terra-View logos with theme-aware switching
+
+## [0.5.0] - 2026-01-23
+
+_Note: This version was not formally released; changes were included in v0.5.1._
+
+## [0.4.4] - 2026-01-23
+
+### Added
+
+- **Recurring schedules**: New scheduler service, recurring schedule APIs, and schedule templates (calendar/interval/list).
+- **Alerts UI + backend**: Alerting service plus dropdown/list templates for surfacing notifications.
+- **Report templates + viewers**: CRUD API for report templates, report preview screen, and RND file viewer.
+- **SLM tooling**: SLM settings modal and SLM project report generator workflow.
+
+### Changed
+
+- **Project data management**: Unified files view, refreshed FTP browser, and new project header/templates for file/session/unit/assignment lists.
+- **Device/SLM sync**: Standardized SLM device types and tightened SLMM sync paths.
+- **Docs/scripts**: Cleanup pass and expanded device-type documentation.
+
+### Fixed
+
+- **Scheduler actions**: Strict command definitions so actions run reliably.
+- **Project view title**: Resolved JSON string rendering in project headers.
+
+## [0.4.3] - 2026-01-14
+
+### Added
+
+- **Sound Level Meter roster tooling**: Roster manager surfaces SLM metadata, supports rename unit flows, and adds return-to-project navigation to keep SLM dashboard users oriented.
+- **Project management templates**: New schedule and unit list templates plus file/session lists show what each project stores before teams dive into deployments.
+
+### Changed
+
+- **Project view refresh**: FTP browser now downloads folders locally, the countdown timer was rebuilt, and project/device templates gained edit modals for projects and locations so navigation feels smoother.
+- **SLM control sync & accuracy**: Control center groundwork now runs inside the dev UI, configuration edits propagate to SLMM (which caches configs for faster responses), and the SLM live view reads the correct DRD fields after the refactor.
+
+### Fixed
+
+- **SLM UI syntax bug**: Resolved the unexpected token error that appeared in the refreshed SLM components.
+
+## [0.4.2] - 2026-01-05
+
+### Added
+
+- **SLM Configuration Interface**: Sound Level Meters can now be configured directly from the SLM dashboard
+  - Configuration modal with comprehensive SLM parameter editing
+  - TCP port configuration for SLM control connections (default: 2255)
+  - FTP port configuration for SLM data retrieval (default: 21)
+  - Modem assignment for network access or direct IP connection support
+  - Test Modem button with ping-based connectivity verification (shows IP and response time)
+  - Test SLM Connection button for end-to-end connectivity validation
+  - Dynamic form fields that hide/show based on modem selection
+- **SLM Dashboard Endpoints**: New API routes for SLM management
+  - `GET /api/slm-dashboard/config/{unit_id}` - Load SLM configuration form
+  - `POST /api/slm-dashboard/config/{unit_id}` - Save SLM configuration
+  - `GET /api/slm-dashboard/test-modem/{modem_id}` - Ping modem for connectivity test
+- **Database Schema Updates**: Added `slm_ftp_port` column to roster table
+  - Migration script: `scripts/add_slm_ftp_port.py`
+  - Supports both TCP (control) and FTP (data) port configuration per SLM unit
+- **Docker Environment Enhancements**:
+  - Added `iputils-ping` and `curl` packages to Docker image for network diagnostics
+  - Health check endpoint support via curl
+
+### Fixed
+
+- **Form Validation**: Fixed 400 Bad Request error when adding modem units
+  - Form fields for device-specific parameters now properly disabled when hidden
+  - Empty string values for integer fields no longer cause validation failures
+  - JavaScript now disables hidden form sections to prevent unwanted data submission
+- **Unit Status Accuracy**: Fixed issue where unit status was loading from a saved cache instead of actual last-heard time
+  - Unit status now accurately reflects real-time connectivity
+  - Status determination based on actual `slm_last_check` timestamp
+
+### Changed
+
+- **Roster Form Behavior**: Device-specific form fields are now disabled (not just hidden) when not applicable
+  - Prevents SLM fields from submitting when adding modems
+  - Prevents modem fields from submitting when adding SLMs
+  - Cleaner form submissions with only relevant data
+- **Port Field Handling**: Backend now accepts port fields as strings and converts to integers
+  - Handles empty string values gracefully
+  - Proper type conversion with None fallback for empty values
+
+### Technical Details
+
+- Added `setFieldsDisabled()` helper function for managing form field state
+- Updated `toggleDeviceFields()` and `toggleEditDeviceFields()` to disable/enable fields
+- Backend type conversion: `slm_tcp_port` and `slm_ftp_port` accept strings, convert to int with empty string handling
+- Modem ping uses subprocess with 1 packet, 2-second timeout, returns response time in milliseconds
+- Configuration form uses 3-column grid layout for TCP Port, FTP Port, and Direct IP fields
```
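The modem ping described above (one packet, two-second timeout, reply time in milliseconds) maps onto `iputils-ping`, which the Dockerfile below installs. A sketch of such a helper; the shipped one is not shown in this diff, so the function name and exact parsing are assumptions:

```python
# Sketch of a ping helper matching the 0.4.2 technical note above.
import re
import subprocess

def ping_modem(ip: str) -> float | None:
    """Return round-trip time in ms, or None if the modem is unreachable."""
    result = subprocess.run(
        ["ping", "-c", "1", "-W", "2", ip],  # 1 packet, 2-second timeout
        capture_output=True, text=True,
    )
    if result.returncode != 0:
        return None
    match = re.search(r"time=([\d.]+) ms", result.stdout)
    return float(match.group(1)) if match else None
```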
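The port-field handling above ("accepts strings, converts to int, None fallback for empty values") is small enough to sketch outright; the real helper's name is not shown in this diff:

```python
# Sketch of the 0.4.2 port coercion: form values arrive as strings.
def coerce_port(value: str | int | None) -> int | None:
    """Accept values like "2255", 2255, or "" and normalize to int or None."""
    if value in (None, ""):
        return None
    return int(value)
```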
```diff
+## [0.4.1] - 2026-01-05
+
+### Added
+
+- **SLM Integration**: Sound Level Meters are now manageable in SFM
+
+### Fixed
+
+- Fixed an issue where unit status was loading from a saved cache and not based on when it was actually heard from last. Unit status is now accurate.
+
+
 ## [0.4.0] - 2025-12-16
 
 ### Added
@@ -293,6 +614,14 @@ No database migration required for v0.4.0. All new features use existing databas
 - Photo management per unit
 - Automated status categorization (OK/Pending/Missing)
 
+[0.7.0]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.6.1...v0.7.0
+[0.6.0]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.5.1...v0.6.0
+[0.5.1]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.5.0...v0.5.1
+[0.5.0]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.4.4...v0.5.0
+[0.4.4]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.4.3...v0.4.4
+[0.4.3]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.4.2...v0.4.3
+[0.4.2]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.4.1...v0.4.2
+[0.4.1]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.4.0...v0.4.1
 [0.4.0]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.3.3...v0.4.0
 [0.3.3]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.3.2...v0.3.3
 [0.3.2]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.3.1...v0.3.2
```
Dockerfile

```diff
@@ -1,8 +1,17 @@
 FROM python:3.11-slim
 
+# Build number for dev builds (injected via --build-arg)
+ARG BUILD_NUMBER=0
+ENV BUILD_NUMBER=${BUILD_NUMBER}
+
 # Set working directory
 WORKDIR /app
 
+# Install system dependencies (ping for network diagnostics)
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends iputils-ping curl && \
+    rm -rf /var/lib/apt/lists/*
+
 # Copy requirements first for better caching
 COPY requirements.txt .
```

Per the comment, the build number is supplied at build time via `--build-arg BUILD_NUMBER=<n>`; in development builds, `backend/main.py` (below) appends it to the reported version string.
README.md (95 changes)

```diff
@@ -1,4 +1,4 @@
-# Seismo Fleet Manager v0.4.0
+# Terra-View v0.9.2
 Backend API and HTMX-powered web interface for managing a mixed fleet of seismographs and field modems. Track deployments, monitor health in real time, merge roster intent with incoming telemetry, and control your fleet through a unified database and dashboard.
 
 ## Features
@@ -308,7 +308,7 @@ print(response.json())
 |-------|------|-------------|
 | id | string | Unit identifier (primary key) |
 | unit_type | string | Hardware model name (default: `series3`) |
-| device_type | string | `seismograph` or `modem` discriminator |
+| device_type | string | Device type: `"seismograph"`, `"modem"`, or `"slm"` (sound level meter) |
 | deployed | boolean | Whether the unit is in the field |
 | retired | boolean | Removes the unit from deployments but preserves history |
 | note | string | Notes about the unit |
@@ -334,6 +334,39 @@ print(response.json())
 | phone_number | string | Cellular number for the modem |
 | hardware_model | string | Modem hardware reference |
 
+**Sound Level Meter (SLM) fields**
+
+| Field | Type | Description |
+|-------|------|-------------|
+| slm_host | string | Direct IP address for SLM (if not using modem) |
+| slm_tcp_port | integer | TCP control port (default: 2255) |
+| slm_ftp_port | integer | FTP file transfer port (default: 21) |
+| slm_model | string | Device model (NL-43, NL-53) |
+| slm_serial_number | string | Manufacturer serial number |
+| slm_frequency_weighting | string | Frequency weighting setting (A, C, Z) |
+| slm_time_weighting | string | Time weighting setting (F=Fast, S=Slow) |
+| slm_measurement_range | string | Measurement range setting |
+| slm_last_check | datetime | Last status check timestamp |
+| deployed_with_modem_id | string | Modem pairing (shared with seismographs) |
+
+### Device Type Schema
+
+Terra-View supports three device types with the following standardized `device_type` values:
+
+- **`"seismograph"`** (default) - Seismic monitoring devices (Series 3, Series 4, Micromate)
+  - Uses: calibration dates, modem pairing
+  - Examples: BE1234, UM12345 (Series 3/4 units)
+
+- **`"modem"`** - Field modems and network equipment
+  - Uses: IP address, phone number, hardware model
+  - Examples: MDM001, MODEM-2025-01
+
+- **`"slm"`** - Sound level meters (Rion NL-43/NL-53)
+  - Uses: TCP/FTP configuration, measurement settings, modem pairing
+  - Examples: SLM-43-01, NL43-001
+
+**Important**: All `device_type` values must be lowercase. The legacy value `"sound_level_meter"` has been deprecated in favor of the shorter `"slm"`. Run `backend/migrate_standardize_device_types.py` to update existing databases.
+
 ### Emitter Table (Device Check-ins)
 
 | Field | Type | Description |
```
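The referenced `backend/migrate_standardize_device_types.py` is not included in this diff. Based on the note above, it plausibly amounts to the following; the `roster` table name comes from the changelog, the DB path from the 0.9.0 migration snippet, and the rest is assumption:

```python
# Sketch of a device_type standardization migration (not the shipped script):
# lowercase all values and map the deprecated "sound_level_meter" to "slm".
import sqlite3

conn = sqlite3.connect("/app/data/seismo_fleet.db")
conn.execute("UPDATE roster SET device_type = LOWER(device_type) WHERE device_type IS NOT NULL")
conn.execute("UPDATE roster SET device_type = 'slm' WHERE device_type = 'sound_level_meter'")
conn.commit()
conn.close()
```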
```diff
@@ -463,6 +496,40 @@ docker compose down -v
 
 ## Release Highlights
 
+### v0.8.0 — 2026-03-18
+- **Watcher Manager**: Admin page for monitoring field watcher agents with live status cards, log tails, and one-click update triggering
+- **Watcher Status Fix**: Agent status now reflects heartbeat connectivity (missing if not heard from in >60 min) rather than unit-level data staleness
+- **Live Refresh**: Watcher Manager surgically patches status, last-seen, and pending indicators every 30s without a full page reload
+
+### v0.7.0 — 2026-03-07
+- **Project Status Management**: On-hold and archived project states with automatic cancellation of pending actions
+- **Manual SD Card Upload**: Upload offline NRL/SLM data directly from SD card (ZIP or multi-file); auto-creates monitoring sessions from `.rnh` metadata
+- **Combined Report Wizard**: Multi-session Excel report generation with location grouping, period type filtering, and ZIP download
+- **NL32 Support**: Report generator and web viewer now handle NL32 measurement data
+- **Chart Preview**: Live chart preview in the report generator matching final output styling
+- **Standalone SLM Mode**: SLMs can now be configured without a paired modem (direct IP)
+- **Vibration Project Isolation**: Vibration project views no longer show SLM-specific tabs
+- **MonitoringSession Rename**: `RecordingSession` renamed to `MonitoringSession` throughout; run migration before deploying
+
+### v0.6.1 — 2026-02-16
+- **One-Off Recording Schedules**: Schedule single recordings with specific start/end datetimes
+- **Bidirectional Pairing Sync**: Device-modem pairing now updates both sides automatically
+- **Scheduler Timezone Fix**: One-off schedule times use local time instead of UTC
+
+### v0.6.0 — 2026-02-06
+- **Calendar & Reservation Mode**: Fleet calendar view with device deployment scheduling and reservation system
+- **Device Pairing Interface**: New `/pair-devices` page with two-column layout for linking recorders with modems, fuzzy-search, and visual pairing workflow
+- **Calibration UX Overhaul**: Users now set date of previous calibration (not expiry); seismograph list enhanced with color-coded calibration status, filtering, and sorting
+- **Modem Dashboard**: Model number as dedicated config, modem login links, list view format, and pairing options accessible from modem page
+- **SLMM Improvements**: Device control lock prevents command flooding, fixed polling intervals and scheduled downloads
+- **UI Polish**: Tab state persists in URL hash, settings save without reload, scheduler changes cascade to events, fixed mobile type display
+
+### v0.4.3 — 2026-01-14
+- **Sound Level Meter workflow**: Roster manager surfaces SLM metadata, supports rename actions, and adds return-to-project navigation plus schedule/unit templates for project planning.
+- **Project insight panels**: Project dashboards now expose file and session lists so teams can see what each project stores before diving into units.
+- **Project view polish**: FTP browser supports folder downloads, the timer display was reimplemented, and the project/device templates gained edit modals for projects and locations to streamline navigation.
+- **SLM sync & accuracy**: Configuration edits now propagate to SLMM (which caches configs for faster responses) and the live view uses the correct DRD fields so telemetry aligns with the control center.
+
 ### v0.4.0 — 2025-12-16
 - **Database Management System**: Complete backup and restore functionality with manual snapshots, restore operations, and upload/download capabilities
 - **Remote Database Cloning**: New `clone_db_to_dev.py` script for copying production database to remote dev servers over WAN
@@ -532,9 +599,29 @@ MIT
 
 ## Version
 
-**Current: 0.4.0** — Database management system with backup/restore and remote cloning (2025-12-16)
+**Current: 0.8.0** — Watcher Manager admin page, live agent status refresh, watcher connectivity-based status (2026-03-18)
 
-Previous: 0.3.3 — Mobile navigation improvements and better status visibility (2025-12-12)
+Previous: 0.7.1 — Out-for-calibration status, reservation modal, migration fixes (2026-03-12)
+
+0.7.0 — Project status management, manual SD card upload, combined report wizard, NL32 support, MonitoringSession rename (2026-03-07)
+
+0.6.1 — One-off recording schedules, bidirectional pairing sync, scheduler timezone fix (2026-02-16)
+
+0.6.0 — Calendar & reservation mode, device pairing interface, calibration UX overhaul, modem dashboard enhancements (2026-02-06)
+
+0.5.1 — Dashboard schedule view with today's actions panel, new Terra-View branding and logo rework (2026-01-27)
+
+0.4.4 — Recurring schedules, alerting UI, report templates + RND viewer, and SLM workflow polish (2026-01-23)
+
+0.4.3 — SLM roster/project view refresh, project insight panels, FTP browser folder downloads, and SLMM sync (2026-01-14)
+
+0.4.2 — SLM configuration interface with TCP/FTP controls, modem diagnostics, and dashboard endpoints for Sound Level Meters (2026-01-05)
+
+0.4.1 — Sound Level Meter integration with full management UI for SLM units (2026-01-05)
+
+0.4.0 — Database management system with backup/restore and remote cloning (2025-12-16)
+
+0.3.3 — Mobile navigation improvements and better status visibility (2025-12-12)
 
 0.3.2 — Progressive Web App with mobile optimization (2025-12-12)
```
assets/terra-view-icon_large.png (new binary file, 36 KiB)
backend/init_projects_db.py (new file, 108 lines)

```python
#!/usr/bin/env python3
"""
Database initialization script for Projects system.

This script creates the new project management tables and populates
the project_types table with default templates.

Usage:
    python -m backend.init_projects_db
"""

from sqlalchemy.orm import Session
from backend.database import engine, SessionLocal
from backend.models import (
    Base,
    ProjectType,
    Project,
    MonitoringLocation,
    UnitAssignment,
    ScheduledAction,
    MonitoringSession,
    DataFile,
)
from datetime import datetime


def init_project_types(db: Session):
    """Initialize default project types."""
    project_types = [
        {
            "id": "sound_monitoring",
            "name": "Sound Monitoring",
            "description": "Noise monitoring projects with sound level meters and NRLs (Noise Recording Locations)",
            "icon": "volume-2",  # Lucide icon name
            "supports_sound": True,
            "supports_vibration": False,
        },
        {
            "id": "vibration_monitoring",
            "name": "Vibration Monitoring",
            "description": "Seismic/vibration monitoring projects with seismographs and monitoring points",
            "icon": "activity",  # Lucide icon name
            "supports_sound": False,
            "supports_vibration": True,
        },
        {
            "id": "combined",
            "name": "Combined Monitoring",
            "description": "Full-spectrum monitoring with both sound and vibration capabilities",
            "icon": "layers",  # Lucide icon name
            "supports_sound": True,
            "supports_vibration": True,
        },
    ]

    for pt_data in project_types:
        existing = db.query(ProjectType).filter_by(id=pt_data["id"]).first()
        if not existing:
            pt = ProjectType(**pt_data)
            db.add(pt)
            print(f"✓ Created project type: {pt_data['name']}")
        else:
            print(f"  Project type already exists: {pt_data['name']}")

    db.commit()


def create_tables():
    """Create all tables defined in models."""
    print("Creating project management tables...")
    Base.metadata.create_all(bind=engine)
    print("✓ Tables created successfully")


def main():
    print("=" * 60)
    print("Terra-View Projects System - Database Initialization")
    print("=" * 60)
    print()

    # Create tables
    create_tables()
    print()

    # Initialize project types
    db = SessionLocal()
    try:
        print("Initializing project types...")
        init_project_types(db)
        print()
        print("=" * 60)
        print("✓ Database initialization complete!")
        print("=" * 60)
        print()
        print("Next steps:")
        print("  1. Restart Terra-View to load new routes")
        print("  2. Navigate to /projects to create your first project")
        print("  3. Check documentation for API endpoints")
    except Exception as e:
        print(f"✗ Error during initialization: {e}")
        db.rollback()
        raise
    finally:
        db.close()


if __name__ == "__main__":
    main()
```
495
backend/main.py
@@ -1,17 +1,27 @@
|
|||||||
import os
|
import os
|
||||||
from fastapi import FastAPI, Request, Depends
|
import logging
|
||||||
|
from fastapi import FastAPI, Request, Depends, HTTPException
|
||||||
from fastapi.middleware.cors import CORSMiddleware
|
from fastapi.middleware.cors import CORSMiddleware
|
||||||
from fastapi.staticfiles import StaticFiles
|
from fastapi.staticfiles import StaticFiles
|
||||||
from fastapi.templating import Jinja2Templates
|
from fastapi.templating import Jinja2Templates
|
||||||
from fastapi.responses import HTMLResponse, FileResponse, JSONResponse
|
from fastapi.responses import HTMLResponse, FileResponse, JSONResponse
|
||||||
|
from fastapi.exceptions import RequestValidationError
|
||||||
from sqlalchemy.orm import Session
|
from sqlalchemy.orm import Session
|
||||||
from typing import List, Dict
|
from typing import List, Dict, Optional
|
||||||
from pydantic import BaseModel
|
from pydantic import BaseModel
|
||||||
|
|
||||||
|
# Configure logging
|
||||||
|
logging.basicConfig(
|
||||||
|
level=logging.INFO,
|
||||||
|
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
||||||
|
)
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
from backend.database import engine, Base, get_db
|
from backend.database import engine, Base, get_db
|
||||||
from backend.routers import roster, units, photos, roster_edit, dashboard, dashboard_tabs, activity
|
from backend.routers import roster, units, photos, roster_edit, roster_rename, dashboard, dashboard_tabs, activity, slmm, slm_ui, slm_dashboard, seismo_dashboard, projects, project_locations, scheduler, modem_dashboard
|
||||||
from backend.services.snapshot import emit_status_snapshot
|
from backend.services.snapshot import emit_status_snapshot
|
||||||
from backend.models import IgnoredUnit
|
from backend.models import IgnoredUnit
|
||||||
|
from backend.utils.timezone import get_user_timezone
|
||||||
|
|
||||||
# Create database tables
|
# Create database tables
|
||||||
Base.metadata.create_all(bind=engine)
|
Base.metadata.create_all(bind=engine)
|
||||||
@@ -20,13 +30,27 @@ Base.metadata.create_all(bind=engine)
|
|||||||
ENVIRONMENT = os.getenv("ENVIRONMENT", "production")
|
ENVIRONMENT = os.getenv("ENVIRONMENT", "production")
|
||||||
|
|
||||||
# Initialize FastAPI app
|
# Initialize FastAPI app
|
||||||
VERSION = "0.4.0"
|
VERSION = "0.9.2"
|
||||||
|
if ENVIRONMENT == "development":
|
||||||
|
_build = os.getenv("BUILD_NUMBER", "0")
|
||||||
|
if _build and _build != "0":
|
||||||
|
VERSION = f"{VERSION}-{_build}"
|
||||||
app = FastAPI(
|
app = FastAPI(
|
||||||
title="Seismo Fleet Manager",
|
title="Seismo Fleet Manager",
|
||||||
description="Backend API for managing seismograph fleet status",
|
description="Backend API for managing seismograph fleet status",
|
||||||
version=VERSION
|
version=VERSION
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Add validation error handler to log details
|
||||||
|
@app.exception_handler(RequestValidationError)
|
||||||
|
async def validation_exception_handler(request: Request, exc: RequestValidationError):
|
||||||
|
logger.error(f"Validation error on {request.url}: {exc.errors()}")
|
||||||
|
logger.error(f"Body: {await request.body()}")
|
||||||
|
return JSONResponse(
|
||||||
|
status_code=400,
|
||||||
|
content={"detail": exc.errors()}
|
||||||
|
)
|
||||||
|
|
||||||
# Configure CORS
|
# Configure CORS
|
||||||
app.add_middleware(
|
app.add_middleware(
|
||||||
CORSMiddleware,
|
CORSMiddleware,
|
||||||
@@ -39,8 +63,8 @@ app.add_middleware(
|
|||||||
# Mount static files
|
# Mount static files
|
||||||
app.mount("/static", StaticFiles(directory="backend/static"), name="static")
|
app.mount("/static", StaticFiles(directory="backend/static"), name="static")
|
||||||
|
|
||||||
# Setup Jinja2 templates
|
# Use shared templates configuration with timezone filters
|
||||||
templates = Jinja2Templates(directory="templates")
|
from backend.templates_config import templates
|
||||||
|
|
||||||
# Add custom context processor to inject environment variable into all templates
|
# Add custom context processor to inject environment variable into all templates
|
||||||
@app.middleware("http")
|
@app.middleware("http")
|
||||||
@@ -65,13 +89,72 @@ app.include_router(roster.router)
|
|||||||
app.include_router(units.router)
|
app.include_router(units.router)
|
||||||
app.include_router(photos.router)
|
app.include_router(photos.router)
|
||||||
app.include_router(roster_edit.router)
|
app.include_router(roster_edit.router)
|
||||||
|
app.include_router(roster_rename.router)
|
||||||
app.include_router(dashboard.router)
|
app.include_router(dashboard.router)
|
||||||
app.include_router(dashboard_tabs.router)
|
app.include_router(dashboard_tabs.router)
|
||||||
app.include_router(activity.router)
|
app.include_router(activity.router)
|
||||||
|
app.include_router(slmm.router)
|
||||||
|
app.include_router(slm_ui.router)
|
||||||
|
app.include_router(slm_dashboard.router)
|
||||||
|
app.include_router(seismo_dashboard.router)
|
||||||
|
app.include_router(modem_dashboard.router)
|
||||||
|
|
||||||
from backend.routers import settings
|
from backend.routers import settings
|
||||||
app.include_router(settings.router)
|
app.include_router(settings.router)
|
||||||
|
|
||||||
|
from backend.routers import watcher_manager
|
||||||
|
app.include_router(watcher_manager.router)
|
||||||
|
|
||||||
|
# Projects system routers
|
||||||
|
app.include_router(projects.router)
|
||||||
|
app.include_router(project_locations.router)
|
||||||
|
app.include_router(scheduler.router)
|
||||||
|
|
||||||
|
# Report templates router
|
||||||
|
from backend.routers import report_templates
|
||||||
|
app.include_router(report_templates.router)
|
||||||
|
|
||||||
|
# Alerts router
|
||||||
|
from backend.routers import alerts
|
||||||
|
app.include_router(alerts.router)
|
||||||
|
|
||||||
|
# Recurring schedules router
|
||||||
|
from backend.routers import recurring_schedules
|
||||||
|
app.include_router(recurring_schedules.router)
|
||||||
|
|
||||||
|
# Fleet Calendar router
|
||||||
|
from backend.routers import fleet_calendar
|
||||||
|
app.include_router(fleet_calendar.router)
|
||||||
|
|
||||||
|
# Deployment Records router
|
||||||
|
from backend.routers import deployments
|
||||||
|
app.include_router(deployments.router)
|
||||||
|
|
||||||
|
# Start scheduler service and device status monitor on application startup
|
||||||
|
from backend.services.scheduler import start_scheduler, stop_scheduler
|
||||||
|
from backend.services.device_status_monitor import start_device_status_monitor, stop_device_status_monitor
|
||||||
|
|
||||||
|
@app.on_event("startup")
|
||||||
|
async def startup_event():
|
||||||
|
"""Initialize services on app startup"""
|
||||||
|
logger.info("Starting scheduler service...")
|
||||||
|
await start_scheduler()
|
||||||
|
logger.info("Scheduler service started")
|
||||||
|
|
||||||
|
logger.info("Starting device status monitor...")
|
||||||
|
await start_device_status_monitor()
|
||||||
|
logger.info("Device status monitor started")
|
||||||
|
|
||||||
|
@app.on_event("shutdown")
|
||||||
|
def shutdown_event():
|
||||||
|
"""Clean up services on app shutdown"""
|
||||||
|
logger.info("Stopping device status monitor...")
|
||||||
|
stop_device_status_monitor()
|
||||||
|
logger.info("Device status monitor stopped")
|
||||||
|
|
||||||
|
logger.info("Stopping scheduler service...")
|
||||||
|
stop_scheduler()
|
||||||
|
logger.info("Scheduler service stopped")
|
||||||
|
|
||||||
|
|
||||||
# Legacy routes from the original backend
|
# Legacy routes from the original backend
|
||||||
@@ -107,6 +190,220 @@ async def settings_page(request: Request):
|
|||||||
return templates.TemplateResponse("settings.html", {"request": request})
|
return templates.TemplateResponse("settings.html", {"request": request})
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/sound-level-meters", response_class=HTMLResponse)
|
||||||
|
async def sound_level_meters_page(request: Request):
|
||||||
|
"""Sound Level Meters management dashboard"""
|
||||||
|
return templates.TemplateResponse("sound_level_meters.html", {"request": request})
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/slm/{unit_id}", response_class=HTMLResponse)
|
||||||
|
async def slm_legacy_dashboard(
|
||||||
|
request: Request,
|
||||||
|
unit_id: str,
|
||||||
|
from_project: Optional[str] = None,
|
||||||
|
from_nrl: Optional[str] = None,
|
||||||
|
db: Session = Depends(get_db)
|
||||||
|
):
|
||||||
|
"""Legacy SLM control center dashboard for a specific unit"""
|
||||||
|
# Get project details if from_project is provided
|
||||||
|
project = None
|
||||||
|
if from_project:
|
||||||
|
from backend.models import Project
|
||||||
|
project = db.query(Project).filter_by(id=from_project).first()
|
||||||
|
|
||||||
|
# Get NRL location details if from_nrl is provided
|
||||||
|
nrl_location = None
|
||||||
|
if from_nrl:
|
||||||
|
from backend.models import NRLLocation
|
||||||
|
nrl_location = db.query(NRLLocation).filter_by(id=from_nrl).first()
|
||||||
|
|
||||||
|
return templates.TemplateResponse("slm_legacy_dashboard.html", {
|
||||||
|
"request": request,
|
||||||
|
"unit_id": unit_id,
|
||||||
|
"from_project": from_project,
|
||||||
|
"from_nrl": from_nrl,
|
||||||
|
"project": project,
|
||||||
|
"nrl_location": nrl_location
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/seismographs", response_class=HTMLResponse)
|
||||||
|
async def seismographs_page(request: Request):
|
||||||
|
"""Seismographs management dashboard"""
|
||||||
|
return templates.TemplateResponse("seismographs.html", {"request": request})
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/modems", response_class=HTMLResponse)
|
||||||
|
async def modems_page(request: Request):
|
||||||
|
"""Field modems management dashboard"""
|
||||||
|
return templates.TemplateResponse("modems.html", {"request": request})
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/pair-devices", response_class=HTMLResponse)
|
||||||
|
async def pair_devices_page(request: Request, db: Session = Depends(get_db)):
|
||||||
|
"""
|
||||||
|
Device pairing page - two-column layout for pairing recorders with modems.
|
||||||
|
"""
|
||||||
|
from backend.models import RosterUnit
|
||||||
|
|
||||||
|
# Get all non-retired recorders (seismographs and SLMs)
|
||||||
|
recorders = db.query(RosterUnit).filter(
|
||||||
|
RosterUnit.retired == False,
|
||||||
|
RosterUnit.device_type.in_(["seismograph", "slm", None]) # None defaults to seismograph
|
||||||
|
).order_by(RosterUnit.id).all()
|
||||||
|
|
||||||
|
# Get all non-retired modems
|
||||||
|
modems = db.query(RosterUnit).filter(
|
||||||
|
RosterUnit.retired == False,
|
||||||
|
RosterUnit.device_type == "modem"
|
||||||
|
).order_by(RosterUnit.id).all()
|
||||||
|
|
||||||
|
# Build existing pairings list
|
||||||
|
pairings = []
|
||||||
|
for recorder in recorders:
|
||||||
|
if recorder.deployed_with_modem_id:
|
||||||
|
modem = next((m for m in modems if m.id == recorder.deployed_with_modem_id), None)
|
||||||
|
pairings.append({
|
||||||
|
"recorder_id": recorder.id,
|
||||||
|
"recorder_type": (recorder.device_type or "seismograph").upper(),
|
||||||
|
"modem_id": recorder.deployed_with_modem_id,
|
||||||
|
"modem_ip": modem.ip_address if modem else None
|
||||||
|
})
|
||||||
|
|
||||||
|
# Convert to dicts for template
|
||||||
|
recorders_data = [
|
||||||
|
{
|
||||||
|
"id": r.id,
|
||||||
|
"device_type": r.device_type or "seismograph",
|
||||||
|
"deployed": r.deployed,
|
||||||
|
"deployed_with_modem_id": r.deployed_with_modem_id
|
||||||
|
}
|
||||||
|
for r in recorders
|
||||||
|
]
|
||||||
|
|
||||||
|
modems_data = [
|
||||||
|
{
|
||||||
|
"id": m.id,
|
||||||
|
"deployed": m.deployed,
|
||||||
|
"deployed_with_unit_id": m.deployed_with_unit_id,
|
||||||
|
"ip_address": m.ip_address,
|
||||||
|
"phone_number": m.phone_number
|
||||||
|
}
|
||||||
|
for m in modems
|
||||||
|
]
|
||||||
|
|
||||||
|
return templates.TemplateResponse("pair_devices.html", {
|
||||||
|
"request": request,
|
||||||
|
"recorders": recorders_data,
|
||||||
|
"modems": modems_data,
|
||||||
|
"pairings": pairings
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
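# --- Review note (sketch, not in the original diff): the pairing loop above
# does a linear next(...) scan over `modems` for every recorder. A
# hypothetical pre-built index makes the lookup O(1) per recorder:
#
#     modems_by_id = {m.id: m for m in modems}
#     modem = modems_by_id.get(recorder.deployed_with_modem_id)
#
# Behavior is identical for the field used here (ip_address), assuming modem
# ids are unique in the roster.
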
@app.get("/projects", response_class=HTMLResponse)
|
||||||
|
async def projects_page(request: Request):
|
||||||
|
"""Projects management and overview"""
|
||||||
|
return templates.TemplateResponse("projects/overview.html", {"request": request})
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/projects/{project_id}", response_class=HTMLResponse)
|
||||||
|
async def project_detail_page(request: Request, project_id: str):
|
||||||
|
"""Project detail dashboard"""
|
||||||
|
return templates.TemplateResponse("projects/detail.html", {
|
||||||
|
"request": request,
|
||||||
|
"project_id": project_id
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/projects/{project_id}/nrl/{location_id}", response_class=HTMLResponse)
|
||||||
|
async def nrl_detail_page(
|
||||||
|
request: Request,
|
||||||
|
project_id: str,
|
||||||
|
location_id: str,
|
||||||
|
db: Session = Depends(get_db)
|
||||||
|
):
|
||||||
|
"""NRL (Noise Recording Location) detail page with tabs"""
|
||||||
|
from backend.models import Project, MonitoringLocation, UnitAssignment, RosterUnit, MonitoringSession, DataFile
|
||||||
|
from sqlalchemy import and_
|
||||||
|
|
||||||
|
# Get project
|
||||||
|
project = db.query(Project).filter_by(id=project_id).first()
|
||||||
|
if not project:
|
||||||
|
return templates.TemplateResponse("404.html", {
|
||||||
|
"request": request,
|
||||||
|
"message": "Project not found"
|
||||||
|
}, status_code=404)
|
||||||
|
|
||||||
|
# Get location
|
||||||
|
location = db.query(MonitoringLocation).filter_by(
|
||||||
|
id=location_id,
|
||||||
|
project_id=project_id
|
||||||
|
).first()
|
||||||
|
|
||||||
|
if not location:
|
||||||
|
return templates.TemplateResponse("404.html", {
|
||||||
|
"request": request,
|
||||||
|
"message": "Location not found"
|
||||||
|
}, status_code=404)
|
||||||
|
|
||||||
|
# Get active assignment
|
||||||
|
assignment = db.query(UnitAssignment).filter(
|
||||||
|
and_(
|
||||||
|
UnitAssignment.location_id == location_id,
|
||||||
|
UnitAssignment.status == "active"
|
||||||
|
)
|
||||||
|
).first()
|
||||||
|
|
||||||
|
assigned_unit = None
|
||||||
|
assigned_modem = None
|
||||||
|
if assignment:
|
||||||
|
assigned_unit = db.query(RosterUnit).filter_by(id=assignment.unit_id).first()
|
||||||
|
if assigned_unit and assigned_unit.deployed_with_modem_id:
|
||||||
|
assigned_modem = db.query(RosterUnit).filter_by(id=assigned_unit.deployed_with_modem_id).first()
|
||||||
|
|
||||||
|
# Get session count
|
||||||
|
session_count = db.query(MonitoringSession).filter_by(location_id=location_id).count()
|
||||||
|
|
||||||
|
# Get file count (DataFile links to session, not directly to location)
|
||||||
|
file_count = db.query(DataFile).join(
|
||||||
|
MonitoringSession,
|
||||||
|
DataFile.session_id == MonitoringSession.id
|
||||||
|
).filter(MonitoringSession.location_id == location_id).count()
|
||||||
|
|
||||||
|
# Check for active session
|
||||||
|
active_session = db.query(MonitoringSession).filter(
|
||||||
|
and_(
|
||||||
|
MonitoringSession.location_id == location_id,
|
||||||
|
MonitoringSession.status == "recording"
|
||||||
|
)
|
||||||
|
).first()
|
||||||
|
|
||||||
|
# Parse connection_mode from location_metadata JSON
|
||||||
|
import json as _json
|
||||||
|
connection_mode = "connected"
|
||||||
|
try:
|
||||||
|
meta = _json.loads(location.location_metadata or "{}")
|
||||||
|
connection_mode = meta.get("connection_mode", "connected")
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
template = "vibration_location_detail.html" if location.location_type == "vibration" else "nrl_detail.html"
|
||||||
|
return templates.TemplateResponse(template, {
|
||||||
|
"request": request,
|
||||||
|
"project_id": project_id,
|
||||||
|
"location_id": location_id,
|
||||||
|
"project": project,
|
||||||
|
"location": location,
|
||||||
|
"assignment": assignment,
|
||||||
|
"assigned_unit": assigned_unit,
|
||||||
|
"assigned_modem": assigned_modem,
|
||||||
|
"session_count": session_count,
|
||||||
|
"file_count": file_count,
|
||||||
|
"active_session": active_session,
|
||||||
|
"connection_mode": connection_mode,
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
# ===== PWA ROUTES =====

@app.get("/sw.js")

@@ -356,6 +653,192 @@ async def unknown_emitters_partial(request: Request):
    })


@app.get("/partials/devices-all", response_class=HTMLResponse)
|
||||||
|
async def devices_all_partial(request: Request):
|
||||||
|
"""Unified partial template for ALL devices with comprehensive filtering support"""
|
||||||
|
from datetime import datetime
|
||||||
|
snapshot = emit_status_snapshot()
|
||||||
|
|
||||||
|
units_list = []
|
||||||
|
|
||||||
|
# Add deployed/active units
|
||||||
|
for unit_id, unit_data in snapshot["active"].items():
|
||||||
|
units_list.append({
|
||||||
|
"id": unit_id,
|
||||||
|
"status": unit_data.get("status", "Unknown"),
|
||||||
|
"age": unit_data.get("age", "N/A"),
|
||||||
|
"last_seen": unit_data.get("last", "Never"),
|
||||||
|
"deployed": True,
|
||||||
|
"retired": False,
|
||||||
|
"out_for_calibration": False,
|
||||||
|
"ignored": False,
|
||||||
|
"note": unit_data.get("note", ""),
|
||||||
|
"device_type": unit_data.get("device_type", "seismograph"),
|
||||||
|
"address": unit_data.get("address", ""),
|
||||||
|
"coordinates": unit_data.get("coordinates", ""),
|
||||||
|
"project_id": unit_data.get("project_id", ""),
|
||||||
|
"last_calibrated": unit_data.get("last_calibrated"),
|
||||||
|
"next_calibration_due": unit_data.get("next_calibration_due"),
|
||||||
|
"deployed_with_modem_id": unit_data.get("deployed_with_modem_id"),
|
||||||
|
"deployed_with_unit_id": unit_data.get("deployed_with_unit_id"),
|
||||||
|
"ip_address": unit_data.get("ip_address"),
|
||||||
|
"phone_number": unit_data.get("phone_number"),
|
||||||
|
"hardware_model": unit_data.get("hardware_model"),
|
||||||
|
})
|
||||||
|
|
||||||
|
# Add benched units
|
||||||
|
for unit_id, unit_data in snapshot["benched"].items():
|
||||||
|
units_list.append({
|
||||||
|
"id": unit_id,
|
||||||
|
"status": unit_data.get("status", "N/A"),
|
||||||
|
"age": unit_data.get("age", "N/A"),
|
||||||
|
"last_seen": unit_data.get("last", "Never"),
|
||||||
|
"deployed": False,
|
||||||
|
"retired": False,
|
||||||
|
"out_for_calibration": False,
|
||||||
|
"ignored": False,
|
||||||
|
"note": unit_data.get("note", ""),
|
||||||
|
"device_type": unit_data.get("device_type", "seismograph"),
|
||||||
|
"address": unit_data.get("address", ""),
|
||||||
|
"coordinates": unit_data.get("coordinates", ""),
|
||||||
|
"project_id": unit_data.get("project_id", ""),
|
||||||
|
"last_calibrated": unit_data.get("last_calibrated"),
|
||||||
|
"next_calibration_due": unit_data.get("next_calibration_due"),
|
||||||
|
"deployed_with_modem_id": unit_data.get("deployed_with_modem_id"),
|
||||||
|
"deployed_with_unit_id": unit_data.get("deployed_with_unit_id"),
|
||||||
|
"ip_address": unit_data.get("ip_address"),
|
||||||
|
"phone_number": unit_data.get("phone_number"),
|
||||||
|
"hardware_model": unit_data.get("hardware_model"),
|
||||||
|
})
|
||||||
|
|
||||||
|
# Add allocated units
|
||||||
|
for unit_id, unit_data in snapshot.get("allocated", {}).items():
|
||||||
|
units_list.append({
|
||||||
|
"id": unit_id,
|
||||||
|
"status": "Allocated",
|
||||||
|
"age": "N/A",
|
||||||
|
"last_seen": "N/A",
|
||||||
|
"deployed": False,
|
||||||
|
"retired": False,
|
||||||
|
"out_for_calibration": False,
|
||||||
|
"allocated": True,
|
||||||
|
"allocated_to_project_id": unit_data.get("allocated_to_project_id", ""),
|
||||||
|
"ignored": False,
|
||||||
|
"note": unit_data.get("note", ""),
|
||||||
|
"device_type": unit_data.get("device_type", "seismograph"),
|
||||||
|
"address": unit_data.get("address", ""),
|
||||||
|
"coordinates": unit_data.get("coordinates", ""),
|
||||||
|
"project_id": unit_data.get("project_id", ""),
|
||||||
|
"last_calibrated": unit_data.get("last_calibrated"),
|
||||||
|
"next_calibration_due": unit_data.get("next_calibration_due"),
|
||||||
|
"deployed_with_modem_id": unit_data.get("deployed_with_modem_id"),
|
||||||
|
"deployed_with_unit_id": unit_data.get("deployed_with_unit_id"),
|
||||||
|
"ip_address": unit_data.get("ip_address"),
|
||||||
|
"phone_number": unit_data.get("phone_number"),
|
||||||
|
"hardware_model": unit_data.get("hardware_model"),
|
||||||
|
})
|
||||||
|
|
||||||
|
# Add out-for-calibration units
|
||||||
|
for unit_id, unit_data in snapshot["out_for_calibration"].items():
|
||||||
|
units_list.append({
|
||||||
|
"id": unit_id,
|
||||||
|
"status": "Out for Calibration",
|
||||||
|
"age": "N/A",
|
||||||
|
"last_seen": "N/A",
|
||||||
|
"deployed": False,
|
||||||
|
"retired": False,
|
||||||
|
"out_for_calibration": True,
|
||||||
|
"ignored": False,
|
||||||
|
"note": unit_data.get("note", ""),
|
||||||
|
"device_type": unit_data.get("device_type", "seismograph"),
|
||||||
|
"address": unit_data.get("address", ""),
|
||||||
|
"coordinates": unit_data.get("coordinates", ""),
|
||||||
|
"project_id": unit_data.get("project_id", ""),
|
||||||
|
"last_calibrated": unit_data.get("last_calibrated"),
|
||||||
|
"next_calibration_due": unit_data.get("next_calibration_due"),
|
||||||
|
"deployed_with_modem_id": unit_data.get("deployed_with_modem_id"),
|
||||||
|
"deployed_with_unit_id": unit_data.get("deployed_with_unit_id"),
|
||||||
|
"ip_address": unit_data.get("ip_address"),
|
||||||
|
"phone_number": unit_data.get("phone_number"),
|
||||||
|
"hardware_model": unit_data.get("hardware_model"),
|
||||||
|
})
|
||||||
|
|
||||||
|
# Add retired units
|
||||||
|
for unit_id, unit_data in snapshot["retired"].items():
|
||||||
|
units_list.append({
|
||||||
|
"id": unit_id,
|
||||||
|
"status": "Retired",
|
||||||
|
"age": "N/A",
|
||||||
|
"last_seen": "N/A",
|
||||||
|
"deployed": False,
|
||||||
|
"retired": True,
|
||||||
|
"out_for_calibration": False,
|
||||||
|
"ignored": False,
|
||||||
|
"note": unit_data.get("note", ""),
|
||||||
|
"device_type": unit_data.get("device_type", "seismograph"),
|
||||||
|
"address": unit_data.get("address", ""),
|
||||||
|
"coordinates": unit_data.get("coordinates", ""),
|
||||||
|
"project_id": unit_data.get("project_id", ""),
|
||||||
|
"last_calibrated": unit_data.get("last_calibrated"),
|
||||||
|
"next_calibration_due": unit_data.get("next_calibration_due"),
|
||||||
|
"deployed_with_modem_id": unit_data.get("deployed_with_modem_id"),
|
||||||
|
"deployed_with_unit_id": unit_data.get("deployed_with_unit_id"),
|
||||||
|
"ip_address": unit_data.get("ip_address"),
|
||||||
|
"phone_number": unit_data.get("phone_number"),
|
||||||
|
"hardware_model": unit_data.get("hardware_model"),
|
||||||
|
})
|
||||||
|
|
||||||
|
# Add ignored units
|
||||||
|
for unit_id, unit_data in snapshot.get("ignored", {}).items():
|
||||||
|
units_list.append({
|
||||||
|
"id": unit_id,
|
||||||
|
"status": "Ignored",
|
||||||
|
"age": "N/A",
|
||||||
|
"last_seen": "N/A",
|
||||||
|
"deployed": False,
|
||||||
|
"retired": False,
|
||||||
|
"out_for_calibration": False,
|
||||||
|
"ignored": True,
|
||||||
|
"note": unit_data.get("note", unit_data.get("reason", "")),
|
||||||
|
"device_type": unit_data.get("device_type", "unknown"),
|
||||||
|
"address": "",
|
||||||
|
"coordinates": "",
|
||||||
|
"project_id": "",
|
||||||
|
"last_calibrated": None,
|
||||||
|
"next_calibration_due": None,
|
||||||
|
"deployed_with_modem_id": None,
|
||||||
|
"deployed_with_unit_id": None,
|
||||||
|
"ip_address": None,
|
||||||
|
"phone_number": None,
|
||||||
|
"hardware_model": None,
|
||||||
|
})
|
||||||
|
|
||||||
|
# Sort by status category, then by ID
|
||||||
|
def sort_key(unit):
|
||||||
|
# Priority: deployed (active) -> allocated -> benched -> out_for_calibration -> retired -> ignored
|
||||||
|
if unit["deployed"]:
|
||||||
|
return (0, unit["id"])
|
||||||
|
elif unit.get("allocated"):
|
||||||
|
return (1, unit["id"])
|
||||||
|
elif not unit["retired"] and not unit["out_for_calibration"] and not unit["ignored"]:
|
||||||
|
return (2, unit["id"])
|
||||||
|
elif unit["out_for_calibration"]:
|
||||||
|
return (3, unit["id"])
|
||||||
|
elif unit["retired"]:
|
||||||
|
return (4, unit["id"])
|
||||||
|
else:
|
||||||
|
return (5, unit["id"])
|
||||||
|
|
||||||
|
units_list.sort(key=sort_key)
|
||||||
|
|
||||||
|
return templates.TemplateResponse("partials/devices_table.html", {
|
||||||
|
"request": request,
|
||||||
|
"units": units_list,
|
||||||
|
"timestamp": datetime.now().strftime("%H:%M:%S"),
|
||||||
|
"user_timezone": get_user_timezone()
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
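# --- Review note (sketch, not in the original diff): the six append loops
# above build the same dict shape and differ only in which snapshot bucket
# they read and a few status flags. A hypothetical helper could collapse them:
#
#     SHARED_KEYS = ("device_type", "address", "coordinates", "project_id",
#                    "last_calibrated", "next_calibration_due",
#                    "deployed_with_modem_id", "deployed_with_unit_id",
#                    "ip_address", "phone_number", "hardware_model")
#
#     def _unit_row(unit_id, unit_data, **overrides):
#         row = {
#             "id": unit_id,
#             "status": unit_data.get("status", "Unknown"),
#             "age": unit_data.get("age", "N/A"),
#             "last_seen": unit_data.get("last", "Never"),
#             "deployed": False, "retired": False,
#             "out_for_calibration": False, "ignored": False,
#             "note": unit_data.get("note", ""),
#         }
#         row.update({k: unit_data.get(k) for k in SHARED_KEYS})
#         row.update(overrides)
#         return row
#
#     for unit_id, unit_data in snapshot["active"].items():
#         units_list.append(_unit_row(unit_id, unit_data, deployed=True))
#
# (String-valued fields would then default to None rather than ""; adjust if
# the template relies on empty strings.)
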
@app.get("/health")
|
@app.get("/health")
|
||||||
def health_check():
|
def health_check():
|
||||||
"""Health check endpoint"""
|
"""Health check endpoint"""
|
||||||
|
|||||||

backend/migrate_add_allocated.py (new file, +35 lines)
@@ -0,0 +1,35 @@
"""
Migration: Add allocated and allocated_to_project_id columns to roster table.
Run once: python backend/migrate_add_allocated.py
"""
import sqlite3
import os

DB_PATH = os.path.join(os.path.dirname(__file__), '..', 'data', 'seismo_fleet.db')

def run():
    conn = sqlite3.connect(DB_PATH)
    cur = conn.cursor()

    # Check existing columns
    cur.execute("PRAGMA table_info(roster)")
    cols = {row[1] for row in cur.fetchall()}

    if 'allocated' not in cols:
        cur.execute("ALTER TABLE roster ADD COLUMN allocated BOOLEAN DEFAULT 0 NOT NULL")
        print("Added column: allocated")
    else:
        print("Column already exists: allocated")

    if 'allocated_to_project_id' not in cols:
        cur.execute("ALTER TABLE roster ADD COLUMN allocated_to_project_id VARCHAR")
        print("Added column: allocated_to_project_id")
    else:
        print("Column already exists: allocated_to_project_id")

    conn.commit()
    conn.close()
    print("Migration complete.")

if __name__ == '__main__':
    run()
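
# --- Review note (sketch, not in the original diff): this and the migration
# scripts that follow all repeat the same PRAGMA-based idempotency check. A
# shared helper (assuming plain sqlite3) could factor the pattern out:
#
#     def add_column_if_missing(conn, table, column, ddl):
#         """ALTER TABLE only when `column` is absent; True if added."""
#         cols = {row[1] for row in conn.execute(f"PRAGMA table_info({table})")}
#         if column in cols:
#             return False
#         conn.execute(f"ALTER TABLE {table} ADD COLUMN {column} {ddl}")
#         return True
#
#     # e.g. add_column_if_missing(conn, "roster", "allocated",
#     #                            "BOOLEAN DEFAULT 0 NOT NULL")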

backend/migrate_add_auto_increment_index.py (new file, +67 lines)
@@ -0,0 +1,67 @@
"""
Migration: Add auto_increment_index column to recurring_schedules table

This migration adds the auto_increment_index column that controls whether
the scheduler should automatically find an unused store index before starting
a new measurement.

Run this script once to update existing databases:
    python -m backend.migrate_add_auto_increment_index
"""

import sqlite3
import os

DB_PATH = "data/seismo_fleet.db"


def migrate():
    """Add auto_increment_index column to recurring_schedules table."""
    if not os.path.exists(DB_PATH):
        print(f"Database not found at {DB_PATH}")
        return False

    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()

    try:
        # Check if recurring_schedules table exists
        cursor.execute("""
            SELECT name FROM sqlite_master
            WHERE type='table' AND name='recurring_schedules'
        """)
        if not cursor.fetchone():
            print("recurring_schedules table does not exist yet. Will be created on app startup.")
            conn.close()
            return True

        # Check if auto_increment_index column already exists
        cursor.execute("PRAGMA table_info(recurring_schedules)")
        columns = [row[1] for row in cursor.fetchall()]

        if "auto_increment_index" in columns:
            print("auto_increment_index column already exists in recurring_schedules table.")
            conn.close()
            return True

        # Add the column
        print("Adding auto_increment_index column to recurring_schedules table...")
        cursor.execute("""
            ALTER TABLE recurring_schedules
            ADD COLUMN auto_increment_index BOOLEAN DEFAULT 1
        """)
        conn.commit()
        print("Successfully added auto_increment_index column.")

        conn.close()
        return True

    except Exception as e:
        print(f"Migration failed: {e}")
        conn.close()
        return False


if __name__ == "__main__":
    success = migrate()
    exit(0 if success else 1)

backend/migrate_add_deployment_records.py (new file, +79 lines)
@@ -0,0 +1,79 @@
"""
Migration: Add deployment_records table.

Tracks each time a unit is sent to the field and returned.
The active deployment is the row with actual_removal_date IS NULL.

Run once per database:
    python backend/migrate_add_deployment_records.py
"""

import sqlite3
import os

DB_PATH = "./data/seismo_fleet.db"


def migrate_database():
    if not os.path.exists(DB_PATH):
        print(f"Database not found at {DB_PATH}")
        return

    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()

    try:
        # Check if table already exists
        cursor.execute("""
            SELECT name FROM sqlite_master
            WHERE type='table' AND name='deployment_records'
        """)
        if cursor.fetchone():
            print("✓ deployment_records table already exists, skipping")
            return

        print("Creating deployment_records table...")
        cursor.execute("""
            CREATE TABLE deployment_records (
                id TEXT PRIMARY KEY,
                unit_id TEXT NOT NULL,
                deployed_date DATE,
                estimated_removal_date DATE,
                actual_removal_date DATE,
                project_ref TEXT,
                project_id TEXT,
                location_name TEXT,
                notes TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)

        cursor.execute("""
            CREATE INDEX idx_deployment_records_unit_id
            ON deployment_records(unit_id)
        """)
        cursor.execute("""
            CREATE INDEX idx_deployment_records_project_id
            ON deployment_records(project_id)
        """)
        # Index for finding active deployments quickly
        cursor.execute("""
            CREATE INDEX idx_deployment_records_active
            ON deployment_records(unit_id, actual_removal_date)
        """)

        conn.commit()
        print("✓ deployment_records table created successfully")
        print("✓ Indexes created")

    except Exception as e:
        conn.rollback()
        print(f"✗ Migration failed: {e}")
        raise
    finally:
        conn.close()


if __name__ == "__main__":
    migrate_database()
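
# --- Review note (sketch, not in the original diff): per the docstring above,
# the active deployment is the row whose actual_removal_date is NULL; the
# composite index idx_deployment_records_active covers a lookup like:
#
#     SELECT * FROM deployment_records
#     WHERE unit_id = ? AND actual_removal_date IS NULL
#     ORDER BY deployed_date DESC
#     LIMIT 1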

backend/migrate_add_deployment_type.py (new file, +84 lines)
@@ -0,0 +1,84 @@
"""
Migration script to add deployment_type and deployed_with_unit_id fields to roster table.

deployment_type: tracks what type of device a modem is deployed with:
- "seismograph" - Modem is connected to a seismograph
- "slm" - Modem is connected to a sound level meter
- NULL/empty - Not assigned or unknown

deployed_with_unit_id: stores the ID of the seismograph/SLM this modem is deployed with
(reverse relationship of deployed_with_modem_id)

Run this script once to migrate an existing database.
"""

import sqlite3
import os

# Database path
DB_PATH = "./data/seismo_fleet.db"


def migrate_database():
    """Add deployment_type and deployed_with_unit_id columns to roster table"""

    if not os.path.exists(DB_PATH):
        print(f"Database not found at {DB_PATH}")
        print("The database will be created automatically when you run the application.")
        return

    print(f"Migrating database: {DB_PATH}")

    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()

    # Check if roster table exists
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='roster'")
    table_exists = cursor.fetchone()

    if not table_exists:
        print("Roster table does not exist yet - will be created when app runs")
        conn.close()
        return

    # Check existing columns
    cursor.execute("PRAGMA table_info(roster)")
    columns = [col[1] for col in cursor.fetchall()]

    try:
        # Add deployment_type if not exists
        if 'deployment_type' not in columns:
            print("Adding deployment_type column to roster table...")
            cursor.execute("ALTER TABLE roster ADD COLUMN deployment_type TEXT")
            print("  Added deployment_type column")

            cursor.execute("CREATE INDEX IF NOT EXISTS ix_roster_deployment_type ON roster(deployment_type)")
            print("  Created index on deployment_type")
        else:
            print("deployment_type column already exists")

        # Add deployed_with_unit_id if not exists
        if 'deployed_with_unit_id' not in columns:
            print("Adding deployed_with_unit_id column to roster table...")
            cursor.execute("ALTER TABLE roster ADD COLUMN deployed_with_unit_id TEXT")
            print("  Added deployed_with_unit_id column")

            cursor.execute("CREATE INDEX IF NOT EXISTS ix_roster_deployed_with_unit_id ON roster(deployed_with_unit_id)")
            print("  Created index on deployed_with_unit_id")
        else:
            print("deployed_with_unit_id column already exists")

        conn.commit()
        print("\nMigration completed successfully!")

    except sqlite3.Error as e:
        print(f"\nError during migration: {e}")
        conn.rollback()
        raise

    finally:
        conn.close()


if __name__ == "__main__":
    migrate_database()

backend/migrate_add_estimated_units.py (new file, +62 lines)
@@ -0,0 +1,62 @@
"""
Migration: Add estimated_units to job_reservations

Adds column:
- job_reservations.estimated_units: Estimated number of units for the reservation (nullable integer)
"""

import sqlite3
import sys
from pathlib import Path

# Default database path (matches production pattern)
DB_PATH = "./data/seismo_fleet.db"


def migrate(db_path: str):
    """Run the migration."""
    print(f"Migrating database: {db_path}")

    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()

    try:
        # Check if job_reservations table exists
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='job_reservations'")
        if not cursor.fetchone():
            print("job_reservations table does not exist. Skipping migration.")
            return

        # Get existing columns in job_reservations
        cursor.execute("PRAGMA table_info(job_reservations)")
        existing_cols = {row[1] for row in cursor.fetchall()}

        # Add estimated_units column if it doesn't exist
        if 'estimated_units' not in existing_cols:
            print("Adding estimated_units column to job_reservations...")
            cursor.execute("ALTER TABLE job_reservations ADD COLUMN estimated_units INTEGER")
        else:
            print("estimated_units column already exists. Skipping.")

        conn.commit()
        print("Migration completed successfully!")

    except Exception as e:
        print(f"Migration failed: {e}")
        conn.rollback()
        raise
    finally:
        conn.close()


if __name__ == "__main__":
    db_path = DB_PATH

    if len(sys.argv) > 1:
        db_path = sys.argv[1]

    if not Path(db_path).exists():
        print(f"Database not found: {db_path}")
        sys.exit(1)

    migrate(db_path)

backend/migrate_add_job_reservations.py (new file, +103 lines)
@@ -0,0 +1,103 @@
"""
Migration script to add job reservations for the Fleet Calendar feature.

This creates two tables:
- job_reservations: Track future unit assignments for jobs/projects
- job_reservation_units: Link specific units to reservations

Run this script once to migrate an existing database.
"""

import sqlite3
import os

# Database path
DB_PATH = "./data/seismo_fleet.db"


def migrate_database():
    """Create the job_reservations and job_reservation_units tables"""

    if not os.path.exists(DB_PATH):
        print(f"Database not found at {DB_PATH}")
        print("The database will be created automatically when you run the application.")
        return

    print(f"Migrating database: {DB_PATH}")

    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()

    # Check if job_reservations table already exists
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='job_reservations'")
    if cursor.fetchone():
        print("Migration already applied - job_reservations table exists")
        conn.close()
        return

    print("Creating job_reservations table...")

    try:
        # Create job_reservations table
        cursor.execute("""
            CREATE TABLE job_reservations (
                id TEXT PRIMARY KEY,
                name TEXT NOT NULL,
                project_id TEXT,
                start_date DATE NOT NULL,
                end_date DATE NOT NULL,
                assignment_type TEXT NOT NULL DEFAULT 'quantity',
                device_type TEXT DEFAULT 'seismograph',
                quantity_needed INTEGER,
                notes TEXT,
                color TEXT DEFAULT '#3B82F6',
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)
        print("  Created job_reservations table")

        # Create indexes for job_reservations
        cursor.execute("CREATE INDEX idx_job_reservations_project_id ON job_reservations(project_id)")
        print("  Created index on project_id")

        cursor.execute("CREATE INDEX idx_job_reservations_dates ON job_reservations(start_date, end_date)")
        print("  Created index on dates")

        # Create job_reservation_units table
        print("Creating job_reservation_units table...")
        cursor.execute("""
            CREATE TABLE job_reservation_units (
                id TEXT PRIMARY KEY,
                reservation_id TEXT NOT NULL,
                unit_id TEXT NOT NULL,
                assignment_source TEXT DEFAULT 'specific',
                assigned_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                FOREIGN KEY (reservation_id) REFERENCES job_reservations(id),
                FOREIGN KEY (unit_id) REFERENCES roster(id)
            )
        """)
        print("  Created job_reservation_units table")

        # Create indexes for job_reservation_units
        cursor.execute("CREATE INDEX idx_job_reservation_units_reservation_id ON job_reservation_units(reservation_id)")
        print("  Created index on reservation_id")

        cursor.execute("CREATE INDEX idx_job_reservation_units_unit_id ON job_reservation_units(unit_id)")
        print("  Created index on unit_id")

        conn.commit()
        print("\nMigration completed successfully!")
        print("You can now use the Fleet Calendar to manage unit reservations.")

    except sqlite3.Error as e:
        print(f"\nError during migration: {e}")
        conn.rollback()
        raise

    finally:
        conn.close()


if __name__ == "__main__":
    migrate_database()
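
# --- Review note (sketch, not in the original diff): the two tables relate
# through job_reservation_units.reservation_id, so the units pinned to a
# reservation can be listed with a join such as:
#
#     SELECT jru.unit_id, jru.assignment_source
#     FROM job_reservation_units jru
#     JOIN job_reservations jr ON jr.id = jru.reservation_id
#     WHERE jr.id = ?
#
# Reservations with assignment_type = 'quantity' (the default) presumably
# carry only quantity_needed rather than pre-linked unit rows.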

backend/migrate_add_location_slots.py (new file, +24 lines)
@@ -0,0 +1,24 @@
"""
Migration: Add location_slots column to job_reservations table.
Stores the full ordered slot list (including empty/unassigned slots) as JSON.
Run once per database.
"""
import sqlite3
import os

DB_PATH = os.environ.get("DB_PATH", "/app/data/seismo_fleet.db")

def run():
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()
    existing = [r[1] for r in cursor.execute("PRAGMA table_info(job_reservations)").fetchall()]
    if "location_slots" not in existing:
        cursor.execute("ALTER TABLE job_reservations ADD COLUMN location_slots TEXT")
        conn.commit()
        print("Added location_slots column to job_reservations.")
    else:
        print("location_slots column already exists, skipping.")
    conn.close()

if __name__ == "__main__":
    run()

backend/migrate_add_oneoff_schedule_fields.py (new file, +73 lines)
@@ -0,0 +1,73 @@
"""
Migration: Add one-off schedule fields to recurring_schedules table

Adds start_datetime and end_datetime columns for one-off recording schedules.

Run this script once to update existing databases:
    python -m backend.migrate_add_oneoff_schedule_fields
"""

import sqlite3
import os

DB_PATH = "data/seismo_fleet.db"


def migrate():
    """Add one-off schedule columns to recurring_schedules table."""
    if not os.path.exists(DB_PATH):
        print(f"Database not found at {DB_PATH}")
        return False

    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()

    try:
        cursor.execute("""
            SELECT name FROM sqlite_master
            WHERE type='table' AND name='recurring_schedules'
        """)
        if not cursor.fetchone():
            print("recurring_schedules table does not exist yet. Will be created on app startup.")
            conn.close()
            return True

        cursor.execute("PRAGMA table_info(recurring_schedules)")
        columns = [row[1] for row in cursor.fetchall()]

        added = False

        if "start_datetime" not in columns:
            print("Adding start_datetime column to recurring_schedules table...")
            cursor.execute("""
                ALTER TABLE recurring_schedules
                ADD COLUMN start_datetime DATETIME NULL
            """)
            added = True

        if "end_datetime" not in columns:
            print("Adding end_datetime column to recurring_schedules table...")
            cursor.execute("""
                ALTER TABLE recurring_schedules
                ADD COLUMN end_datetime DATETIME NULL
            """)
            added = True

        if added:
            conn.commit()
            print("Successfully added one-off schedule columns.")
        else:
            print("One-off schedule columns already exist.")

        conn.close()
        return True

    except Exception as e:
        print(f"Migration failed: {e}")
        conn.close()
        return False


if __name__ == "__main__":
    success = migrate()
    exit(0 if success else 1)

backend/migrate_add_out_for_calibration.py (new file, +54 lines)
@@ -0,0 +1,54 @@
"""
Database Migration: Add out_for_calibration field to roster table

Changes:
- Adds out_for_calibration BOOLEAN column (default FALSE) to roster table
- Safe to run multiple times (idempotent)
- No data loss

Usage:
    python backend/migrate_add_out_for_calibration.py
"""

import sys
import os

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker

SQLALCHEMY_DATABASE_URL = "sqlite:///./data/seismo_fleet.db"
engine = create_engine(SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False})
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)


def migrate():
    db = SessionLocal()
    try:
        print("=" * 60)
        print("Migration: Add out_for_calibration to roster")
        print("=" * 60)

        # Check if column already exists
        result = db.execute(text("PRAGMA table_info(roster)")).fetchall()
        columns = [row[1] for row in result]

        if "out_for_calibration" in columns:
            print("Column out_for_calibration already exists. Skipping.")
        else:
            db.execute(text("ALTER TABLE roster ADD COLUMN out_for_calibration BOOLEAN DEFAULT FALSE"))
            db.commit()
            print("Added out_for_calibration column to roster table.")

        print("Migration complete.")
    except Exception as e:
        db.rollback()
        print(f"Error: {e}")
        raise
    finally:
        db.close()


if __name__ == "__main__":
    migrate()

backend/migrate_add_project_data_collection_mode.py (new file, +53 lines)
@@ -0,0 +1,53 @@
#!/usr/bin/env python3
"""
Migration: Add data_collection_mode column to projects table.

Values:
    "remote" — units have modems; data pulled via FTP/scheduler automatically
    "manual" — no modem; SD cards retrieved daily and uploaded by hand

All existing projects are backfilled to "manual" (safe conservative default).

Run once inside the Docker container:
    docker exec terra-view python3 backend/migrate_add_project_data_collection_mode.py
"""
from pathlib import Path

DB_PATH = Path("data/seismo_fleet.db")


def migrate():
    import sqlite3

    if not DB_PATH.exists():
        print(f"Database not found at {DB_PATH}. Are you running from /home/serversdown/terra-view?")
        return

    conn = sqlite3.connect(DB_PATH)
    conn.row_factory = sqlite3.Row
    cur = conn.cursor()

    # ── 1. Add column (idempotent) ───────────────────────────────────────────
    cur.execute("PRAGMA table_info(projects)")
    existing_cols = {row["name"] for row in cur.fetchall()}

    if "data_collection_mode" not in existing_cols:
        cur.execute("ALTER TABLE projects ADD COLUMN data_collection_mode TEXT DEFAULT 'manual'")
        conn.commit()
        print("✓ Added column data_collection_mode to projects")
    else:
        print("○ Column data_collection_mode already exists — skipping ALTER TABLE")

    # ── 2. Backfill NULLs to 'manual' ────────────────────────────────────────
    cur.execute("UPDATE projects SET data_collection_mode = 'manual' WHERE data_collection_mode IS NULL")
    updated = cur.rowcount
    conn.commit()
    conn.close()

    if updated:
        print(f"✓ Backfilled {updated} project(s) to data_collection_mode='manual'.")
    print("Migration complete.")


if __name__ == "__main__":
    migrate()

backend/migrate_add_project_deleted_at.py (new file, +56 lines)
@@ -0,0 +1,56 @@
"""
Migration: Add deleted_at column to projects table

Adds column:
- projects.deleted_at: Timestamp set when status='deleted'; data hard-deleted after 60 days
"""

import sqlite3
import sys
from pathlib import Path


def migrate(db_path: str):
    """Run the migration."""
    print(f"Migrating database: {db_path}")

    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()

    try:
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='projects'")
        if not cursor.fetchone():
            print("projects table does not exist. Skipping migration.")
            return

        cursor.execute("PRAGMA table_info(projects)")
        existing_cols = {row[1] for row in cursor.fetchall()}

        if 'deleted_at' not in existing_cols:
            print("Adding deleted_at column to projects...")
            cursor.execute("ALTER TABLE projects ADD COLUMN deleted_at DATETIME")
        else:
            print("deleted_at column already exists. Skipping.")

        conn.commit()
        print("Migration completed successfully!")

    except Exception as e:
        print(f"Migration failed: {e}")
        conn.rollback()
        raise
    finally:
        conn.close()


if __name__ == "__main__":
    db_path = "./data/seismo_fleet.db"

    if len(sys.argv) > 1:
        db_path = sys.argv[1]

    if not Path(db_path).exists():
        print(f"Database not found: {db_path}")
        sys.exit(1)

    migrate(db_path)

backend/migrate_add_project_number.py (new file, +80 lines)
@@ -0,0 +1,80 @@
"""
Migration script to add project_number field to projects table.

This adds a new column for TMI internal project numbering:
- Format: xxxx-YY (e.g., "2567-23")
- xxxx = incremental project number
- YY = year project was started

Combined with client_name and name (project/site name), this enables
smart searching across all project identifiers.

Run this script once to migrate an existing database.
"""

import sqlite3
import os

# Database path
DB_PATH = "./data/seismo_fleet.db"


def migrate_database():
    """Add project_number column to projects table"""

    if not os.path.exists(DB_PATH):
        print(f"Database not found at {DB_PATH}")
        print("The database will be created automatically when you run the application.")
        return

    print(f"Migrating database: {DB_PATH}")

    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()

    # Check if projects table exists
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='projects'")
    table_exists = cursor.fetchone()

    if not table_exists:
        print("Projects table does not exist yet - will be created when app runs")
        conn.close()
        return

    # Check if project_number column already exists
    cursor.execute("PRAGMA table_info(projects)")
    columns = [col[1] for col in cursor.fetchall()]

    if 'project_number' in columns:
        print("Migration already applied - project_number column exists")
        conn.close()
        return

    print("Adding project_number column to projects table...")

    try:
        cursor.execute("ALTER TABLE projects ADD COLUMN project_number TEXT")
        print("  Added project_number column")

        # Create index for faster searching
        cursor.execute("CREATE INDEX IF NOT EXISTS ix_projects_project_number ON projects(project_number)")
        print("  Created index on project_number")

        # Also add index on client_name if it doesn't exist
        cursor.execute("CREATE INDEX IF NOT EXISTS ix_projects_client_name ON projects(client_name)")
        print("  Created index on client_name")

        conn.commit()
        print("\nMigration completed successfully!")

    except sqlite3.Error as e:
        print(f"\nError during migration: {e}")
        conn.rollback()
        raise

    finally:
        conn.close()


if __name__ == "__main__":
    migrate_database()

backend/migrate_add_report_templates.py (new file, +88 lines)
@@ -0,0 +1,88 @@
"""
Migration script to add report_templates table.

This creates a new table for storing report generation configurations:
- Template name and project association
- Time filtering settings (start/end time)
- Date range filtering (optional)
- Report title defaults

Run this script once to migrate an existing database.
"""

import sqlite3
import os

# Database path
DB_PATH = "./data/seismo_fleet.db"

def migrate_database():
    """Create report_templates table"""

    if not os.path.exists(DB_PATH):
        print(f"Database not found at {DB_PATH}")
        print("The database will be created automatically when you run the application.")
        return

    print(f"Migrating database: {DB_PATH}")

    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()

    # Check if report_templates table already exists
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='report_templates'")
    table_exists = cursor.fetchone()

    if table_exists:
        print("Migration already applied - report_templates table exists")
        conn.close()
        return

    print("Creating report_templates table...")

    try:
        cursor.execute("""
            CREATE TABLE report_templates (
                id TEXT PRIMARY KEY,
                name TEXT NOT NULL,
                project_id TEXT,
                report_title TEXT DEFAULT 'Background Noise Study',
                start_time TEXT,
                end_time TEXT,
                start_date TEXT,
                end_date TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)
        print("  ✓ Created report_templates table")

        # Insert default templates
        import uuid

        default_templates = [
            (str(uuid.uuid4()), "Nighttime (7PM-7AM)", None, "Background Noise Study", "19:00", "07:00", None, None),
            (str(uuid.uuid4()), "Daytime (7AM-7PM)", None, "Background Noise Study", "07:00", "19:00", None, None),
            (str(uuid.uuid4()), "Full Day (All Data)", None, "Background Noise Study", None, None, None, None),
        ]

        cursor.executemany("""
            INSERT INTO report_templates (id, name, project_id, report_title, start_time, end_time, start_date, end_date)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?)
        """, default_templates)
        print("  ✓ Inserted default templates (Nighttime, Daytime, Full Day)")

        conn.commit()
        print("\nMigration completed successfully!")

    except sqlite3.Error as e:
        print(f"\nError during migration: {e}")
        conn.rollback()
        raise

    finally:
        conn.close()


if __name__ == "__main__":
    migrate_database()

backend/migrate_add_session_device_model.py (new file, +127 lines)
@@ -0,0 +1,127 @@
#!/usr/bin/env python3
"""
Migration: Add device_model column to monitoring_sessions table.

Records which physical SLM model produced each session's data (e.g. "NL-43",
"NL-53", "NL-32"). Used by report generation to apply the correct parsing
logic without re-opening files to detect format.

Run once inside the Docker container:
    docker exec terra-view python3 backend/migrate_add_session_device_model.py

Backfill strategy for existing rows:
  1. If session.unit_id is set, use roster.slm_model for that unit.
  2. Else, peek at the first .rnd file in the session: presence of the 'LAeq'
     column header identifies AU2 / NL-32 format.
Sessions where neither hint is available remain NULL — the file-content
fallback in report code handles them transparently.
"""
import csv
from pathlib import Path

DB_PATH = Path("data/seismo_fleet.db")


def _peek_first_row(abs_path: Path) -> dict:
    """Read only the header + first data row of an RND file. Very cheap."""
    try:
        with open(abs_path, "r", encoding="utf-8", errors="replace") as f:
            reader = csv.DictReader(f)
            return next(reader, None) or {}
    except Exception:
        return {}


def _detect_model_from_rnd(abs_path: Path) -> str | None:
    """Return 'NL-32' if file uses AU2 column format, else None."""
    row = _peek_first_row(abs_path)
    if "LAeq" in row:
        return "NL-32"
    return None


def migrate():
    import sqlite3

    if not DB_PATH.exists():
        print(f"Database not found at {DB_PATH}. Are you running from /home/serversdown/terra-view?")
        return

    conn = sqlite3.connect(DB_PATH)
    conn.row_factory = sqlite3.Row
    cur = conn.cursor()

    # ── 1. Add column (idempotent) ───────────────────────────────────────────
    cur.execute("PRAGMA table_info(monitoring_sessions)")
    existing_cols = {row["name"] for row in cur.fetchall()}

    if "device_model" not in existing_cols:
        cur.execute("ALTER TABLE monitoring_sessions ADD COLUMN device_model TEXT")
        conn.commit()
        print("✓ Added column device_model to monitoring_sessions")
    else:
        print("○ Column device_model already exists — skipping ALTER TABLE")

    # ── 2. Backfill existing NULL rows ───────────────────────────────────────
    cur.execute(
        "SELECT id, unit_id FROM monitoring_sessions WHERE device_model IS NULL"
    )
    sessions = cur.fetchall()
    print(f"Backfilling {len(sessions)} session(s) with device_model=NULL...")

    updated = skipped = 0
    for row in sessions:
        session_id = row["id"]
        unit_id = row["unit_id"]
        device_model = None

        # Strategy A: look up unit's slm_model from the roster
        if unit_id:
            cur.execute(
                "SELECT slm_model FROM roster WHERE id = ?", (unit_id,)
            )
            unit_row = cur.fetchone()
            if unit_row and unit_row["slm_model"]:
                device_model = unit_row["slm_model"]

        # Strategy B: detect from first .rnd file in the session
        if device_model is None:
            cur.execute(
                """SELECT file_path FROM data_files
                   WHERE session_id = ?
                     AND lower(file_path) LIKE '%.rnd'
                   LIMIT 1""",
                (session_id,),
            )
            file_row = cur.fetchone()
            if file_row:
                abs_path = Path("data") / file_row["file_path"]
                device_model = _detect_model_from_rnd(abs_path)
                # None here means NL-43/NL-53 format (or unreadable file) —
                # leave as NULL so the existing fallback applies.

        if device_model:
            cur.execute(
                "UPDATE monitoring_sessions SET device_model = ? WHERE id = ?",
                (device_model, session_id),
            )
            updated += 1
        else:
            skipped += 1

    conn.commit()
    conn.close()

    print(f"✓ Backfilled {updated} session(s) with a device_model.")
    if skipped:
        print(
            f"  {skipped} session(s) left as NULL "
            "(no unit link and no AU2 file hint — NL-43/NL-53 or unknown; "
            "file-content detection applies at report time)."
        )
    print("Migration complete.")


if __name__ == "__main__":
    migrate()
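
# --- Review note (sketch, not in the original diff): a worked example of the
# probe above. For a hypothetical AU2-format file whose header row is
# "Time,LAeq,LAmax,...", _peek_first_row() returns a dict keyed by those
# column names, so "LAeq" in row is True and _detect_model_from_rnd()
# reports "NL-32". NL-43/NL-53 files (no "LAeq" column) yield None and are
# resolved by the file-content fallback at report time.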

backend/migrate_add_session_period_hours.py (new file, +42 lines)
@@ -0,0 +1,42 @@
"""
Migration: add period_start_hour and period_end_hour to monitoring_sessions.

Run once:
    python backend/migrate_add_session_period_hours.py

Or inside the container:
    docker exec terra-view python3 backend/migrate_add_session_period_hours.py
"""

import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from backend.database import engine
from sqlalchemy import text

def run():
    with engine.connect() as conn:
        # Check which columns already exist
        result = conn.execute(text("PRAGMA table_info(monitoring_sessions)"))
        existing = {row[1] for row in result}

        added = []
        for col, definition in [
            ("period_start_hour", "INTEGER"),
            ("period_end_hour", "INTEGER"),
        ]:
            if col not in existing:
                conn.execute(text(f"ALTER TABLE monitoring_sessions ADD COLUMN {col} {definition}"))
                added.append(col)
            else:
                print(f"  Column '{col}' already exists — skipping.")

        conn.commit()

    if added:
        print(f"  Added columns: {', '.join(added)}")
    print("Migration complete.")

if __name__ == "__main__":
    run()
131
backend/migrate_add_session_period_type.py
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Migration: Add session_label and period_type columns to monitoring_sessions.
|
||||||
|
|
||||||
|
session_label - user-editable display name, e.g. "NRL-1 Sun 2/23 Night"
|
||||||
|
period_type - one of: weekday_day | weekday_night | weekend_day | weekend_night
|
||||||
|
Auto-derived from started_at when NULL.
|
||||||
|
|
||||||
|
Period definitions (used in report stats table):
|
||||||
|
weekday_day Mon-Fri 07:00-22:00 -> Daytime (7AM-10PM)
|
||||||
|
    weekday_night   Mon-Fri 22:00-07:00  -> Nighttime (10PM-7AM)
    weekend_day     Sat-Sun 07:00-22:00  -> Daytime (7AM-10PM)
    weekend_night   Sat-Sun 22:00-07:00  -> Nighttime (10PM-7AM)

Run once inside the Docker container:

    docker exec terra-view python3 backend/migrate_add_session_period_type.py
"""

from pathlib import Path
from datetime import datetime

DB_PATH = Path("data/seismo_fleet.db")


def _derive_period_type(started_at_str: str) -> str | None:
    """Derive period_type from a started_at ISO datetime string."""
    if not started_at_str:
        return None
    try:
        dt = datetime.fromisoformat(started_at_str)
    except ValueError:
        return None
    is_weekend = dt.weekday() >= 5  # 5=Sat, 6=Sun
    is_night = dt.hour >= 22 or dt.hour < 7
    if is_weekend:
        return "weekend_night" if is_night else "weekend_day"
    else:
        return "weekday_night" if is_night else "weekday_day"


def _build_label(started_at_str: str, location_name: str | None, period_type: str | None) -> str | None:
    """Build a human-readable session label."""
    if not started_at_str:
        return None
    try:
        dt = datetime.fromisoformat(started_at_str)
    except ValueError:
        return None

    day_abbr = dt.strftime("%a")       # Mon, Tue, Sun, etc.
    date_str = dt.strftime("%-m/%-d")  # 2/23 (%-m/%-d are glibc extensions; not available on Windows)

    period_labels = {
        "weekday_day": "Day",
        "weekday_night": "Night",
        "weekend_day": "Day",
        "weekend_night": "Night",
    }
    period_str = period_labels.get(period_type or "", "")

    parts = []
    if location_name:
        parts.append(location_name)
    parts.append(f"{day_abbr} {date_str}")
    if period_str:
        parts.append(period_str)
    return " — ".join(parts)


def migrate():
    import sqlite3

    if not DB_PATH.exists():
        print(f"Database not found at {DB_PATH}. Are you running from /home/serversdown/terra-view?")
        return

    conn = sqlite3.connect(DB_PATH)
    conn.row_factory = sqlite3.Row
    cur = conn.cursor()

    # 1. Add columns (idempotent)
    cur.execute("PRAGMA table_info(monitoring_sessions)")
    existing_cols = {row["name"] for row in cur.fetchall()}

    for col, typedef in [("session_label", "TEXT"), ("period_type", "TEXT")]:
        if col not in existing_cols:
            cur.execute(f"ALTER TABLE monitoring_sessions ADD COLUMN {col} {typedef}")
            conn.commit()
            print(f"✓ Added column {col} to monitoring_sessions")
        else:
            print(f"○ Column {col} already exists — skipping ALTER TABLE")

    # 2. Backfill existing rows
    cur.execute(
        """SELECT ms.id, ms.started_at, ms.location_id
           FROM monitoring_sessions ms
           WHERE ms.period_type IS NULL OR ms.session_label IS NULL"""
    )
    sessions = cur.fetchall()
    print(f"Backfilling {len(sessions)} session(s)...")

    updated = 0
    for row in sessions:
        session_id = row["id"]
        started_at = row["started_at"]
        location_id = row["location_id"]

        # Look up location name
        location_name = None
        if location_id:
            cur.execute("SELECT name FROM monitoring_locations WHERE id = ?", (location_id,))
            loc_row = cur.fetchone()
            if loc_row:
                location_name = loc_row["name"]

        period_type = _derive_period_type(started_at)
        label = _build_label(started_at, location_name, period_type)

        cur.execute(
            "UPDATE monitoring_sessions SET period_type = ?, session_label = ? WHERE id = ?",
            (period_type, label, session_id),
        )
        updated += 1

    conn.commit()
    conn.close()
    print(f"✓ Backfilled {updated} session(s).")
    print("Migration complete.")


if __name__ == "__main__":
    migrate()
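A quick sanity check of the boundary semantics above (22:00 falls in the night bucket, 07:00 in the day bucket); a minimal sketch using made-up dates:

    assert _derive_period_type("2025-02-21T22:00:00") == "weekday_night"  # Fri 10PM -> night
    assert _derive_period_type("2025-02-21T07:00:00") == "weekday_day"    # Fri 7AM -> day
    assert _derive_period_type("2025-02-22T23:30:00") == "weekend_night"  # Sat 11:30PM -> night
    assert _derive_period_type("") is None                                # empty input -> None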
41
backend/migrate_add_session_report_date.py
Normal file
@@ -0,0 +1,41 @@
"""
Migration: add report_date to monitoring_sessions.

Run once:
    python backend/migrate_add_session_report_date.py

Or inside the container:
    docker exec terra-view-terra-view-1 python3 backend/migrate_add_session_report_date.py
"""

import sys
import os

# Make the project root importable when run as a script
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from backend.database import engine
from sqlalchemy import text


def run():
    with engine.connect() as conn:
        # Check which columns already exist
        result = conn.execute(text("PRAGMA table_info(monitoring_sessions)"))
        existing = {row[1] for row in result}

        added = []
        for col, definition in [
            ("report_date", "DATE"),
        ]:
            if col not in existing:
                conn.execute(text(f"ALTER TABLE monitoring_sessions ADD COLUMN {col} {definition}"))
                added.append(col)
            else:
                print(f"  Column '{col}' already exists — skipping.")

        conn.commit()

    if added:
        print(f"  Added columns: {', '.join(added)}")
    print("Migration complete.")


if __name__ == "__main__":
    run()
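To confirm the column landed, a minimal check with sqlite3 (assuming the default data/seismo_fleet.db path used elsewhere in this repo):

    import sqlite3

    conn = sqlite3.connect("data/seismo_fleet.db")
    cols = {row[1] for row in conn.execute("PRAGMA table_info(monitoring_sessions)")}
    assert "report_date" in cols, "report_date column missing"
    conn.close()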
78
backend/migrate_add_slm_fields.py
Normal file
@@ -0,0 +1,78 @@
#!/usr/bin/env python3
"""
Database migration: Add sound level meter fields to roster table.

Adds columns for the sound level meter ("slm") device type.
"""

import sqlite3
from pathlib import Path


def migrate():
    """Add SLM fields to roster table if they don't exist."""

    # Try multiple possible database locations
    possible_paths = [
        Path("data/seismo_fleet.db"),
        Path("data/sfm.db"),
        Path("data/seismo.db"),
    ]

    db_path = None
    for path in possible_paths:
        if path.exists():
            db_path = path
            break

    if db_path is None:
        print(f"Database not found in any of: {[str(p) for p in possible_paths]}")
        print("When the database is created from models.py, it will include the new fields automatically.")
        return

    print(f"Using database: {db_path}")

    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()

    # Check if columns already exist
    cursor.execute("PRAGMA table_info(roster)")
    existing_columns = {row[1] for row in cursor.fetchall()}

    new_columns = {
        "slm_host": "TEXT",
        "slm_tcp_port": "INTEGER",
        "slm_model": "TEXT",
        "slm_serial_number": "TEXT",
        "slm_frequency_weighting": "TEXT",
        "slm_time_weighting": "TEXT",
        "slm_measurement_range": "TEXT",
        "slm_last_check": "DATETIME",
    }

    migrations_applied = []

    for column_name, column_type in new_columns.items():
        if column_name not in existing_columns:
            try:
                cursor.execute(f"ALTER TABLE roster ADD COLUMN {column_name} {column_type}")
                migrations_applied.append(column_name)
                print(f"✓ Added column: {column_name} ({column_type})")
            except sqlite3.OperationalError as e:
                print(f"✗ Failed to add column {column_name}: {e}")
        else:
            print(f"○ Column already exists: {column_name}")

    conn.commit()
    conn.close()

    if migrations_applied:
        print(f"\n✓ Migration complete! Added {len(migrations_applied)} new columns.")
    else:
        print("\n○ No migration needed - all columns already exist.")

    print("\nSound level meter fields are now available in the roster table.")
    print("Note: Use device_type='slm' for Sound Level Meters. The legacy 'sound_level_meter' value is deprecated.")


if __name__ == "__main__":
    migrate()
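Once the columns exist, populating them is an ordinary parameterized UPDATE; a sketch with a made-up unit id:

    import sqlite3

    conn = sqlite3.connect("data/seismo_fleet.db")
    conn.execute(
        "UPDATE roster SET slm_host = ?, slm_tcp_port = ?, slm_model = ? WHERE id = ?",
        ("192.168.1.50", 2255, "NL-43", "NL43-0001"),  # NL43-0001 is a hypothetical unit id
    )
    conn.commit()
    conn.close()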
89
backend/migrate_add_tbd_dates.py
Normal file
@@ -0,0 +1,89 @@
"""
Migration: Add TBD date support to job reservations

Adds columns:
- job_reservations.estimated_end_date: For planning when end is TBD
- job_reservations.end_date_tbd: Boolean flag for TBD end dates
- job_reservation_units.unit_start_date: Unit-specific start (for swaps)
- job_reservation_units.unit_end_date: Unit-specific end (for swaps)
- job_reservation_units.unit_end_tbd: Unit-specific TBD flag
- job_reservation_units.notes: Notes for the assignment

Making job_reservations.end_date itself nullable is handled by the companion
migration, migrate_fix_end_date_nullable.py.
"""

import sqlite3
import sys
from pathlib import Path


def migrate(db_path: str):
    """Run the migration."""
    print(f"Migrating database: {db_path}")

    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()

    try:
        # Check if job_reservations table exists
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='job_reservations'")
        if not cursor.fetchone():
            print("job_reservations table does not exist. Skipping migration.")
            return

        # Get existing columns in job_reservations
        cursor.execute("PRAGMA table_info(job_reservations)")
        existing_cols = {row[1] for row in cursor.fetchall()}

        # Add new columns to job_reservations if they don't exist
        if 'estimated_end_date' not in existing_cols:
            print("Adding estimated_end_date column to job_reservations...")
            cursor.execute("ALTER TABLE job_reservations ADD COLUMN estimated_end_date DATE")

        if 'end_date_tbd' not in existing_cols:
            print("Adding end_date_tbd column to job_reservations...")
            cursor.execute("ALTER TABLE job_reservations ADD COLUMN end_date_tbd BOOLEAN DEFAULT 0")

        # Get existing columns in job_reservation_units
        cursor.execute("PRAGMA table_info(job_reservation_units)")
        unit_cols = {row[1] for row in cursor.fetchall()}

        # Add new columns to job_reservation_units if they don't exist
        if 'unit_start_date' not in unit_cols:
            print("Adding unit_start_date column to job_reservation_units...")
            cursor.execute("ALTER TABLE job_reservation_units ADD COLUMN unit_start_date DATE")

        if 'unit_end_date' not in unit_cols:
            print("Adding unit_end_date column to job_reservation_units...")
            cursor.execute("ALTER TABLE job_reservation_units ADD COLUMN unit_end_date DATE")

        if 'unit_end_tbd' not in unit_cols:
            print("Adding unit_end_tbd column to job_reservation_units...")
            cursor.execute("ALTER TABLE job_reservation_units ADD COLUMN unit_end_tbd BOOLEAN DEFAULT 0")

        if 'notes' not in unit_cols:
            print("Adding notes column to job_reservation_units...")
            cursor.execute("ALTER TABLE job_reservation_units ADD COLUMN notes TEXT")

        conn.commit()
        print("Migration completed successfully!")

    except Exception as e:
        print(f"Migration failed: {e}")
        conn.rollback()
        raise
    finally:
        conn.close()


if __name__ == "__main__":
    # Default to dev database
    db_path = "./data-dev/seismo_fleet.db"

    if len(sys.argv) > 1:
        db_path = sys.argv[1]

    if not Path(db_path).exists():
        print(f"Database not found: {db_path}")
        sys.exit(1)

    migrate(db_path)
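Note that this script only adds columns; making end_date itself nullable is the table rebuild in migrate_fix_end_date_nullable.py, so the two are meant to run in order:

    python backend/migrate_add_tbd_dates.py ./data-dev/seismo_fleet.db
    python backend/migrate_fix_end_date_nullable.py ./data-dev/seismo_fleet.db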
105
backend/migrate_fix_end_date_nullable.py
Normal file
@@ -0,0 +1,105 @@
"""
Migration: Make job_reservations.end_date nullable for TBD support

SQLite doesn't support ALTER COLUMN, so we need to:
1. Create a new table with the correct schema
2. Copy data
3. Drop the old table
4. Rename the new table
"""

import sqlite3
import sys
from pathlib import Path


def migrate(db_path: str):
    """Run the migration."""
    print(f"Migrating database: {db_path}")

    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()

    try:
        # Check if job_reservations table exists
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='job_reservations'")
        if not cursor.fetchone():
            print("job_reservations table does not exist. Skipping migration.")
            return

        # Check current schema
        cursor.execute("PRAGMA table_info(job_reservations)")
        columns = cursor.fetchall()
        col_info = {row[1]: row for row in columns}

        # Check if end_date is already nullable (notnull=0)
        if 'end_date' in col_info and col_info['end_date'][3] == 0:
            print("end_date is already nullable. Skipping table recreation.")
            return

        print("Recreating job_reservations table with nullable end_date...")

        # Create new table with correct schema
        cursor.execute("""
            CREATE TABLE job_reservations_new (
                id TEXT PRIMARY KEY,
                name TEXT NOT NULL,
                project_id TEXT,
                start_date DATE NOT NULL,
                end_date DATE,
                estimated_end_date DATE,
                end_date_tbd BOOLEAN DEFAULT 0,
                assignment_type TEXT NOT NULL DEFAULT 'quantity',
                device_type TEXT DEFAULT 'seismograph',
                quantity_needed INTEGER,
                notes TEXT,
                color TEXT DEFAULT '#3B82F6',
                created_at DATETIME,
                updated_at DATETIME
            )
        """)

        # Copy existing data
        cursor.execute("""
            INSERT INTO job_reservations_new
            SELECT
                id, name, project_id, start_date, end_date,
                COALESCE(estimated_end_date, NULL) as estimated_end_date,
                COALESCE(end_date_tbd, 0) as end_date_tbd,
                assignment_type, device_type, quantity_needed, notes, color,
                created_at, updated_at
            FROM job_reservations
        """)

        # Drop old table
        cursor.execute("DROP TABLE job_reservations")

        # Rename new table
        cursor.execute("ALTER TABLE job_reservations_new RENAME TO job_reservations")

        # Recreate indexes
        cursor.execute("CREATE INDEX IF NOT EXISTS ix_job_reservations_id ON job_reservations (id)")
        cursor.execute("CREATE INDEX IF NOT EXISTS ix_job_reservations_project_id ON job_reservations (project_id)")

        conn.commit()
        print("Migration completed successfully!")

    except Exception as e:
        print(f"Migration failed: {e}")
        conn.rollback()
        raise
    finally:
        conn.close()


if __name__ == "__main__":
    # Default to dev database
    db_path = "./data-dev/seismo_fleet.db"

    if len(sys.argv) > 1:
        db_path = sys.argv[1]

    if not Path(db_path).exists():
        print(f"Database not found: {db_path}")
        sys.exit(1)

    migrate(db_path)
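A minimal check that the rebuild worked (in PRAGMA table_info output, the notnull flag is column index 3):

    import sqlite3

    conn = sqlite3.connect("./data-dev/seismo_fleet.db")
    for row in conn.execute("PRAGMA table_info(job_reservations)"):
        if row[1] == "end_date":
            assert row[3] == 0, "end_date is still NOT NULL"
    conn.close()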
54
backend/migrate_rename_recording_to_monitoring_sessions.py
Normal file
@@ -0,0 +1,54 @@
"""
Migration: Rename recording_sessions table to monitoring_sessions

Renames the table and updates the model name from RecordingSession to MonitoringSession.
Run once per database: python backend/migrate_rename_recording_to_monitoring_sessions.py
"""

import sqlite3
import sys
from pathlib import Path


def migrate(db_path: str):
    """Run the migration."""
    print(f"Migrating database: {db_path}")

    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()

    try:
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='recording_sessions'")
        if not cursor.fetchone():
            cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='monitoring_sessions'")
            if cursor.fetchone():
                print("monitoring_sessions table already exists. Skipping migration.")
            else:
                print("recording_sessions table does not exist. Skipping migration.")
            return

        print("Renaming recording_sessions -> monitoring_sessions...")
        cursor.execute("ALTER TABLE recording_sessions RENAME TO monitoring_sessions")

        conn.commit()
        print("Migration completed successfully!")

    except Exception as e:
        print(f"Migration failed: {e}")
        conn.rollback()
        raise
    finally:
        conn.close()


if __name__ == "__main__":
    db_path = "./data/seismo_fleet.db"

    if len(sys.argv) > 1:
        db_path = sys.argv[1]

    if not Path(db_path).exists():
        print(f"Database not found: {db_path}")
        sys.exit(1)

    migrate(db_path)
106
backend/migrate_standardize_device_types.py
Normal file
@@ -0,0 +1,106 @@
"""
Database Migration: Standardize device_type values

This migration ensures all device_type values follow the official schema:
- "seismograph" - Seismic monitoring devices
- "modem" - Field modems and network equipment
- "slm" - Sound level meters (NL-43/NL-53)

Changes:
- Converts "sound_level_meter" → "slm"
- Safe to run multiple times (idempotent)
- No data loss

Usage:
    python backend/migrate_standardize_device_types.py
"""

import sys
import os

# Add parent directory to path so we can import backend modules
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker

# Database configuration
SQLALCHEMY_DATABASE_URL = "sqlite:///./data/seismo_fleet.db"
engine = create_engine(SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False})
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)


def migrate():
    """Standardize device_type values in the database"""
    db = SessionLocal()

    try:
        print("=" * 70)
        print("Database Migration: Standardize device_type values")
        print("=" * 70)
        print()

        # Check for existing "sound_level_meter" values
        result = db.execute(
            text("SELECT COUNT(*) as count FROM roster WHERE device_type = 'sound_level_meter'")
        ).fetchone()

        count_to_migrate = result[0] if result else 0

        if count_to_migrate == 0:
            print("✓ No records need migration - all device_type values are already standardized")
            print()
            print("Current device_type distribution:")

            # Show distribution
            distribution = db.execute(
                text("SELECT device_type, COUNT(*) as count FROM roster GROUP BY device_type ORDER BY count DESC")
            ).fetchall()

            for row in distribution:
                device_type, count = row
                print(f"  - {device_type}: {count} units")

            print()
            print("Migration not needed.")
            return

        print(f"Found {count_to_migrate} record(s) with device_type='sound_level_meter'")
        print()
        print("Converting 'sound_level_meter' → 'slm'...")

        # Perform the migration
        db.execute(
            text("UPDATE roster SET device_type = 'slm' WHERE device_type = 'sound_level_meter'")
        )
        db.commit()

        print(f"✓ Successfully migrated {count_to_migrate} record(s)")
        print()

        # Show final distribution
        print("Updated device_type distribution:")
        distribution = db.execute(
            text("SELECT device_type, COUNT(*) as count FROM roster GROUP BY device_type ORDER BY count DESC")
        ).fetchall()

        for row in distribution:
            device_type, count = row
            print(f"  - {device_type}: {count} units")

        print()
        print("=" * 70)
        print("Migration completed successfully!")
        print("=" * 70)

    except Exception as e:
        db.rollback()
        print(f"\n❌ Error during migration: {e}")
        print("\nRolling back changes...")
        raise
    finally:
        db.close()


if __name__ == "__main__":
    migrate()
@@ -19,16 +19,22 @@ class RosterUnit(Base):
    Roster table: represents our *intended assignment* of a unit.
    This is editable from the GUI.

    Supports multiple device types with type-specific fields:
    - "seismograph" - Seismic monitoring devices (default)
    - "modem" - Field modems and network equipment
    - "slm" - Sound level meters (NL-43/NL-53)
    """
    __tablename__ = "roster"

    # Core fields (all device types)
    id = Column(String, primary_key=True, index=True)
    unit_type = Column(String, default="series3")  # Backward compatibility
    device_type = Column(String, default="seismograph")  # "seismograph" | "modem" | "slm"
    deployed = Column(Boolean, default=True)
    retired = Column(Boolean, default=False)
    out_for_calibration = Column(Boolean, default=False)
    allocated = Column(Boolean, default=False)  # Staged for an upcoming job, not yet deployed
    allocated_to_project_id = Column(String, nullable=True)  # Which project it's allocated to
    note = Column(String, nullable=True)
    project_id = Column(String, nullable=True)
    location = Column(String, nullable=True)  # Legacy field - use address/coordinates instead
@@ -36,15 +42,50 @@ class RosterUnit(Base):
    coordinates = Column(String, nullable=True)  # Lat,Lon format: "34.0522,-118.2437"
    last_updated = Column(DateTime, default=datetime.utcnow)

    # Seismograph-specific fields (nullable for modems and SLMs)
    last_calibrated = Column(Date, nullable=True)
    next_calibration_due = Column(Date, nullable=True)

    # Modem assignment (shared by seismographs and SLMs)
    deployed_with_modem_id = Column(String, nullable=True)  # FK to another RosterUnit (device_type=modem)

    # Modem-specific fields (nullable for seismographs and SLMs)
    ip_address = Column(String, nullable=True)
    phone_number = Column(String, nullable=True)
    hardware_model = Column(String, nullable=True)
    deployment_type = Column(String, nullable=True)  # "seismograph" | "slm" - what type of device this modem is deployed with
    deployed_with_unit_id = Column(String, nullable=True)  # ID of seismograph/SLM this modem is deployed with

    # Sound Level Meter-specific fields (nullable for seismographs and modems)
    slm_host = Column(String, nullable=True)  # Device IP or hostname
    slm_tcp_port = Column(Integer, nullable=True)  # TCP control port (default 2255)
    slm_ftp_port = Column(Integer, nullable=True)  # FTP data retrieval port (default 21)
    slm_model = Column(String, nullable=True)  # NL-43, NL-53, etc.
    slm_serial_number = Column(String, nullable=True)  # Device serial number
    slm_frequency_weighting = Column(String, nullable=True)  # A, C, Z
    slm_time_weighting = Column(String, nullable=True)  # F (Fast), S (Slow), I (Impulse)
    slm_measurement_range = Column(String, nullable=True)  # e.g., "30-130 dB"
    slm_last_check = Column(DateTime, nullable=True)  # Last communication check


class WatcherAgent(Base):
    """
    Watcher agents: tracks the watcher processes (series3-watcher, thor-watcher)
    that run on field machines and report unit heartbeats.

    Updated on every heartbeat received from each source_id.
    """
    __tablename__ = "watcher_agents"

    id = Column(String, primary_key=True, index=True)  # source_id (hostname)
    source_type = Column(String, nullable=False)  # series3_watcher | series4_watcher
    version = Column(String, nullable=True)  # e.g. "1.4.0"
    last_seen = Column(DateTime, default=datetime.utcnow)
    status = Column(String, nullable=False, default="unknown")  # ok | pending | missing | error | unknown
    ip_address = Column(String, nullable=True)
    log_tail = Column(Text, nullable=True)  # last N log lines (JSON array of strings)
    update_pending = Column(Boolean, default=False)  # set True to trigger remote update
    update_version = Column(String, nullable=True)  # target version to update to


class IgnoredUnit(Base):
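With the new columns in place, an SLM roster entry might be constructed like this (values are illustrative, not from the diff):

    slm = RosterUnit(
        id="NL43-0001",              # hypothetical unit id
        device_type="slm",
        slm_host="192.168.1.50",
        slm_tcp_port=2255,           # TCP control port default noted above
        slm_model="NL-43",
        slm_frequency_weighting="A",
        slm_time_weighting="F",
    )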
@@ -95,3 +136,442 @@ class UserPreferences(Base):
    status_ok_threshold_hours = Column(Integer, default=12)
    status_pending_threshold_hours = Column(Integer, default=24)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)


# ============================================================================
# Project Management System
# ============================================================================

class ProjectType(Base):
    """
    Project type templates: defines available project types and their capabilities.
    Pre-populated with: sound_monitoring, vibration_monitoring, combined.
    """
    __tablename__ = "project_types"

    id = Column(String, primary_key=True)  # sound_monitoring, vibration_monitoring, combined
    name = Column(String, nullable=False, unique=True)  # "Sound Monitoring", "Vibration Monitoring"
    description = Column(Text, nullable=True)
    icon = Column(String, nullable=True)  # Icon identifier for UI
    supports_sound = Column(Boolean, default=False)  # Enables SLM features
    supports_vibration = Column(Boolean, default=False)  # Enables seismograph features
    created_at = Column(DateTime, default=datetime.utcnow)

class Project(Base):
    """
    Projects: top-level organization for monitoring work.
    Type-aware to enable/disable features based on project_type_id.

    Project naming convention:
    - project_number: TMI internal ID format xxxx-YY (e.g., "2567-23")
    - client_name: Client/contractor name (e.g., "PJ Dick")
    - name: Project/site name (e.g., "RKM Hall", "CMU Campus")

    Display format: "2567-23 - PJ Dick - RKM Hall"
    Users can search by any of these fields.
    """
    __tablename__ = "projects"

    id = Column(String, primary_key=True, index=True)  # UUID
    project_number = Column(String, nullable=True, index=True)  # TMI ID: xxxx-YY format (e.g., "2567-23")
    name = Column(String, nullable=False, unique=True)  # Project/site name (e.g., "RKM Hall")
    description = Column(Text, nullable=True)
    project_type_id = Column(String, nullable=False)  # FK to ProjectType.id
    status = Column(String, default="active")  # active, on_hold, completed, archived, deleted

    # Data collection mode: how field data reaches Terra-View.
    # "remote" — units have modems; data pulled via FTP/scheduler automatically
    # "manual" — no modem; SD cards retrieved daily and uploaded by hand
    data_collection_mode = Column(String, default="manual")  # remote | manual

    # Project metadata
    client_name = Column(String, nullable=True, index=True)  # Client name (e.g., "PJ Dick")
    site_address = Column(String, nullable=True)
    site_coordinates = Column(String, nullable=True)  # "lat,lon"
    start_date = Column(Date, nullable=True)
    end_date = Column(Date, nullable=True)

    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
    deleted_at = Column(DateTime, nullable=True)  # Set when status='deleted'; hard delete scheduled after 60 days

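A minimal sketch of the display convention described in the docstring (the helper name is hypothetical):

    def project_display_name(project) -> str:
        """Join project_number, client_name, and name, skipping missing parts."""
        parts = [project.project_number, project.client_name, project.name]
        return " - ".join(p for p in parts if p)

    # project_number="2567-23", client_name="PJ Dick", name="RKM Hall"
    # -> "2567-23 - PJ Dick - RKM Hall"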
class MonitoringLocation(Base):
    """
    Monitoring locations: generic location for monitoring activities.
    Can be an NRL (Noise Recording Location) for sound projects,
    or a monitoring point for vibration projects.
    """
    __tablename__ = "monitoring_locations"

    id = Column(String, primary_key=True, index=True)  # UUID
    project_id = Column(String, nullable=False, index=True)  # FK to Project.id
    location_type = Column(String, nullable=False)  # "sound" | "vibration"

    name = Column(String, nullable=False)  # NRL-001, VP-North, etc.
    description = Column(Text, nullable=True)
    coordinates = Column(String, nullable=True)  # "lat,lon"
    address = Column(String, nullable=True)

    # Type-specific metadata stored as JSON
    # For sound: {"ambient_conditions": "urban", "expected_sources": ["traffic"]}
    # For vibration: {"ground_type": "bedrock", "depth": "10m"}
    location_metadata = Column(Text, nullable=True)

    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)


class UnitAssignment(Base):
    """
    Unit assignments: links devices (SLMs or seismographs) to monitoring locations.
    Supports temporary assignments with assigned_until.
    """
    __tablename__ = "unit_assignments"

    id = Column(String, primary_key=True, index=True)  # UUID
    unit_id = Column(String, nullable=False, index=True)  # FK to RosterUnit.id
    location_id = Column(String, nullable=False, index=True)  # FK to MonitoringLocation.id

    assigned_at = Column(DateTime, default=datetime.utcnow)
    assigned_until = Column(DateTime, nullable=True)  # Null = indefinite
    status = Column(String, default="active")  # active, completed, cancelled
    notes = Column(Text, nullable=True)

    # Denormalized for efficient queries
    device_type = Column(String, nullable=False)  # "slm" | "seismograph"
    project_id = Column(String, nullable=False, index=True)  # FK to Project.id

    created_at = Column(DateTime, default=datetime.utcnow)


class ScheduledAction(Base):
    """
    Scheduled actions: automation for recording start/stop/download.
    Terra-View executes these by calling SLMM or SFM endpoints.
    """
    __tablename__ = "scheduled_actions"

    id = Column(String, primary_key=True, index=True)  # UUID
    project_id = Column(String, nullable=False, index=True)  # FK to Project.id
    location_id = Column(String, nullable=False, index=True)  # FK to MonitoringLocation.id
    unit_id = Column(String, nullable=True, index=True)  # FK to RosterUnit.id (nullable if location-based)

    action_type = Column(String, nullable=False)  # start, stop, download, cycle, calibrate
    device_type = Column(String, nullable=False)  # "slm" | "seismograph"

    scheduled_time = Column(DateTime, nullable=False, index=True)
    executed_at = Column(DateTime, nullable=True)
    execution_status = Column(String, default="pending")  # pending, completed, failed, cancelled

    # Response from device module (SLMM or SFM)
    module_response = Column(Text, nullable=True)  # JSON
    error_message = Column(Text, nullable=True)

    notes = Column(Text, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow)


class MonitoringSession(Base):
    """
    Monitoring sessions: tracks actual monitoring sessions.
    Created when monitoring starts, updated when it stops.
    """
    __tablename__ = "monitoring_sessions"

    id = Column(String, primary_key=True, index=True)  # UUID
    project_id = Column(String, nullable=False, index=True)  # FK to Project.id
    location_id = Column(String, nullable=False, index=True)  # FK to MonitoringLocation.id
    unit_id = Column(String, nullable=True, index=True)  # FK to RosterUnit.id (nullable for offline uploads)

    # Physical device model that produced this session's data (e.g. "NL-43", "NL-53", "NL-32").
    # Null for older records; report code falls back to file-content detection when null.
    device_model = Column(String, nullable=True)

    session_type = Column(String, nullable=False)  # sound | vibration
    started_at = Column(DateTime, nullable=False)
    stopped_at = Column(DateTime, nullable=True)
    duration_seconds = Column(Integer, nullable=True)
    status = Column(String, default="recording")  # recording, completed, failed

    # Human-readable label auto-derived from date/location, editable by user.
    # e.g. "NRL-1 — Sun 2/23 — Night"
    session_label = Column(String, nullable=True)

    # Period classification for report stats columns.
    # weekday_day | weekday_night | weekend_day | weekend_night
    period_type = Column(String, nullable=True)

    # Effective monitoring window (hours 0–23). Night sessions cross midnight
    # (period_end_hour < period_start_hour). NULL = no filtering applied.
    # e.g. Day: start=7, end=19   Night: start=19, end=7
    period_start_hour = Column(Integer, nullable=True)
    period_end_hour = Column(Integer, nullable=True)

    # For day sessions: the specific calendar date to use for report filtering.
    # Overrides the automatic "last date with daytime rows" heuristic.
    # Null = use heuristic.
    report_date = Column(Date, nullable=True)

    # Snapshot of device configuration at recording time
    session_metadata = Column(Text, nullable=True)  # JSON

    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

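A minimal sketch of how a midnight-crossing window like this is intended to be evaluated against a sample's hour (the helper is illustrative, not part of the diff):

    def hour_in_window(hour: int, start_hour: int, end_hour: int) -> bool:
        """True if hour is in [start_hour, end_hour); wraps past midnight when end < start."""
        if start_hour <= end_hour:
            return start_hour <= hour < end_hour      # e.g. Day: 7 <= h < 19
        return hour >= start_hour or hour < end_hour  # e.g. Night: h >= 19 or h < 7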
class DataFile(Base):
    """
    Data files: references to recorded data files.
    Terra-View tracks file metadata; the actual files are stored in the data/Projects/ directory.
    """
    __tablename__ = "data_files"

    id = Column(String, primary_key=True, index=True)  # UUID
    session_id = Column(String, nullable=False, index=True)  # FK to MonitoringSession.id

    file_path = Column(String, nullable=False)  # Relative to data/Projects/
    file_type = Column(String, nullable=False)  # wav, csv, mseed, json
    file_size_bytes = Column(Integer, nullable=True)
    downloaded_at = Column(DateTime, nullable=True)
    checksum = Column(String, nullable=True)  # SHA256 or MD5

    # Additional file metadata
    file_metadata = Column(Text, nullable=True)  # JSON

    created_at = Column(DateTime, default=datetime.utcnow)


class ReportTemplate(Base):
    """
    Report templates: saved configurations for generating Excel reports.
    Allows users to save time filter presets, titles, etc. for reuse.
    """
    __tablename__ = "report_templates"

    id = Column(String, primary_key=True, index=True)  # UUID
    name = Column(String, nullable=False)  # "Nighttime Report", "Full Day Report"
    project_id = Column(String, nullable=True)  # Optional: project-specific template

    # Template settings
    report_title = Column(String, default="Background Noise Study")
    start_time = Column(String, nullable=True)  # "19:00" format
    end_time = Column(String, nullable=True)  # "07:00" format
    start_date = Column(String, nullable=True)  # "2025-01-15" format (optional)
    end_date = Column(String, nullable=True)  # "2025-01-20" format (optional)

    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)


# ============================================================================
# Sound Monitoring Scheduler
# ============================================================================

class RecurringSchedule(Base):
    """
    Recurring schedule definitions for automated sound monitoring.

    Supports three schedule types:
    - "weekly_calendar": Select specific days with start/end times (e.g., Mon/Wed/Fri 7pm-7am)
    - "simple_interval": For 24/7 monitoring with daily stop/download/restart cycles
    - "one_off": Single recording session with a specific start and end date/time
    """
    __tablename__ = "recurring_schedules"

    id = Column(String, primary_key=True, index=True)  # UUID
    project_id = Column(String, nullable=False, index=True)  # FK to Project.id
    location_id = Column(String, nullable=False, index=True)  # FK to MonitoringLocation.id
    unit_id = Column(String, nullable=True, index=True)  # FK to RosterUnit.id (optional, can use assignment)

    name = Column(String, nullable=False)  # "Weeknight Monitoring", "24/7 Continuous"
    schedule_type = Column(String, nullable=False)  # "weekly_calendar" | "simple_interval" | "one_off"
    device_type = Column(String, nullable=False)  # "slm" | "seismograph"

    # Weekly Calendar fields (schedule_type = "weekly_calendar")
    # JSON format: {
    #   "monday": {"enabled": true, "start": "19:00", "end": "07:00"},
    #   "tuesday": {"enabled": false},
    #   ...
    # }
    weekly_pattern = Column(Text, nullable=True)

    # Simple Interval fields (schedule_type = "simple_interval")
    interval_type = Column(String, nullable=True)  # "daily" | "hourly"
    cycle_time = Column(String, nullable=True)  # "00:00" - time to run stop/download/restart
    include_download = Column(Boolean, default=True)  # Download data before restart

    # One-Off fields (schedule_type = "one_off")
    start_datetime = Column(DateTime, nullable=True)  # Exact start date+time (stored as UTC)
    end_datetime = Column(DateTime, nullable=True)  # Exact end date+time (stored as UTC)

    # Automation options (applies to all schedule types)
    auto_increment_index = Column(Boolean, default=True)  # Auto-increment store/index number before start
    # When True: prevents "overwrite data?" prompts by using a new index each time

    # Shared configuration
    enabled = Column(Boolean, default=True)
    timezone = Column(String, default="America/New_York")

    # Tracking
    last_generated_at = Column(DateTime, nullable=True)  # When actions were last generated
    next_occurrence = Column(DateTime, nullable=True)  # Computed next action time

    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

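A minimal sketch of populating weekly_pattern with the JSON shape documented above (schedule is assumed to be a RecurringSchedule instance):

    import json

    pattern = {
        "monday": {"enabled": True, "start": "19:00", "end": "07:00"},
        "tuesday": {"enabled": False},
        "friday": {"enabled": True, "start": "19:00", "end": "07:00"},
    }
    schedule.weekly_pattern = json.dumps(pattern)  # stored as TEXT on the model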
class Alert(Base):
    """
    In-app alerts for device status changes and system events.

    Designed for future expansion to email/webhook notifications.
    Currently supports:
    - device_offline: Device became unreachable
    - device_online: Device came back online
    - schedule_failed: Scheduled action failed to execute
    - schedule_completed: Scheduled action completed successfully
    """
    __tablename__ = "alerts"

    id = Column(String, primary_key=True, index=True)  # UUID

    # Alert classification
    alert_type = Column(String, nullable=False)  # "device_offline" | "device_online" | "schedule_failed" | "schedule_completed"
    severity = Column(String, default="warning")  # "info" | "warning" | "critical"

    # Related entities (nullable - may not all apply)
    project_id = Column(String, nullable=True, index=True)
    location_id = Column(String, nullable=True, index=True)
    unit_id = Column(String, nullable=True, index=True)
    schedule_id = Column(String, nullable=True)  # RecurringSchedule or ScheduledAction id

    # Alert content
    title = Column(String, nullable=False)  # "NRL-001 Device Offline"
    message = Column(Text, nullable=True)  # Detailed description
    alert_metadata = Column(Text, nullable=True)  # JSON: additional context data

    # Status tracking
    status = Column(String, default="active")  # "active" | "acknowledged" | "resolved" | "dismissed"
    acknowledged_at = Column(DateTime, nullable=True)
    resolved_at = Column(DateTime, nullable=True)

    created_at = Column(DateTime, default=datetime.utcnow)
    expires_at = Column(DateTime, nullable=True)  # Auto-dismiss after this time


# ============================================================================
# Deployment Records
# ============================================================================

class DeploymentRecord(Base):
    """
    Deployment records: tracks each time a unit is sent to the field and returned.

    Each row represents one deployment. The active deployment is the record
    with actual_removal_date IS NULL. The fleet calendar uses this to show
    units as "In Field" and surface their expected return date.

    project_ref is a freeform string for legacy/vibration jobs like "Fay I-80".
    project_id will be populated once those jobs are migrated to proper Project records.
    """
    __tablename__ = "deployment_records"

    id = Column(String, primary_key=True, index=True)  # UUID
    unit_id = Column(String, nullable=False, index=True)  # FK to RosterUnit.id

    deployed_date = Column(Date, nullable=True)  # When unit left the yard
    estimated_removal_date = Column(Date, nullable=True)  # Expected return date
    actual_removal_date = Column(Date, nullable=True)  # Filled in when returned; NULL = still out

    # Project linkage: freeform for legacy jobs, FK for proper project records
    project_ref = Column(String, nullable=True)  # e.g. "Fay I-80" (vibration jobs)
    project_id = Column(String, nullable=True, index=True)  # FK to Project.id (when available)

    location_name = Column(String, nullable=True)  # e.g. "North Gate", "VP-001"
    notes = Column(Text, nullable=True)

    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

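A minimal sketch of the active-deployment lookup implied by the docstring, assuming a SQLAlchemy session named db and a unit_id in hand:

    active = (
        db.query(DeploymentRecord)
        .filter(
            DeploymentRecord.unit_id == unit_id,
            DeploymentRecord.actual_removal_date.is_(None),  # still in the field
        )
        .first()
    )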
# ============================================================================
# Fleet Calendar & Job Reservations
# ============================================================================

class JobReservation(Base):
    """
    Job reservations: reserve units for future jobs/projects.

    Supports two assignment modes:
    - "specific": Pick exact units (SN-001, SN-002, etc.)
    - "quantity": Reserve a number of units (e.g., "need 8 seismographs")

    Used by the Fleet Calendar to visualize unit availability over time.
    """
    __tablename__ = "job_reservations"

    id = Column(String, primary_key=True, index=True)  # UUID
    name = Column(String, nullable=False)  # "Job A - March deployment"
    project_id = Column(String, nullable=True, index=True)  # Optional FK to Project

    # Date range for the reservation
    start_date = Column(Date, nullable=False)
    end_date = Column(Date, nullable=True)  # Nullable = TBD / ongoing
    estimated_end_date = Column(Date, nullable=True)  # For planning when end is TBD
    end_date_tbd = Column(Boolean, default=False)  # True = end date unknown

    # Assignment type: "specific" or "quantity"
    assignment_type = Column(String, nullable=False, default="quantity")

    # For quantity reservations
    device_type = Column(String, default="seismograph")  # seismograph | slm
    quantity_needed = Column(Integer, nullable=True)  # e.g., 8 units
    estimated_units = Column(Integer, nullable=True)

    # Full slot list as JSON: [{"location_name": "North Gate", "unit_id": null}, ...]
    # Includes empty slots (no unit assigned yet). Filled slots are authoritative in JobReservationUnit.
    location_slots = Column(Text, nullable=True)

    # Metadata
    notes = Column(Text, nullable=True)
    color = Column(String, default="#3B82F6")  # For calendar display (blue default)

    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)


class JobReservationUnit(Base):
    """
    Links specific units to job reservations.

    Used when:
    - assignment_type="specific": Units are directly assigned
    - assignment_type="quantity": Units can be filled in later

    Supports unit swaps: the same reservation can have multiple units with
    different date ranges (e.g., BE17353 Feb-Jun, then BE18438 Jun-Nov).
    """
    __tablename__ = "job_reservation_units"

    id = Column(String, primary_key=True, index=True)  # UUID
    reservation_id = Column(String, nullable=False, index=True)  # FK to JobReservation
    unit_id = Column(String, nullable=False, index=True)  # FK to RosterUnit

    # Unit-specific date range (for swaps) - defaults to reservation dates if null
    unit_start_date = Column(Date, nullable=True)  # When this specific unit starts
    unit_end_date = Column(Date, nullable=True)  # When this unit ends (swap out date)
    unit_end_tbd = Column(Boolean, default=False)  # True = end unknown (until cal expires or job ends)

    # Track how this assignment was made
    assignment_source = Column(String, default="specific")  # "specific" | "filled" | "swap"
    assigned_at = Column(DateTime, default=datetime.utcnow)
    notes = Column(Text, nullable=True)  # "Replacing BE17353" etc.

    # Power requirements for this deployment slot
    power_type = Column(String, nullable=True)  # "ac" | "solar" | None

    # Location identity
    location_name = Column(String, nullable=True)  # e.g. "North Gate", "Main Entrance"
    slot_index = Column(Integer, nullable=True)  # Order within reservation (0-based)
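The "defaults to reservation dates if null" rule can be made concrete with a small helper (hypothetical, not part of the diff):

    def effective_unit_dates(unit, reservation):
        """Resolve a JobReservationUnit's date range, falling back to the reservation's."""
        start = unit.unit_start_date or reservation.start_date
        end = unit.unit_end_date or reservation.end_date  # may stay None when end is TBD
        return start, end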
326
backend/routers/alerts.py
Normal file
@@ -0,0 +1,326 @@
|
|||||||
|
"""
|
||||||
|
Alerts Router
|
||||||
|
|
||||||
|
API endpoints for managing in-app alerts.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from fastapi import APIRouter, Request, Depends, HTTPException, Query
|
||||||
|
from fastapi.responses import HTMLResponse, JSONResponse
|
||||||
|
from sqlalchemy.orm import Session
|
||||||
|
from typing import Optional
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
|
||||||
|
from backend.database import get_db
|
||||||
|
from backend.models import Alert, RosterUnit
|
||||||
|
from backend.services.alert_service import get_alert_service
|
||||||
|
from backend.templates_config import templates
|
||||||
|
|
||||||
|
router = APIRouter(prefix="/api/alerts", tags=["alerts"])
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Alert List and Count
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
@router.get("/")
|
||||||
|
async def list_alerts(
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
status: Optional[str] = Query(None, description="Filter by status: active, acknowledged, resolved, dismissed"),
|
||||||
|
project_id: Optional[str] = Query(None),
|
||||||
|
unit_id: Optional[str] = Query(None),
|
||||||
|
alert_type: Optional[str] = Query(None, description="Filter by type: device_offline, device_online, schedule_failed"),
|
||||||
|
limit: int = Query(50, le=100),
|
||||||
|
offset: int = Query(0, ge=0),
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
List alerts with optional filters.
|
||||||
|
"""
|
||||||
|
alert_service = get_alert_service(db)
|
||||||
|
|
||||||
|
alerts = alert_service.get_all_alerts(
|
||||||
|
status=status,
|
||||||
|
project_id=project_id,
|
||||||
|
unit_id=unit_id,
|
||||||
|
alert_type=alert_type,
|
||||||
|
limit=limit,
|
||||||
|
offset=offset,
|
||||||
|
)
|
||||||
|
|
||||||
|
return {
|
||||||
|
"alerts": [
|
||||||
|
{
|
||||||
|
"id": a.id,
|
||||||
|
"alert_type": a.alert_type,
|
||||||
|
"severity": a.severity,
|
||||||
|
"title": a.title,
|
||||||
|
"message": a.message,
|
||||||
|
"status": a.status,
|
||||||
|
"unit_id": a.unit_id,
|
||||||
|
"project_id": a.project_id,
|
||||||
|
"location_id": a.location_id,
|
||||||
|
"created_at": a.created_at.isoformat() if a.created_at else None,
|
||||||
|
"acknowledged_at": a.acknowledged_at.isoformat() if a.acknowledged_at else None,
|
||||||
|
"resolved_at": a.resolved_at.isoformat() if a.resolved_at else None,
|
||||||
|
}
|
||||||
|
for a in alerts
|
||||||
|
],
|
||||||
|
"count": len(alerts),
|
||||||
|
"limit": limit,
|
||||||
|
"offset": offset,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/active")
|
||||||
|
async def list_active_alerts(
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
project_id: Optional[str] = Query(None),
|
||||||
|
unit_id: Optional[str] = Query(None),
|
||||||
|
alert_type: Optional[str] = Query(None),
|
||||||
|
min_severity: Optional[str] = Query(None, description="Minimum severity: info, warning, critical"),
|
||||||
|
limit: int = Query(50, le=100),
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
List only active alerts.
|
||||||
|
"""
|
||||||
|
alert_service = get_alert_service(db)
|
||||||
|
|
||||||
|
alerts = alert_service.get_active_alerts(
|
||||||
|
project_id=project_id,
|
||||||
|
unit_id=unit_id,
|
||||||
|
alert_type=alert_type,
|
||||||
|
min_severity=min_severity,
|
||||||
|
limit=limit,
|
||||||
|
)
|
||||||
|
|
||||||
|
return {
|
||||||
|
"alerts": [
|
||||||
|
{
|
||||||
|
"id": a.id,
|
||||||
|
"alert_type": a.alert_type,
|
||||||
|
"severity": a.severity,
|
||||||
|
"title": a.title,
|
||||||
|
"message": a.message,
|
||||||
|
"unit_id": a.unit_id,
|
||||||
|
"project_id": a.project_id,
|
||||||
|
"created_at": a.created_at.isoformat() if a.created_at else None,
|
||||||
|
}
|
||||||
|
for a in alerts
|
||||||
|
],
|
||||||
|
"count": len(alerts),
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/active/count")
|
||||||
|
async def get_active_alert_count(db: Session = Depends(get_db)):
|
||||||
|
"""
|
||||||
|
Get count of active alerts (for navbar badge).
|
||||||
|
"""
|
||||||
|
alert_service = get_alert_service(db)
|
||||||
|
count = alert_service.get_active_alert_count()
|
||||||
|
return {"count": count}
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Single Alert Operations
|
||||||
|
# ============================================================================

@router.get("/{alert_id}")
async def get_alert(
    alert_id: str,
    db: Session = Depends(get_db),
):
    """
    Get a specific alert.
    """
    alert = db.query(Alert).filter_by(id=alert_id).first()
    if not alert:
        raise HTTPException(status_code=404, detail="Alert not found")

    # Get related unit info
    unit = None
    if alert.unit_id:
        unit = db.query(RosterUnit).filter_by(id=alert.unit_id).first()

    return {
        "id": alert.id,
        "alert_type": alert.alert_type,
        "severity": alert.severity,
        "title": alert.title,
        "message": alert.message,
        "metadata": alert.alert_metadata,
        "status": alert.status,
        "unit_id": alert.unit_id,
        "unit_name": unit.id if unit else None,
        "project_id": alert.project_id,
        "location_id": alert.location_id,
        "schedule_id": alert.schedule_id,
        "created_at": alert.created_at.isoformat() if alert.created_at else None,
        "acknowledged_at": alert.acknowledged_at.isoformat() if alert.acknowledged_at else None,
        "resolved_at": alert.resolved_at.isoformat() if alert.resolved_at else None,
        "expires_at": alert.expires_at.isoformat() if alert.expires_at else None,
    }


@router.post("/{alert_id}/acknowledge")
async def acknowledge_alert(
    alert_id: str,
    db: Session = Depends(get_db),
):
    """
    Mark alert as acknowledged.
    """
    alert_service = get_alert_service(db)
    alert = alert_service.acknowledge_alert(alert_id)

    if not alert:
        raise HTTPException(status_code=404, detail="Alert not found")

    return {
        "success": True,
        "alert_id": alert.id,
        "status": alert.status,
    }


@router.post("/{alert_id}/dismiss")
async def dismiss_alert(
    alert_id: str,
    db: Session = Depends(get_db),
):
    """
    Dismiss alert.
    """
    alert_service = get_alert_service(db)
    alert = alert_service.dismiss_alert(alert_id)

    if not alert:
        raise HTTPException(status_code=404, detail="Alert not found")

    return {
        "success": True,
        "alert_id": alert.id,
        "status": alert.status,
    }


@router.post("/{alert_id}/resolve")
async def resolve_alert(
    alert_id: str,
    db: Session = Depends(get_db),
):
    """
    Manually resolve an alert.
    """
    alert_service = get_alert_service(db)
    alert = alert_service.resolve_alert(alert_id)

    if not alert:
        raise HTTPException(status_code=404, detail="Alert not found")

    return {
        "success": True,
        "alert_id": alert.id,
        "status": alert.status,
    }


# ============================================================================
# HTML Partials for HTMX
# ============================================================================

@router.get("/partials/dropdown", response_class=HTMLResponse)
async def get_alert_dropdown(
    request: Request,
    db: Session = Depends(get_db),
):
    """
    Return HTML partial for alert dropdown in navbar.
    """
    alert_service = get_alert_service(db)
    alerts = alert_service.get_active_alerts(limit=10)

    # Calculate relative time for each alert
    now = datetime.utcnow()
    alerts_data = []
    for alert in alerts:
        delta = now - alert.created_at
        if delta.days > 0:
            time_ago = f"{delta.days}d ago"
        elif delta.seconds >= 3600:
            time_ago = f"{delta.seconds // 3600}h ago"
        elif delta.seconds >= 60:
            time_ago = f"{delta.seconds // 60}m ago"
        else:
            time_ago = "just now"

        alerts_data.append({
            "alert": alert,
            "time_ago": time_ago,
        })

    return templates.TemplateResponse("partials/alerts/alert_dropdown.html", {
        "request": request,
        "alerts": alerts_data,
        "total_count": alert_service.get_active_alert_count(),
    })


@router.get("/partials/list", response_class=HTMLResponse)
async def get_alert_list(
    request: Request,
    db: Session = Depends(get_db),
    status: Optional[str] = Query(None),
    limit: int = Query(20),
):
    """
    Return HTML partial for alert list page.
    """
    alert_service = get_alert_service(db)

    if status:
        alerts = alert_service.get_all_alerts(status=status, limit=limit)
    else:
        alerts = alert_service.get_all_alerts(limit=limit)

    # Calculate relative time for each alert
    now = datetime.utcnow()
    alerts_data = []
    for alert in alerts:
        delta = now - alert.created_at
        if delta.days > 0:
            time_ago = f"{delta.days}d ago"
        elif delta.seconds >= 3600:
            time_ago = f"{delta.seconds // 3600}h ago"
        elif delta.seconds >= 60:
            time_ago = f"{delta.seconds // 60}m ago"
        else:
            time_ago = "just now"

        alerts_data.append({
            "alert": alert,
            "time_ago": time_ago,
        })

    return templates.TemplateResponse("partials/alerts/alert_list.html", {
        "request": request,
        "alerts": alerts_data,
        "status_filter": status,
    })


# ============================================================================
# Cleanup
# ============================================================================

@router.post("/cleanup-expired")
async def cleanup_expired_alerts(db: Session = Depends(get_db)):
    """
    Cleanup expired alerts (admin/maintenance endpoint).
    """
    alert_service = get_alert_service(db)
    count = alert_service.cleanup_expired_alerts()

    return {
        "success": True,
        "cleaned_up": count,
    }
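The three lifecycle endpoints above (acknowledge, dismiss, resolve) share one request/response shape, so a client can drive all of them with a single helper. A minimal sketch, assuming the router is mounted at /api/alerts on localhost:8000 (both assumptions, not shown in this diff):

import httpx

BASE = "http://localhost:8000/api/alerts"  # assumed mount point; adjust to your deployment

def transition_alert(alert_id: str, action: str) -> dict:
    # action is one of: "acknowledge", "dismiss", "resolve"
    resp = httpx.post(f"{BASE}/{alert_id}/{action}")
    resp.raise_for_status()  # a 404 here means the alert does not exist
    return resp.json()       # {"success": True, "alert_id": ..., "status": ...}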
@@ -1,10 +1,15 @@
 from fastapi import APIRouter, Request, Depends
-from fastapi.templating import Jinja2Templates
+from sqlalchemy.orm import Session
+from sqlalchemy import and_
+from datetime import datetime, timedelta
+
+from backend.database import get_db
+from backend.models import ScheduledAction, MonitoringLocation, Project
 from backend.services.snapshot import emit_status_snapshot
+from backend.templates_config import templates
+from backend.utils.timezone import utc_to_local, local_to_utc, get_user_timezone
 
 router = APIRouter()
-templates = Jinja2Templates(directory="templates")
 
 
 @router.get("/dashboard/active")
@@ -23,3 +28,79 @@ def dashboard_benched(request: Request):
         "partials/benched_table.html",
         {"request": request, "units": snapshot["benched"]}
     )
+
+
+@router.get("/dashboard/todays-actions")
+def dashboard_todays_actions(request: Request, db: Session = Depends(get_db)):
+    """
+    Get today's scheduled actions for the dashboard card.
+    Shows upcoming, completed, and failed actions for today.
+    """
+    import json
+    from zoneinfo import ZoneInfo
+
+    # Get today's date range in local timezone
+    tz = ZoneInfo(get_user_timezone())
+    now_local = datetime.now(tz)
+    today_start_local = now_local.replace(hour=0, minute=0, second=0, microsecond=0)
+    today_end_local = today_start_local + timedelta(days=1)
+
+    # Convert to UTC for database query
+    today_start_utc = today_start_local.astimezone(ZoneInfo("UTC")).replace(tzinfo=None)
+    today_end_utc = today_end_local.astimezone(ZoneInfo("UTC")).replace(tzinfo=None)
+
+    # Exclude actions from paused/removed projects
+    paused_project_ids = [
+        p.id for p in db.query(Project.id).filter(
+            Project.status.in_(["on_hold", "archived", "deleted"])
+        ).all()
+    ]
+
+    # Query today's actions
+    actions = db.query(ScheduledAction).filter(
+        ScheduledAction.scheduled_time >= today_start_utc,
+        ScheduledAction.scheduled_time < today_end_utc,
+        ScheduledAction.project_id.notin_(paused_project_ids),
+    ).order_by(ScheduledAction.scheduled_time.asc()).all()
+
+    # Enrich with location/project info and parse results
+    enriched_actions = []
+    for action in actions:
+        location = None
+        project = None
+        if action.location_id:
+            location = db.query(MonitoringLocation).filter_by(id=action.location_id).first()
+        if action.project_id:
+            project = db.query(Project).filter_by(id=action.project_id).first()
+
+        # Parse module_response for result details
+        result_data = None
+        if action.module_response:
+            try:
+                result_data = json.loads(action.module_response)
+            except json.JSONDecodeError:
+                pass
+
+        enriched_actions.append({
+            "action": action,
+            "location": location,
+            "project": project,
+            "result": result_data,
+        })
+
+    # Count by status
+    pending_count = sum(1 for a in actions if a.execution_status == "pending")
+    completed_count = sum(1 for a in actions if a.execution_status == "completed")
+    failed_count = sum(1 for a in actions if a.execution_status == "failed")
+
+    return templates.TemplateResponse(
+        "partials/dashboard/todays_actions.html",
+        {
+            "request": request,
+            "actions": enriched_actions,
+            "pending_count": pending_count,
+            "completed_count": completed_count,
+            "failed_count": failed_count,
+            "total_count": len(actions),
+        }
+    )
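The subtle step above is converting local midnight to naive UTC before filtering, since ScheduledAction.scheduled_time appears to be stored as a naive UTC timestamp. The same conversion in isolation (the timezone name is illustrative; the app reads it from user settings):

from datetime import datetime, timedelta
from zoneinfo import ZoneInfo

tz = ZoneInfo("America/New_York")  # illustrative value
start_local = datetime.now(tz).replace(hour=0, minute=0, second=0, microsecond=0)
end_local = start_local + timedelta(days=1)

# Strip tzinfo so the values compare against naive-UTC datetime columns
start_utc = start_local.astimezone(ZoneInfo("UTC")).replace(tzinfo=None)
end_utc = end_local.astimezone(ZoneInfo("UTC")).replace(tzinfo=None)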
backend/routers/deployments.py (new file, 154 lines)
@@ -0,0 +1,154 @@
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from datetime import datetime, date
from typing import Optional
import uuid

from backend.database import get_db
from backend.models import DeploymentRecord, RosterUnit

router = APIRouter(prefix="/api", tags=["deployments"])


def _serialize(record: DeploymentRecord) -> dict:
    return {
        "id": record.id,
        "unit_id": record.unit_id,
        "deployed_date": record.deployed_date.isoformat() if record.deployed_date else None,
        "estimated_removal_date": record.estimated_removal_date.isoformat() if record.estimated_removal_date else None,
        "actual_removal_date": record.actual_removal_date.isoformat() if record.actual_removal_date else None,
        "project_ref": record.project_ref,
        "project_id": record.project_id,
        "location_name": record.location_name,
        "notes": record.notes,
        "created_at": record.created_at.isoformat() if record.created_at else None,
        "updated_at": record.updated_at.isoformat() if record.updated_at else None,
        "is_active": record.actual_removal_date is None,
    }


@router.get("/deployments/{unit_id}")
def get_deployments(unit_id: str, db: Session = Depends(get_db)):
    """Get all deployment records for a unit, newest first."""
    unit = db.query(RosterUnit).filter_by(id=unit_id).first()
    if not unit:
        raise HTTPException(status_code=404, detail=f"Unit {unit_id} not found")

    records = (
        db.query(DeploymentRecord)
        .filter_by(unit_id=unit_id)
        .order_by(DeploymentRecord.deployed_date.desc(), DeploymentRecord.created_at.desc())
        .all()
    )
    return {"deployments": [_serialize(r) for r in records]}


@router.get("/deployments/{unit_id}/active")
def get_active_deployment(unit_id: str, db: Session = Depends(get_db)):
    """Get the current active deployment (actual_removal_date is NULL), or null."""
    record = (
        db.query(DeploymentRecord)
        .filter(
            DeploymentRecord.unit_id == unit_id,
            DeploymentRecord.actual_removal_date == None
        )
        .order_by(DeploymentRecord.created_at.desc())
        .first()
    )
    return {"deployment": _serialize(record) if record else None}


@router.post("/deployments/{unit_id}")
def create_deployment(unit_id: str, payload: dict, db: Session = Depends(get_db)):
    """
    Create a new deployment record for a unit.

    Body fields (all optional):
        deployed_date (YYYY-MM-DD)
        estimated_removal_date (YYYY-MM-DD)
        project_ref (freeform string)
        project_id (UUID if linked to Project)
        location_name
        notes
    """
    unit = db.query(RosterUnit).filter_by(id=unit_id).first()
    if not unit:
        raise HTTPException(status_code=404, detail=f"Unit {unit_id} not found")

    def parse_date(val) -> Optional[date]:
        if not val:
            return None
        if isinstance(val, date):
            return val
        return date.fromisoformat(str(val))

    record = DeploymentRecord(
        id=str(uuid.uuid4()),
        unit_id=unit_id,
        deployed_date=parse_date(payload.get("deployed_date")),
        estimated_removal_date=parse_date(payload.get("estimated_removal_date")),
        actual_removal_date=None,
        project_ref=payload.get("project_ref"),
        project_id=payload.get("project_id"),
        location_name=payload.get("location_name"),
        notes=payload.get("notes"),
        created_at=datetime.utcnow(),
        updated_at=datetime.utcnow(),
    )
    db.add(record)
    db.commit()
    db.refresh(record)
    return _serialize(record)


@router.put("/deployments/{unit_id}/{deployment_id}")
def update_deployment(unit_id: str, deployment_id: str, payload: dict, db: Session = Depends(get_db)):
    """
    Update a deployment record. Used for:
    - Setting/changing estimated_removal_date
    - Closing a deployment (set actual_removal_date to mark unit returned)
    - Editing project_ref, location_name, notes
    """
    record = db.query(DeploymentRecord).filter_by(id=deployment_id, unit_id=unit_id).first()
    if not record:
        raise HTTPException(status_code=404, detail="Deployment record not found")

    def parse_date(val) -> Optional[date]:
        if val is None:
            return None
        if val == "":
            return None
        if isinstance(val, date):
            return val
        return date.fromisoformat(str(val))

    if "deployed_date" in payload:
        record.deployed_date = parse_date(payload["deployed_date"])
    if "estimated_removal_date" in payload:
        record.estimated_removal_date = parse_date(payload["estimated_removal_date"])
    if "actual_removal_date" in payload:
        record.actual_removal_date = parse_date(payload["actual_removal_date"])
    if "project_ref" in payload:
        record.project_ref = payload["project_ref"]
    if "project_id" in payload:
        record.project_id = payload["project_id"]
    if "location_name" in payload:
        record.location_name = payload["location_name"]
    if "notes" in payload:
        record.notes = payload["notes"]

    record.updated_at = datetime.utcnow()
    db.commit()
    db.refresh(record)
    return _serialize(record)


@router.delete("/deployments/{unit_id}/{deployment_id}")
def delete_deployment(unit_id: str, deployment_id: str, db: Session = Depends(get_db)):
    """Delete a deployment record."""
    record = db.query(DeploymentRecord).filter_by(id=deployment_id, unit_id=unit_id).first()
    if not record:
        raise HTTPException(status_code=404, detail="Deployment record not found")
    db.delete(record)
    db.commit()
    return {"ok": True}
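A deployment's lifecycle under this API is open-with-POST, close-with-PUT: creation always leaves actual_removal_date NULL, and setting it later flips is_active to false. A sketch with curl (host, unit ID, deployment ID, and dates are all placeholders):

# Open a deployment for a unit
curl -X POST http://localhost:8000/api/deployments/BE17354 \
  -H 'Content-Type: application/json' \
  -d '{"deployed_date": "2025-03-01", "project_ref": "Job 1234", "location_name": "North wall"}'

# Close it later by setting actual_removal_date
curl -X PUT http://localhost:8000/api/deployments/BE17354/<deployment_id> \
  -H 'Content-Type: application/json' \
  -d '{"actual_removal_date": "2025-04-15"}'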
backend/routers/fleet_calendar.py (new file, 928 lines)
@@ -0,0 +1,928 @@
"""
Fleet Calendar Router

API endpoints for the Fleet Calendar feature:
- Calendar page and data
- Job reservation CRUD
- Unit assignment management
- Availability checking
"""

from fastapi import APIRouter, Request, Depends, HTTPException, Query
from fastapi.responses import HTMLResponse, JSONResponse
from sqlalchemy.orm import Session
from datetime import datetime, date, timedelta
from typing import Optional, List
import uuid
import logging

from backend.database import get_db
from backend.models import (
    RosterUnit, JobReservation, JobReservationUnit,
    UserPreferences, Project, MonitoringLocation, UnitAssignment
)
from backend.templates_config import templates
from backend.services.fleet_calendar_service import (
    get_day_summary,
    get_calendar_year_data,
    get_rolling_calendar_data,
    check_calibration_conflicts,
    get_available_units_for_period,
    get_calibration_status
)

router = APIRouter(tags=["fleet-calendar"])
logger = logging.getLogger(__name__)


# ============================================================================
# Calendar Page
# ============================================================================

@router.get("/fleet-calendar", response_class=HTMLResponse)
async def fleet_calendar_page(
    request: Request,
    year: Optional[int] = None,
    month: Optional[int] = None,
    device_type: str = "seismograph",
    db: Session = Depends(get_db)
):
    """Main Fleet Calendar page with rolling 12-month view."""
    today = date.today()

    # Default to current month as the start
    if year is None:
        year = today.year
    if month is None:
        month = today.month

    # Get calendar data for 12 months starting from year/month
    calendar_data = get_rolling_calendar_data(db, year, month, device_type)

    # Get projects for the reservation form dropdown
    projects = db.query(Project).filter(
        Project.status.in_(["active", "upcoming", "on_hold"])
    ).order_by(Project.name).all()

    # Build a serializable list of items with dates for calendar bars.
    # Includes both tracked Projects (with dates) and Job Reservations (matching device_type).
    project_colors = ['#3B82F6', '#10B981', '#F59E0B', '#EF4444', '#8B5CF6', '#EC4899', '#06B6D4', '#F97316']
    # Map calendar device_type to project_type_ids
    device_type_to_project_types = {
        "seismograph": ["vibration_monitoring", "combined"],
        "slm": ["sound_monitoring", "combined"],
    }
    relevant_project_types = device_type_to_project_types.get(device_type, [])

    calendar_projects = []
    for i, p in enumerate(projects):
        if p.start_date and p.project_type_id in relevant_project_types:
            calendar_projects.append({
                "id": p.id,
                "name": p.name,
                "start_date": p.start_date.isoformat(),
                "end_date": p.end_date.isoformat() if p.end_date else None,
                "color": project_colors[i % len(project_colors)],
                "confirmed": True,
            })

    # Add job reservations for this device_type as bars
    from sqlalchemy import or_ as _or
    cal_window_end = date(year + ((month + 10) // 12), ((month + 10) % 12) + 1, 1)
    reservations_for_cal = db.query(JobReservation).filter(
        JobReservation.device_type == device_type,
        JobReservation.start_date <= cal_window_end,
        _or(
            JobReservation.end_date >= date(year, month, 1),
            JobReservation.end_date == None,
        )
    ).all()
    for res in reservations_for_cal:
        end = res.end_date or res.estimated_end_date
        calendar_projects.append({
            "id": res.id,
            "name": res.name,
            "start_date": res.start_date.isoformat(),
            "end_date": end.isoformat() if end else None,
            "color": res.color,
            "confirmed": bool(res.project_id),
        })

    # Calculate prev/next month navigation
    prev_year, prev_month = (year - 1, 12) if month == 1 else (year, month - 1)
    next_year, next_month = (year + 1, 1) if month == 12 else (year, month + 1)

    return templates.TemplateResponse(
        "fleet_calendar.html",
        {
            "request": request,
            "start_year": year,
            "start_month": month,
            "prev_year": prev_year,
            "prev_month": prev_month,
            "next_year": next_year,
            "next_month": next_month,
            "device_type": device_type,
            "calendar_data": calendar_data,
            "projects": projects,
            "calendar_projects": calendar_projects,
            "today": today.isoformat()
        }
    )
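The cal_window_end arithmetic above packs the rolling-window month math into one expression: (month + 10) // 12 carries the year and ((month + 10) % 12) + 1 yields the first day of the window's twelfth month. A quick standalone check of that identity (illustrative only, not part of the diff):

def window_last_month_start(year: int, month: int) -> tuple:
    # First day of the 12th month of a rolling window starting at (year, month)
    return year + ((month + 10) // 12), ((month + 10) % 12) + 1

assert window_last_month_start(2025, 1) == (2025, 12)   # Jan 2025 .. Dec 2025
assert window_last_month_start(2025, 3) == (2026, 2)    # Mar 2025 .. Feb 2026
assert window_last_month_start(2025, 12) == (2026, 11)  # Dec 2025 .. Nov 2026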
# ============================================================================
# Calendar Data API
# ============================================================================

@router.get("/api/fleet-calendar/data", response_class=JSONResponse)
async def get_calendar_data(
    year: int,
    device_type: str = "seismograph",
    db: Session = Depends(get_db)
):
    """Get calendar data for a specific year."""
    return get_calendar_year_data(db, year, device_type)


@router.get("/api/fleet-calendar/day/{date_str}", response_class=HTMLResponse)
async def get_day_detail(
    request: Request,
    date_str: str,
    device_type: str = "seismograph",
    db: Session = Depends(get_db)
):
    """Get detailed view for a specific day (HTMX partial)."""
    try:
        check_date = date.fromisoformat(date_str)
    except ValueError:
        raise HTTPException(status_code=400, detail="Invalid date format. Use YYYY-MM-DD")

    day_data = get_day_summary(db, check_date, device_type)

    # Get projects for display names
    projects = {p.id: p for p in db.query(Project).all()}

    return templates.TemplateResponse(
        "partials/fleet_calendar/day_detail.html",
        {
            "request": request,
            "day_data": day_data,
            "date_str": date_str,
            "date_display": check_date.strftime("%B %d, %Y"),
            "device_type": device_type,
            "projects": projects
        }
    )


# ============================================================================
# Reservation CRUD
# ============================================================================

@router.post("/api/fleet-calendar/reservations", response_class=JSONResponse)
async def create_reservation(
    request: Request,
    db: Session = Depends(get_db)
):
    """Create a new job reservation."""
    data = await request.json()

    # Validate required fields
    required = ["name", "start_date", "assignment_type"]
    for field in required:
        if field not in data:
            raise HTTPException(status_code=400, detail=f"Missing required field: {field}")

    # Need either end_date or end_date_tbd
    end_date_tbd = data.get("end_date_tbd", False)
    if not end_date_tbd and not data.get("end_date"):
        raise HTTPException(status_code=400, detail="End date is required unless marked as TBD")

    try:
        start_date = date.fromisoformat(data["start_date"])
        end_date = date.fromisoformat(data["end_date"]) if data.get("end_date") else None
        estimated_end_date = date.fromisoformat(data["estimated_end_date"]) if data.get("estimated_end_date") else None
    except ValueError:
        raise HTTPException(status_code=400, detail="Invalid date format. Use YYYY-MM-DD")

    if end_date and end_date < start_date:
        raise HTTPException(status_code=400, detail="End date must be after start date")

    if estimated_end_date and estimated_end_date < start_date:
        raise HTTPException(status_code=400, detail="Estimated end date must be after start date")

    import json as _json
    reservation = JobReservation(
        id=str(uuid.uuid4()),
        name=data["name"],
        project_id=data.get("project_id"),
        start_date=start_date,
        end_date=end_date,
        estimated_end_date=estimated_end_date,
        end_date_tbd=end_date_tbd,
        assignment_type=data["assignment_type"],
        device_type=data.get("device_type", "seismograph"),
        quantity_needed=data.get("quantity_needed"),
        estimated_units=data.get("estimated_units"),
        location_slots=_json.dumps(data["location_slots"]) if data.get("location_slots") is not None else None,
        notes=data.get("notes"),
        color=data.get("color", "#3B82F6")
    )

    db.add(reservation)

    # If specific units were provided, assign them
    if data.get("unit_ids") and data["assignment_type"] == "specific":
        for unit_id in data["unit_ids"]:
            assignment = JobReservationUnit(
                id=str(uuid.uuid4()),
                reservation_id=reservation.id,
                unit_id=unit_id,
                assignment_source="specific"
            )
            db.add(assignment)

    db.commit()

    logger.info(f"Created reservation: {reservation.name} ({reservation.id})")

    return {
        "success": True,
        "reservation_id": reservation.id,
        "message": f"Created reservation: {reservation.name}"
    }
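Reading the validation back: a request must carry name, start_date, and assignment_type, plus either end_date or end_date_tbd. An illustrative body for the specific-units path (all values invented):

POST /api/fleet-calendar/reservations
{
  "name": "Main St bridge job",
  "start_date": "2025-06-02",
  "end_date": "2025-06-20",
  "assignment_type": "specific",
  "device_type": "seismograph",
  "unit_ids": ["BE17354", "BE9441"],
  "color": "#10B981"
}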
@router.get("/api/fleet-calendar/reservations/{reservation_id}", response_class=JSONResponse)
|
||||||
|
async def get_reservation(
|
||||||
|
reservation_id: str,
|
||||||
|
db: Session = Depends(get_db)
|
||||||
|
):
|
||||||
|
"""Get a specific reservation with its assigned units."""
|
||||||
|
reservation = db.query(JobReservation).filter_by(id=reservation_id).first()
|
||||||
|
if not reservation:
|
||||||
|
raise HTTPException(status_code=404, detail="Reservation not found")
|
||||||
|
|
||||||
|
# Get assigned units
|
||||||
|
assignments = db.query(JobReservationUnit).filter_by(
|
||||||
|
reservation_id=reservation_id
|
||||||
|
).all()
|
||||||
|
|
||||||
|
# Sort assignments by slot_index so order is preserved
|
||||||
|
assignments_sorted = sorted(assignments, key=lambda a: (a.slot_index if a.slot_index is not None else 999))
|
||||||
|
unit_ids = [a.unit_id for a in assignments_sorted]
|
||||||
|
units = db.query(RosterUnit).filter(RosterUnit.id.in_(unit_ids)).all() if unit_ids else []
|
||||||
|
units_by_id = {u.id: u for u in units}
|
||||||
|
# Build per-unit lookups from assignments
|
||||||
|
assignment_map = {a.unit_id: a for a in assignments_sorted}
|
||||||
|
|
||||||
|
import json as _json
|
||||||
|
stored_slots = _json.loads(reservation.location_slots) if reservation.location_slots else None
|
||||||
|
|
||||||
|
return {
|
||||||
|
"id": reservation.id,
|
||||||
|
"name": reservation.name,
|
||||||
|
"project_id": reservation.project_id,
|
||||||
|
"start_date": reservation.start_date.isoformat(),
|
||||||
|
"end_date": reservation.end_date.isoformat() if reservation.end_date else None,
|
||||||
|
"estimated_end_date": reservation.estimated_end_date.isoformat() if reservation.estimated_end_date else None,
|
||||||
|
"end_date_tbd": reservation.end_date_tbd,
|
||||||
|
"assignment_type": reservation.assignment_type,
|
||||||
|
"device_type": reservation.device_type,
|
||||||
|
"quantity_needed": reservation.quantity_needed,
|
||||||
|
"estimated_units": reservation.estimated_units,
|
||||||
|
"location_slots": stored_slots,
|
||||||
|
"notes": reservation.notes,
|
||||||
|
"color": reservation.color,
|
||||||
|
"assigned_units": [
|
||||||
|
{
|
||||||
|
"id": uid,
|
||||||
|
"last_calibrated": units_by_id[uid].last_calibrated.isoformat() if uid in units_by_id and units_by_id[uid].last_calibrated else None,
|
||||||
|
"deployed": units_by_id[uid].deployed if uid in units_by_id else False,
|
||||||
|
"power_type": assignment_map[uid].power_type,
|
||||||
|
"notes": assignment_map[uid].notes,
|
||||||
|
"location_name": assignment_map[uid].location_name,
|
||||||
|
"slot_index": assignment_map[uid].slot_index,
|
||||||
|
}
|
||||||
|
for uid in unit_ids
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@router.put("/api/fleet-calendar/reservations/{reservation_id}", response_class=JSONResponse)
|
||||||
|
async def update_reservation(
|
||||||
|
reservation_id: str,
|
||||||
|
request: Request,
|
||||||
|
db: Session = Depends(get_db)
|
||||||
|
):
|
||||||
|
"""Update an existing reservation."""
|
||||||
|
reservation = db.query(JobReservation).filter_by(id=reservation_id).first()
|
||||||
|
if not reservation:
|
||||||
|
raise HTTPException(status_code=404, detail="Reservation not found")
|
||||||
|
|
||||||
|
data = await request.json()
|
||||||
|
|
||||||
|
# Update fields if provided
|
||||||
|
if "name" in data:
|
||||||
|
reservation.name = data["name"]
|
||||||
|
if "project_id" in data:
|
||||||
|
reservation.project_id = data["project_id"]
|
||||||
|
if "start_date" in data:
|
||||||
|
reservation.start_date = date.fromisoformat(data["start_date"])
|
||||||
|
if "end_date" in data:
|
||||||
|
reservation.end_date = date.fromisoformat(data["end_date"]) if data["end_date"] else None
|
||||||
|
if "estimated_end_date" in data:
|
||||||
|
reservation.estimated_end_date = date.fromisoformat(data["estimated_end_date"]) if data["estimated_end_date"] else None
|
||||||
|
if "end_date_tbd" in data:
|
||||||
|
reservation.end_date_tbd = data["end_date_tbd"]
|
||||||
|
if "assignment_type" in data:
|
||||||
|
reservation.assignment_type = data["assignment_type"]
|
||||||
|
if "quantity_needed" in data:
|
||||||
|
reservation.quantity_needed = data["quantity_needed"]
|
||||||
|
if "estimated_units" in data:
|
||||||
|
reservation.estimated_units = data["estimated_units"]
|
||||||
|
if "location_slots" in data:
|
||||||
|
import json as _json
|
||||||
|
reservation.location_slots = _json.dumps(data["location_slots"]) if data["location_slots"] is not None else None
|
||||||
|
if "notes" in data:
|
||||||
|
reservation.notes = data["notes"]
|
||||||
|
if "color" in data:
|
||||||
|
reservation.color = data["color"]
|
||||||
|
|
||||||
|
reservation.updated_at = datetime.utcnow()
|
||||||
|
|
||||||
|
db.commit()
|
||||||
|
|
||||||
|
logger.info(f"Updated reservation: {reservation.name} ({reservation.id})")
|
||||||
|
|
||||||
|
return {
|
||||||
|
"success": True,
|
||||||
|
"message": f"Updated reservation: {reservation.name}"
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@router.delete("/api/fleet-calendar/reservations/{reservation_id}", response_class=JSONResponse)
|
||||||
|
async def delete_reservation(
|
||||||
|
reservation_id: str,
|
||||||
|
db: Session = Depends(get_db)
|
||||||
|
):
|
||||||
|
"""Delete a reservation and its unit assignments."""
|
||||||
|
reservation = db.query(JobReservation).filter_by(id=reservation_id).first()
|
||||||
|
if not reservation:
|
||||||
|
raise HTTPException(status_code=404, detail="Reservation not found")
|
||||||
|
|
||||||
|
# Delete unit assignments first
|
||||||
|
db.query(JobReservationUnit).filter_by(reservation_id=reservation_id).delete()
|
||||||
|
|
||||||
|
# Delete the reservation
|
||||||
|
db.delete(reservation)
|
||||||
|
db.commit()
|
||||||
|
|
||||||
|
logger.info(f"Deleted reservation: {reservation.name} ({reservation_id})")
|
||||||
|
|
||||||
|
return {
|
||||||
|
"success": True,
|
||||||
|
"message": "Reservation deleted"
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Unit Assignment
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
@router.post("/api/fleet-calendar/reservations/{reservation_id}/assign-units", response_class=JSONResponse)
|
||||||
|
async def assign_units_to_reservation(
|
||||||
|
reservation_id: str,
|
||||||
|
request: Request,
|
||||||
|
db: Session = Depends(get_db)
|
||||||
|
):
|
||||||
|
"""Assign specific units to a reservation."""
|
||||||
|
reservation = db.query(JobReservation).filter_by(id=reservation_id).first()
|
||||||
|
if not reservation:
|
||||||
|
raise HTTPException(status_code=404, detail="Reservation not found")
|
||||||
|
|
||||||
|
data = await request.json()
|
||||||
|
unit_ids = data.get("unit_ids", [])
|
||||||
|
# Optional per-unit dicts keyed by unit_id
|
||||||
|
power_types = data.get("power_types", {})
|
||||||
|
location_notes = data.get("location_notes", {})
|
||||||
|
location_names = data.get("location_names", {})
|
||||||
|
# slot_indices: {"BE17354": 0, "BE9441": 1, ...}
|
||||||
|
slot_indices = data.get("slot_indices", {})
|
||||||
|
|
||||||
|
# Verify units exist (allow empty list to clear all assignments)
|
||||||
|
if unit_ids:
|
||||||
|
units = db.query(RosterUnit).filter(RosterUnit.id.in_(unit_ids)).all()
|
||||||
|
found_ids = {u.id for u in units}
|
||||||
|
missing = set(unit_ids) - found_ids
|
||||||
|
if missing:
|
||||||
|
raise HTTPException(status_code=404, detail=f"Units not found: {', '.join(missing)}")
|
||||||
|
|
||||||
|
# Full replace: delete all existing assignments for this reservation first
|
||||||
|
db.query(JobReservationUnit).filter_by(reservation_id=reservation_id).delete()
|
||||||
|
db.flush()
|
||||||
|
|
||||||
|
# Check for conflicts with other reservations and insert new assignments
|
||||||
|
conflicts = []
|
||||||
|
for unit_id in unit_ids:
|
||||||
|
# Check overlapping reservations
|
||||||
|
if reservation.end_date:
|
||||||
|
overlapping = db.query(JobReservation).join(
|
||||||
|
JobReservationUnit, JobReservation.id == JobReservationUnit.reservation_id
|
||||||
|
).filter(
|
||||||
|
JobReservationUnit.unit_id == unit_id,
|
||||||
|
JobReservation.id != reservation_id,
|
||||||
|
JobReservation.start_date <= reservation.end_date,
|
||||||
|
JobReservation.end_date >= reservation.start_date
|
||||||
|
).first()
|
||||||
|
|
||||||
|
if overlapping:
|
||||||
|
conflicts.append({
|
||||||
|
"unit_id": unit_id,
|
||||||
|
"conflict_reservation": overlapping.name,
|
||||||
|
"conflict_dates": f"{overlapping.start_date} - {overlapping.end_date}"
|
||||||
|
})
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Add assignment
|
||||||
|
assignment = JobReservationUnit(
|
||||||
|
id=str(uuid.uuid4()),
|
||||||
|
reservation_id=reservation_id,
|
||||||
|
unit_id=unit_id,
|
||||||
|
assignment_source="filled" if reservation.assignment_type == "quantity" else "specific",
|
||||||
|
power_type=power_types.get(unit_id),
|
||||||
|
notes=location_notes.get(unit_id),
|
||||||
|
location_name=location_names.get(unit_id),
|
||||||
|
slot_index=slot_indices.get(unit_id),
|
||||||
|
)
|
||||||
|
db.add(assignment)
|
||||||
|
|
||||||
|
db.commit()
|
||||||
|
|
||||||
|
# Check for calibration conflicts
|
||||||
|
cal_conflicts = check_calibration_conflicts(db, reservation_id)
|
||||||
|
|
||||||
|
assigned_count = db.query(JobReservationUnit).filter_by(
|
||||||
|
reservation_id=reservation_id
|
||||||
|
).count()
|
||||||
|
|
||||||
|
return {
|
||||||
|
"success": True,
|
||||||
|
"assigned_count": assigned_count,
|
||||||
|
"conflicts": conflicts,
|
||||||
|
"calibration_warnings": cal_conflicts,
|
||||||
|
"message": f"Assigned {len(unit_ids) - len(conflicts)} units"
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
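Because assignment is a full replace (delete, then re-insert), the client always sends the complete unit list; per-unit metadata travels in parallel dicts keyed by unit ID, as the slot_indices comment shows. An illustrative payload (unit IDs taken from that comment, other values invented):

POST /api/fleet-calendar/reservations/<reservation_id>/assign-units
{
  "unit_ids": ["BE17354", "BE9441"],
  "power_types": {"BE17354": "battery", "BE9441": "solar"},
  "location_names": {"BE17354": "North abutment", "BE9441": "South abutment"},
  "slot_indices": {"BE17354": 0, "BE9441": 1}
}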
@router.delete("/api/fleet-calendar/reservations/{reservation_id}/units/{unit_id}", response_class=JSONResponse)
|
||||||
|
async def remove_unit_from_reservation(
|
||||||
|
reservation_id: str,
|
||||||
|
unit_id: str,
|
||||||
|
db: Session = Depends(get_db)
|
||||||
|
):
|
||||||
|
"""Remove a unit from a reservation."""
|
||||||
|
assignment = db.query(JobReservationUnit).filter_by(
|
||||||
|
reservation_id=reservation_id,
|
||||||
|
unit_id=unit_id
|
||||||
|
).first()
|
||||||
|
|
||||||
|
if not assignment:
|
||||||
|
raise HTTPException(status_code=404, detail="Unit assignment not found")
|
||||||
|
|
||||||
|
db.delete(assignment)
|
||||||
|
db.commit()
|
||||||
|
|
||||||
|
return {
|
||||||
|
"success": True,
|
||||||
|
"message": f"Removed {unit_id} from reservation"
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Availability & Conflicts
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
@router.get("/api/fleet-calendar/availability", response_class=JSONResponse)
|
||||||
|
async def check_availability(
|
||||||
|
start_date: str,
|
||||||
|
end_date: str,
|
||||||
|
device_type: str = "seismograph",
|
||||||
|
exclude_reservation_id: Optional[str] = None,
|
||||||
|
db: Session = Depends(get_db)
|
||||||
|
):
|
||||||
|
"""Get units available for a specific date range."""
|
||||||
|
try:
|
||||||
|
start = date.fromisoformat(start_date)
|
||||||
|
end = date.fromisoformat(end_date)
|
||||||
|
except ValueError:
|
||||||
|
raise HTTPException(status_code=400, detail="Invalid date format. Use YYYY-MM-DD")
|
||||||
|
|
||||||
|
available = get_available_units_for_period(
|
||||||
|
db, start, end, device_type, exclude_reservation_id
|
||||||
|
)
|
||||||
|
|
||||||
|
return {
|
||||||
|
"start_date": start_date,
|
||||||
|
"end_date": end_date,
|
||||||
|
"device_type": device_type,
|
||||||
|
"available_units": available,
|
||||||
|
"count": len(available)
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/api/fleet-calendar/reservations/{reservation_id}/conflicts", response_class=JSONResponse)
|
||||||
|
async def get_reservation_conflicts(
|
||||||
|
reservation_id: str,
|
||||||
|
db: Session = Depends(get_db)
|
||||||
|
):
|
||||||
|
"""Check for calibration conflicts in a reservation."""
|
||||||
|
reservation = db.query(JobReservation).filter_by(id=reservation_id).first()
|
||||||
|
if not reservation:
|
||||||
|
raise HTTPException(status_code=404, detail="Reservation not found")
|
||||||
|
|
||||||
|
conflicts = check_calibration_conflicts(db, reservation_id)
|
||||||
|
|
||||||
|
return {
|
||||||
|
"reservation_id": reservation_id,
|
||||||
|
"reservation_name": reservation.name,
|
||||||
|
"conflicts": conflicts,
|
||||||
|
"has_conflicts": len(conflicts) > 0
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# HTMX Partials
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
@router.get("/api/fleet-calendar/reservations-list", response_class=HTMLResponse)
|
||||||
|
async def get_reservations_list(
|
||||||
|
request: Request,
|
||||||
|
year: Optional[int] = None,
|
||||||
|
month: Optional[int] = None,
|
||||||
|
device_type: str = "seismograph",
|
||||||
|
db: Session = Depends(get_db)
|
||||||
|
):
|
||||||
|
"""Get list of reservations as HTMX partial."""
|
||||||
|
from sqlalchemy import or_
|
||||||
|
|
||||||
|
today = date.today()
|
||||||
|
if year is None:
|
||||||
|
year = today.year
|
||||||
|
if month is None:
|
||||||
|
month = today.month
|
||||||
|
|
||||||
|
# Calculate 12-month window
|
||||||
|
start_date = date(year, month, 1)
|
||||||
|
# End date is 12 months later
|
||||||
|
end_year = year + ((month + 10) // 12)
|
||||||
|
end_month = ((month + 10) % 12) + 1
|
||||||
|
if end_month == 12:
|
||||||
|
end_date = date(end_year, 12, 31)
|
||||||
|
else:
|
||||||
|
end_date = date(end_year, end_month + 1, 1) - timedelta(days=1)
|
||||||
|
|
||||||
|
# Filter by device_type and date window
|
||||||
|
reservations = db.query(JobReservation).filter(
|
||||||
|
JobReservation.device_type == device_type,
|
||||||
|
JobReservation.start_date <= end_date,
|
||||||
|
or_(
|
||||||
|
JobReservation.end_date >= start_date,
|
||||||
|
JobReservation.end_date == None # TBD reservations
|
||||||
|
)
|
||||||
|
).order_by(JobReservation.start_date).all()
|
||||||
|
|
||||||
|
# Get assignment counts
|
||||||
|
reservation_data = []
|
||||||
|
for res in reservations:
|
||||||
|
assignments = db.query(JobReservationUnit).filter_by(
|
||||||
|
reservation_id=res.id
|
||||||
|
).all()
|
||||||
|
assigned_count = len(assignments)
|
||||||
|
|
||||||
|
# Enrich assignments with unit details, sorted by slot_index
|
||||||
|
assignments_sorted = sorted(assignments, key=lambda a: (a.slot_index if a.slot_index is not None else 999))
|
||||||
|
unit_ids = [a.unit_id for a in assignments_sorted]
|
||||||
|
units = db.query(RosterUnit).filter(RosterUnit.id.in_(unit_ids)).all() if unit_ids else []
|
||||||
|
units_by_id = {u.id: u for u in units}
|
||||||
|
assigned_units = [
|
||||||
|
{
|
||||||
|
"id": a.unit_id,
|
||||||
|
"power_type": a.power_type,
|
||||||
|
"notes": a.notes,
|
||||||
|
"location_name": a.location_name,
|
||||||
|
"slot_index": a.slot_index,
|
||||||
|
"deployed": units_by_id[a.unit_id].deployed if a.unit_id in units_by_id else False,
|
||||||
|
"last_calibrated": units_by_id[a.unit_id].last_calibrated if a.unit_id in units_by_id else None,
|
||||||
|
}
|
||||||
|
for a in assignments_sorted
|
||||||
|
]
|
||||||
|
|
||||||
|
# Check for calibration conflicts
|
||||||
|
conflicts = check_calibration_conflicts(db, res.id)
|
||||||
|
|
||||||
|
location_count = res.quantity_needed or assigned_count
|
||||||
|
reservation_data.append({
|
||||||
|
"reservation": res,
|
||||||
|
"assigned_count": assigned_count,
|
||||||
|
"location_count": location_count,
|
||||||
|
"assigned_units": assigned_units,
|
||||||
|
"has_conflicts": len(conflicts) > 0,
|
||||||
|
"conflict_count": len(conflicts)
|
||||||
|
})
|
||||||
|
|
||||||
|
return templates.TemplateResponse(
|
||||||
|
"partials/fleet_calendar/reservations_list.html",
|
||||||
|
{
|
||||||
|
"request": request,
|
||||||
|
"reservations": reservation_data,
|
||||||
|
"year": year,
|
||||||
|
"device_type": device_type
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/api/fleet-calendar/planner-availability", response_class=JSONResponse)
|
||||||
|
async def get_planner_availability(
|
||||||
|
device_type: str = "seismograph",
|
||||||
|
start_date: Optional[str] = None,
|
||||||
|
end_date: Optional[str] = None,
|
||||||
|
exclude_reservation_id: Optional[str] = None,
|
||||||
|
db: Session = Depends(get_db)
|
||||||
|
):
|
||||||
|
"""Get available units for the reservation planner split-panel UI.
|
||||||
|
Dates are optional — if omitted, returns all non-retired units regardless of reservations.
|
||||||
|
"""
|
||||||
|
if start_date and end_date:
|
||||||
|
try:
|
||||||
|
start = date.fromisoformat(start_date)
|
||||||
|
end = date.fromisoformat(end_date)
|
||||||
|
except ValueError:
|
||||||
|
raise HTTPException(status_code=400, detail="Invalid date format. Use YYYY-MM-DD")
|
||||||
|
units = get_available_units_for_period(db, start, end, device_type, exclude_reservation_id)
|
||||||
|
else:
|
||||||
|
# No dates: return all non-retired units of this type, with current reservation info
|
||||||
|
from backend.models import RosterUnit as RU
|
||||||
|
from datetime import timedelta
|
||||||
|
today = date.today()
|
||||||
|
all_units = db.query(RU).filter(
|
||||||
|
RU.device_type == device_type,
|
||||||
|
RU.retired == False
|
||||||
|
).all()
|
||||||
|
|
||||||
|
# Build a map: unit_id -> list of active/upcoming reservations
|
||||||
|
active_assignments = db.query(JobReservationUnit).join(
|
||||||
|
JobReservation, JobReservationUnit.reservation_id == JobReservation.id
|
||||||
|
).filter(
|
||||||
|
JobReservation.device_type == device_type,
|
||||||
|
JobReservation.end_date >= today
|
||||||
|
).all()
|
||||||
|
unit_reservations = {}
|
||||||
|
for assignment in active_assignments:
|
||||||
|
res = db.query(JobReservation).filter(JobReservation.id == assignment.reservation_id).first()
|
||||||
|
if not res:
|
||||||
|
continue
|
||||||
|
unit_reservations.setdefault(assignment.unit_id, []).append({
|
||||||
|
"reservation_id": res.id,
|
||||||
|
"reservation_name": res.name,
|
||||||
|
"start_date": res.start_date.isoformat() if res.start_date else None,
|
||||||
|
"end_date": res.end_date.isoformat() if res.end_date else None,
|
||||||
|
"color": res.color or "#3B82F6"
|
||||||
|
})
|
||||||
|
|
||||||
|
units = []
|
||||||
|
for u in all_units:
|
||||||
|
expiry = (u.last_calibrated + timedelta(days=365)) if u.last_calibrated else None
|
||||||
|
units.append({
|
||||||
|
"id": u.id,
|
||||||
|
"last_calibrated": u.last_calibrated.isoformat() if u.last_calibrated else None,
|
||||||
|
"expiry_date": expiry.isoformat() if expiry else None,
|
||||||
|
"calibration_status": "needs_calibration" if not u.last_calibrated else "valid",
|
||||||
|
"deployed": u.deployed,
|
||||||
|
"out_for_calibration": u.out_for_calibration or False,
|
||||||
|
"allocated": getattr(u, 'allocated', False) or False,
|
||||||
|
"allocated_to_project_id": getattr(u, 'allocated_to_project_id', None) or "",
|
||||||
|
"note": u.note or "",
|
||||||
|
"reservations": unit_reservations.get(u.id, [])
|
||||||
|
})
|
||||||
|
|
||||||
|
# Sort: benched first (easier to assign), then deployed, then by ID
|
||||||
|
units.sort(key=lambda u: (1 if u["deployed"] else 0, u["id"]))
|
||||||
|
|
||||||
|
return {
|
||||||
|
"units": units,
|
||||||
|
"start_date": start_date,
|
||||||
|
"end_date": end_date,
|
||||||
|
"count": len(units)
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/api/fleet-calendar/unit-quick-info/{unit_id}", response_class=JSONResponse)
|
||||||
|
async def get_unit_quick_info(unit_id: str, db: Session = Depends(get_db)):
|
||||||
|
"""Return at-a-glance info for the planner quick-view modal."""
|
||||||
|
from backend.models import Emitter
|
||||||
|
u = db.query(RosterUnit).filter(RosterUnit.id == unit_id).first()
|
||||||
|
if not u:
|
||||||
|
raise HTTPException(status_code=404, detail="Unit not found")
|
||||||
|
|
||||||
|
today = date.today()
|
||||||
|
expiry = (u.last_calibrated + timedelta(days=365)) if u.last_calibrated else None
|
||||||
|
|
||||||
|
# Active/upcoming reservations
|
||||||
|
assignments = db.query(JobReservationUnit).filter(JobReservationUnit.unit_id == unit_id).all()
|
||||||
|
reservations = []
|
||||||
|
for a in assignments:
|
||||||
|
res = db.query(JobReservation).filter(
|
||||||
|
JobReservation.id == a.reservation_id,
|
||||||
|
JobReservation.end_date >= today
|
||||||
|
).first()
|
||||||
|
if res:
|
||||||
|
reservations.append({
|
||||||
|
"name": res.name,
|
||||||
|
"start_date": res.start_date.isoformat() if res.start_date else None,
|
||||||
|
"end_date": res.end_date.isoformat() if res.end_date else None,
|
||||||
|
"end_date_tbd": res.end_date_tbd,
|
||||||
|
"color": res.color or "#3B82F6",
|
||||||
|
"location_name": a.location_name,
|
||||||
|
})
|
||||||
|
|
||||||
|
# Last seen from emitter
|
||||||
|
emitter = db.query(Emitter).filter(Emitter.unit_type == unit_id).first()
|
||||||
|
|
||||||
|
return {
|
||||||
|
"id": u.id,
|
||||||
|
"unit_type": u.unit_type,
|
||||||
|
"deployed": u.deployed,
|
||||||
|
"out_for_calibration": u.out_for_calibration or False,
|
||||||
|
"note": u.note or "",
|
||||||
|
"project_id": u.project_id or "",
|
||||||
|
"address": u.address or u.location or "",
|
||||||
|
"coordinates": u.coordinates or "",
|
||||||
|
"deployed_with_modem_id": u.deployed_with_modem_id or "",
|
||||||
|
"last_calibrated": u.last_calibrated.isoformat() if u.last_calibrated else None,
|
||||||
|
"next_calibration_due": u.next_calibration_due.isoformat() if u.next_calibration_due else (expiry.isoformat() if expiry else None),
|
||||||
|
"cal_expired": not u.last_calibrated or (expiry and expiry < today),
|
||||||
|
"last_seen": emitter.last_seen.isoformat() if emitter and emitter.last_seen else None,
|
||||||
|
"reservations": reservations,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/api/fleet-calendar/available-units", response_class=HTMLResponse)
|
||||||
|
async def get_available_units_partial(
|
||||||
|
request: Request,
|
||||||
|
start_date: str,
|
||||||
|
end_date: str,
|
||||||
|
device_type: str = "seismograph",
|
||||||
|
reservation_id: Optional[str] = None,
|
||||||
|
db: Session = Depends(get_db)
|
||||||
|
):
|
||||||
|
"""Get available units as HTMX partial for the assignment modal."""
|
||||||
|
try:
|
||||||
|
start = date.fromisoformat(start_date)
|
||||||
|
end = date.fromisoformat(end_date)
|
||||||
|
except ValueError:
|
||||||
|
raise HTTPException(status_code=400, detail="Invalid date format")
|
||||||
|
|
||||||
|
available = get_available_units_for_period(
|
||||||
|
db, start, end, device_type, reservation_id
|
||||||
|
)
|
||||||
|
|
||||||
|
return templates.TemplateResponse(
|
||||||
|
"partials/fleet_calendar/available_units.html",
|
||||||
|
{
|
||||||
|
"request": request,
|
||||||
|
"units": available,
|
||||||
|
"start_date": start_date,
|
||||||
|
"end_date": end_date,
|
||||||
|
"device_type": device_type,
|
||||||
|
"reservation_id": reservation_id
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/api/fleet-calendar/month/{year}/{month}", response_class=HTMLResponse)
|
||||||
|
async def get_month_partial(
|
||||||
|
request: Request,
|
||||||
|
year: int,
|
||||||
|
month: int,
|
||||||
|
device_type: str = "seismograph",
|
||||||
|
db: Session = Depends(get_db)
|
||||||
|
):
|
||||||
|
"""Get a single month calendar as HTMX partial."""
|
||||||
|
calendar_data = get_calendar_year_data(db, year, device_type)
|
||||||
|
month_data = calendar_data["months"].get(month)
|
||||||
|
|
||||||
|
if not month_data:
|
||||||
|
raise HTTPException(status_code=404, detail="Invalid month")
|
||||||
|
|
||||||
|
return templates.TemplateResponse(
|
||||||
|
"partials/fleet_calendar/month_grid.html",
|
||||||
|
{
|
||||||
|
"request": request,
|
||||||
|
"year": year,
|
||||||
|
"month": month,
|
||||||
|
"month_data": month_data,
|
||||||
|
"device_type": device_type,
|
||||||
|
"today": date.today().isoformat()
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Promote Reservation to Project
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
@router.post("/api/fleet-calendar/reservations/{reservation_id}/promote-to-project", response_class=JSONResponse)
|
||||||
|
async def promote_reservation_to_project(
|
||||||
|
reservation_id: str,
|
||||||
|
request: Request,
|
||||||
|
db: Session = Depends(get_db)
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Promote a job reservation to a full project in the projects DB.
|
||||||
|
Creates: Project + MonitoringLocations + UnitAssignments.
|
||||||
|
"""
|
||||||
|
reservation = db.query(JobReservation).filter_by(id=reservation_id).first()
|
||||||
|
if not reservation:
|
||||||
|
raise HTTPException(status_code=404, detail="Reservation not found")
|
||||||
|
|
||||||
|
data = await request.json()
|
||||||
|
project_number = data.get("project_number") or None
|
||||||
|
client_name = data.get("client_name") or None
|
||||||
|
|
||||||
|
# Map device_type to project_type_id
|
||||||
|
if reservation.device_type == "slm":
|
||||||
|
project_type_id = "sound_monitoring"
|
||||||
|
location_type = "sound"
|
||||||
|
else:
|
||||||
|
project_type_id = "vibration_monitoring"
|
||||||
|
location_type = "vibration"
|
||||||
|
|
||||||
|
# Check for duplicate project name
|
||||||
|
existing = db.query(Project).filter_by(name=reservation.name).first()
|
||||||
|
if existing:
|
||||||
|
raise HTTPException(status_code=409, detail=f"A project named '{reservation.name}' already exists.")
|
||||||
|
|
||||||
|
# Create the project
|
||||||
|
project_id = str(uuid.uuid4())
|
||||||
|
project = Project(
|
||||||
|
id=project_id,
|
||||||
|
name=reservation.name,
|
||||||
|
project_number=project_number,
|
||||||
|
client_name=client_name,
|
||||||
|
project_type_id=project_type_id,
|
||||||
|
status="upcoming",
|
||||||
|
start_date=reservation.start_date,
|
||||||
|
end_date=reservation.end_date,
|
||||||
|
description=reservation.notes,
|
||||||
|
)
|
||||||
|
db.add(project)
|
||||||
|
db.flush()
|
||||||
|
|
||||||
|
# Load assignments sorted by slot_index
|
||||||
|
assignments = db.query(JobReservationUnit).filter_by(reservation_id=reservation_id).all()
|
||||||
|
assignments_sorted = sorted(assignments, key=lambda a: (a.slot_index if a.slot_index is not None else 999))
|
||||||
|
|
||||||
|
locations_created = 0
|
||||||
|
units_assigned = 0
|
||||||
|
|
||||||
|
for i, assignment in enumerate(assignments_sorted):
|
||||||
|
loc_num = str(i + 1).zfill(3)
|
||||||
|
loc_name = assignment.location_name or f"Location {i + 1}"
|
||||||
|
|
||||||
|
location = MonitoringLocation(
|
||||||
|
id=str(uuid.uuid4()),
|
||||||
|
project_id=project_id,
|
||||||
|
location_type=location_type,
|
||||||
|
name=loc_name,
|
||||||
|
description=assignment.notes,
|
||||||
|
)
|
||||||
|
db.add(location)
|
||||||
|
db.flush()
|
||||||
|
locations_created += 1
|
||||||
|
|
||||||
|
if assignment.unit_id:
|
||||||
|
unit_assignment = UnitAssignment(
|
||||||
|
id=str(uuid.uuid4()),
|
||||||
|
unit_id=assignment.unit_id,
|
||||||
|
location_id=location.id,
|
||||||
|
project_id=project_id,
|
||||||
|
device_type=reservation.device_type or "seismograph",
|
||||||
|
status="active",
|
||||||
|
notes=f"Power: {assignment.power_type}" if assignment.power_type else None,
|
||||||
|
)
|
||||||
|
db.add(unit_assignment)
|
||||||
|
units_assigned += 1
|
||||||
|
|
||||||
|
db.commit()
|
||||||
|
|
||||||
|
logger.info(f"Promoted reservation '{reservation.name}' to project {project_id}")
|
||||||
|
|
||||||
|
return {
|
||||||
|
"success": True,
|
||||||
|
"project_id": project_id,
|
||||||
|
"project_name": reservation.name,
|
||||||
|
"locations_created": locations_created,
|
||||||
|
"units_assigned": units_assigned,
|
||||||
|
}
|
||||||
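Promotion is name-keyed (a duplicate project name returns 409) and one-way; the only inputs beyond the reservation itself are the optional metadata fields. An illustrative call (values invented):

POST /api/fleet-calendar/reservations/<reservation_id>/promote-to-project
{
  "project_number": "P-2025-042",
  "client_name": "Acme Construction"
}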
backend/routers/modem_dashboard.py (new file, 429 lines)
@@ -0,0 +1,429 @@
"""
Modem Dashboard Router

Provides API endpoints for the Field Modems management page.
"""

from fastapi import APIRouter, Request, Depends, Query
from fastapi.responses import HTMLResponse
from sqlalchemy.orm import Session
from datetime import datetime
import subprocess
import time
import logging

from backend.database import get_db
from backend.models import RosterUnit
from backend.templates_config import templates

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/api/modem-dashboard", tags=["modem-dashboard"])


@router.get("/stats", response_class=HTMLResponse)
async def get_modem_stats(request: Request, db: Session = Depends(get_db)):
    """
    Get summary statistics for modem dashboard.
    Returns HTML partial with stat cards.
    """
    # Query all modems
    all_modems = db.query(RosterUnit).filter_by(device_type="modem").all()

    # Get IDs of modems that have devices paired to them
    paired_modem_ids = set()
    devices_with_modems = db.query(RosterUnit).filter(
        RosterUnit.deployed_with_modem_id.isnot(None),
        RosterUnit.retired == False
    ).all()
    for device in devices_with_modems:
        if device.deployed_with_modem_id:
            paired_modem_ids.add(device.deployed_with_modem_id)

    # Count categories
    total_count = len(all_modems)
    retired_count = sum(1 for m in all_modems if m.retired)

    # In use = deployed AND paired with a device
    in_use_count = sum(1 for m in all_modems
                       if m.deployed and not m.retired and m.id in paired_modem_ids)

    # Spare = deployed but NOT paired (available for assignment)
    spare_count = sum(1 for m in all_modems
                      if m.deployed and not m.retired and m.id not in paired_modem_ids)

    # Benched = not deployed and not retired
    benched_count = sum(1 for m in all_modems if not m.deployed and not m.retired)

    return templates.TemplateResponse("partials/modem_stats.html", {
        "request": request,
        "total_count": total_count,
        "in_use_count": in_use_count,
        "spare_count": spare_count,
        "benched_count": benched_count,
        "retired_count": retired_count
    })


@router.get("/units", response_class=HTMLResponse)
async def get_modem_units(
    request: Request,
    db: Session = Depends(get_db),
    search: str = Query(None),
    filter_status: str = Query(None),  # "in_use", "spare", "benched", "retired"
):
    """
    Get list of modem units for the dashboard.
    Returns HTML partial with modem cards.
    """
    query = db.query(RosterUnit).filter_by(device_type="modem")

    # Filter by search term if provided
    if search:
        search_term = f"%{search}%"
        query = query.filter(
            (RosterUnit.id.ilike(search_term)) |
            (RosterUnit.ip_address.ilike(search_term)) |
            (RosterUnit.hardware_model.ilike(search_term)) |
            (RosterUnit.phone_number.ilike(search_term)) |
            (RosterUnit.location.ilike(search_term))
        )

    modems = query.order_by(
        RosterUnit.retired.asc(),
        RosterUnit.deployed.desc(),
        RosterUnit.id.asc()
    ).all()

    # Get paired device info for each modem
    paired_devices = {}
    devices_with_modems = db.query(RosterUnit).filter(
        RosterUnit.deployed_with_modem_id.isnot(None),
        RosterUnit.retired == False
    ).all()
    for device in devices_with_modems:
        if device.deployed_with_modem_id:
            paired_devices[device.deployed_with_modem_id] = {
                "id": device.id,
                "device_type": device.device_type,
                "deployed": device.deployed
            }

    # Annotate modems with paired device info
    modem_list = []
    for modem in modems:
        paired = paired_devices.get(modem.id)

        # Determine status category
        if modem.retired:
            status = "retired"
        elif not modem.deployed:
            status = "benched"
        elif paired:
            status = "in_use"
        else:
            status = "spare"

        # Apply filter if specified
        if filter_status and status != filter_status:
            continue

        modem_list.append({
            "id": modem.id,
            "ip_address": modem.ip_address,
            "phone_number": modem.phone_number,
            "hardware_model": modem.hardware_model,
            "deployed": modem.deployed,
            "retired": modem.retired,
            "location": modem.location,
            "project_id": modem.project_id,
            "paired_device": paired,
            "status": status
        })

    return templates.TemplateResponse("partials/modem_list.html", {
        "request": request,
        "modems": modem_list
    })
@router.get("/{modem_id}/paired-device")
|
||||||
|
async def get_paired_device(modem_id: str, db: Session = Depends(get_db)):
|
||||||
|
"""
|
||||||
|
Get the device (SLM/seismograph) that is paired with this modem.
|
||||||
|
Returns JSON with device info or null if not paired.
|
||||||
|
"""
|
||||||
|
# Check modem exists
|
||||||
|
modem = db.query(RosterUnit).filter_by(id=modem_id, device_type="modem").first()
|
||||||
|
if not modem:
|
||||||
|
return {"status": "error", "detail": f"Modem {modem_id} not found"}
|
||||||
|
|
||||||
|
# Find device paired with this modem
|
||||||
|
device = db.query(RosterUnit).filter(
|
||||||
|
RosterUnit.deployed_with_modem_id == modem_id,
|
||||||
|
RosterUnit.retired == False
|
||||||
|
).first()
|
||||||
|
|
||||||
|
if device:
|
||||||
|
return {
|
||||||
|
"paired": True,
|
||||||
|
"device": {
|
||||||
|
"id": device.id,
|
||||||
|
"device_type": device.device_type,
|
||||||
|
"deployed": device.deployed,
|
||||||
|
"project_id": device.project_id,
|
||||||
|
"location": device.location or device.address
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return {"paired": False, "device": None}
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/{modem_id}/paired-device-html", response_class=HTMLResponse)
|
||||||
|
async def get_paired_device_html(modem_id: str, request: Request, db: Session = Depends(get_db)):
|
||||||
|
"""
|
||||||
|
Get HTML partial showing the device paired with this modem.
|
||||||
|
Used by unit_detail.html for modems.
|
||||||
|
"""
|
||||||
|
# Check modem exists
|
||||||
|
modem = db.query(RosterUnit).filter_by(id=modem_id, device_type="modem").first()
|
||||||
|
if not modem:
|
||||||
|
return HTMLResponse('<p class="text-red-500">Modem not found</p>')
|
||||||
|
|
||||||
|
# Find device paired with this modem
|
||||||
|
device = db.query(RosterUnit).filter(
|
||||||
|
RosterUnit.deployed_with_modem_id == modem_id,
|
||||||
|
RosterUnit.retired == False
|
||||||
|
).first()
|
||||||
|
|
||||||
|
return templates.TemplateResponse("partials/modem_paired_device.html", {
|
||||||
|
"request": request,
|
||||||
|
"modem_id": modem_id,
|
||||||
|
"device": device
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/{modem_id}/ping")
|
||||||
|
async def ping_modem(modem_id: str, db: Session = Depends(get_db)):
|
||||||
|
"""
|
||||||
|
Test modem connectivity with a simple ping.
|
||||||
|
Returns response time and connection status.
|
||||||
|
"""
|
||||||
|
# Get modem from database
|
||||||
|
modem = db.query(RosterUnit).filter_by(id=modem_id, device_type="modem").first()
|
||||||
|
|
||||||
|
if not modem:
|
||||||
|
return {"status": "error", "detail": f"Modem {modem_id} not found"}
|
||||||
|
|
||||||
|
if not modem.ip_address:
|
||||||
|
return {"status": "error", "detail": f"Modem {modem_id} has no IP address configured"}
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Ping the modem (1 packet, 2 second timeout)
|
||||||
|
start_time = time.time()
|
||||||
|
result = subprocess.run(
|
||||||
|
["ping", "-c", "1", "-W", "2", modem.ip_address],
|
||||||
|
capture_output=True,
|
||||||
|
text=True,
|
||||||
|
timeout=3
|
||||||
|
)
|
||||||
|
response_time = int((time.time() - start_time) * 1000) # Convert to milliseconds
|
||||||
|
|
||||||
|
if result.returncode == 0:
|
||||||
|
return {
|
||||||
|
"status": "success",
|
||||||
|
"modem_id": modem_id,
|
||||||
|
"ip_address": modem.ip_address,
|
||||||
|
"response_time_ms": response_time,
|
||||||
|
"message": "Modem is responding"
|
||||||
|
}
|
||||||
|
else:
|
||||||
|
return {
|
||||||
|
"status": "error",
|
||||||
|
"modem_id": modem_id,
|
||||||
|
"ip_address": modem.ip_address,
|
||||||
|
"detail": "Modem not responding to ping"
|
||||||
|
}
|
||||||
|
|
||||||
|
except subprocess.TimeoutExpired:
|
||||||
|
return {
|
||||||
|
"status": "error",
|
||||||
|
"modem_id": modem_id,
|
||||||
|
"ip_address": modem.ip_address,
|
||||||
|
"detail": "Ping timeout"
|
||||||
|
}
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to ping modem {modem_id}: {e}")
|
||||||
|
return {
|
||||||
|
"status": "error",
|
||||||
|
"modem_id": modem_id,
|
||||||
|
"detail": str(e)
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/{modem_id}/diagnostics")
|
||||||
|
async def get_modem_diagnostics(modem_id: str, db: Session = Depends(get_db)):
|
||||||
|
"""
|
||||||
|
Get modem diagnostics (signal strength, data usage, uptime).
|
||||||
|
|
||||||
|
Currently returns placeholders. When ModemManager is available,
|
||||||
|
this endpoint will query it for real diagnostics.
|
||||||
|
"""
|
||||||
|
modem = db.query(RosterUnit).filter_by(id=modem_id, device_type="modem").first()
|
||||||
|
if not modem:
|
||||||
|
return {"status": "error", "detail": f"Modem {modem_id} not found"}
|
||||||
|
|
||||||
|
# TODO: Query ModemManager backend when available
|
||||||
|
return {
|
||||||
|
"status": "unavailable",
|
||||||
|
"message": "ModemManager integration not yet available",
|
||||||
|
"modem_id": modem_id,
|
||||||
|
"signal_strength_dbm": None,
|
||||||
|
"data_usage_mb": None,
|
||||||
|
"uptime_seconds": None,
|
||||||
|
"carrier": None,
|
||||||
|
"connection_type": None # LTE, 5G, etc.
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/{modem_id}/pairable-devices")
|
||||||
|
async def get_pairable_devices(
|
||||||
|
modem_id: str,
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
search: str = Query(None),
|
||||||
|
hide_paired: bool = Query(True)
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Get list of devices (seismographs and SLMs) that can be paired with this modem.
|
||||||
|
Used by the device picker modal in unit_detail.html.
|
||||||
|
"""
|
||||||
|
# Check modem exists
|
||||||
|
modem = db.query(RosterUnit).filter_by(id=modem_id, device_type="modem").first()
|
||||||
|
if not modem:
|
||||||
|
return {"status": "error", "detail": f"Modem {modem_id} not found"}
|
||||||
|
|
||||||
|
# Query seismographs and SLMs
|
||||||
|
query = db.query(RosterUnit).filter(
|
||||||
|
RosterUnit.device_type.in_(["seismograph", "sound_level_meter"]),
|
||||||
|
RosterUnit.retired == False
|
||||||
|
)
|
||||||
|
|
||||||
|
# Filter by search term if provided
|
||||||
|
if search:
|
||||||
|
search_term = f"%{search}%"
|
||||||
|
query = query.filter(
|
||||||
|
(RosterUnit.id.ilike(search_term)) |
|
||||||
|
(RosterUnit.project_id.ilike(search_term)) |
|
||||||
|
(RosterUnit.location.ilike(search_term)) |
|
||||||
|
(RosterUnit.address.ilike(search_term)) |
|
||||||
|
(RosterUnit.note.ilike(search_term))
|
||||||
|
)
|
||||||
|
|
||||||
|
devices = query.order_by(
|
||||||
|
RosterUnit.deployed.desc(),
|
||||||
|
RosterUnit.device_type.asc(),
|
||||||
|
RosterUnit.id.asc()
|
||||||
|
).all()
|
||||||
|
|
||||||
|
# Build device list
|
||||||
|
device_list = []
|
||||||
|
for device in devices:
|
||||||
|
# Skip already paired devices if hide_paired is True
|
||||||
|
is_paired_to_other = (
|
||||||
|
device.deployed_with_modem_id is not None and
|
||||||
|
device.deployed_with_modem_id != modem_id
|
||||||
|
)
|
||||||
|
is_paired_to_this = device.deployed_with_modem_id == modem_id
|
||||||
|
|
||||||
|
if hide_paired and is_paired_to_other:
|
||||||
|
continue
|
||||||
|
|
||||||
|
device_list.append({
|
||||||
|
"id": device.id,
|
||||||
|
"device_type": device.device_type,
|
||||||
|
"deployed": device.deployed,
|
||||||
|
"project_id": device.project_id,
|
||||||
|
"location": device.location or device.address,
|
||||||
|
"note": device.note,
|
||||||
|
"paired_modem_id": device.deployed_with_modem_id,
|
||||||
|
"is_paired_to_this": is_paired_to_this,
|
||||||
|
"is_paired_to_other": is_paired_to_other
|
||||||
|
})
|
||||||
|
|
||||||
|
return {"devices": device_list, "modem_id": modem_id}
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/{modem_id}/pair")
|
||||||
|
async def pair_device_to_modem(
|
||||||
|
modem_id: str,
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
device_id: str = Query(..., description="ID of the device to pair")
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Pair a device (seismograph or SLM) to this modem.
|
||||||
|
Updates the device's deployed_with_modem_id field.
|
||||||
|
"""
|
||||||
|
# Check modem exists
|
||||||
|
modem = db.query(RosterUnit).filter_by(id=modem_id, device_type="modem").first()
|
||||||
|
if not modem:
|
||||||
|
return {"status": "error", "detail": f"Modem {modem_id} not found"}
|
||||||
|
|
||||||
|
# Find the device
|
||||||
|
device = db.query(RosterUnit).filter(
|
||||||
|
RosterUnit.id == device_id,
|
||||||
|
RosterUnit.device_type.in_(["seismograph", "sound_level_meter"]),
|
||||||
|
RosterUnit.retired == False
|
||||||
|
).first()
|
||||||
|
if not device:
|
||||||
|
return {"status": "error", "detail": f"Device {device_id} not found"}
|
||||||
|
|
||||||
|
# Unpair any device currently paired to this modem
|
||||||
|
currently_paired = db.query(RosterUnit).filter(
|
||||||
|
RosterUnit.deployed_with_modem_id == modem_id
|
||||||
|
).all()
|
||||||
|
for paired_device in currently_paired:
|
||||||
|
paired_device.deployed_with_modem_id = None
|
||||||
|
|
||||||
|
# Pair the new device
|
||||||
|
device.deployed_with_modem_id = modem_id
|
||||||
|
db.commit()
|
||||||
|
|
||||||
|
return {
|
||||||
|
"status": "success",
|
||||||
|
"modem_id": modem_id,
|
||||||
|
"device_id": device_id,
|
||||||
|
"message": f"Device {device_id} paired to modem {modem_id}"
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/{modem_id}/unpair")
|
||||||
|
async def unpair_device_from_modem(modem_id: str, db: Session = Depends(get_db)):
|
||||||
|
"""
|
||||||
|
Unpair any device currently paired to this modem.
|
||||||
|
"""
|
||||||
|
# Check modem exists
|
||||||
|
modem = db.query(RosterUnit).filter_by(id=modem_id, device_type="modem").first()
|
||||||
|
if not modem:
|
||||||
|
return {"status": "error", "detail": f"Modem {modem_id} not found"}
|
||||||
|
|
||||||
|
# Find and unpair device
|
||||||
|
device = db.query(RosterUnit).filter(
|
||||||
|
RosterUnit.deployed_with_modem_id == modem_id
|
||||||
|
).first()
|
||||||
|
|
||||||
|
if device:
|
||||||
|
old_device_id = device.id
|
||||||
|
device.deployed_with_modem_id = None
|
||||||
|
db.commit()
|
||||||
|
return {
|
||||||
|
"status": "success",
|
||||||
|
"modem_id": modem_id,
|
||||||
|
"unpaired_device_id": old_device_id,
|
||||||
|
"message": f"Device {old_device_id} unpaired from modem {modem_id}"
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
"status": "success",
|
||||||
|
"modem_id": modem_id,
|
||||||
|
"message": "No device was paired to this modem"
|
||||||
|
}
|
||||||
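
A quick sketch of driving the new pairing and ping endpoints from a client; the base URL and the unit IDs ("MODEM-12", "SLM-07") are hypothetical, and httpx is assumed to be installed:

import httpx

BASE = "http://localhost:8000/api/modem-dashboard"

with httpx.Client() as client:
    # Pair a device to the modem (any previously paired device is unpaired first)
    r = client.post(f"{BASE}/MODEM-12/pair", params={"device_id": "SLM-07"})
    print(r.json())  # {"status": "success", ...}

    # Confirm the pairing from the modem's point of view
    r = client.get(f"{BASE}/MODEM-12/paired-device")
    print(r.json())  # {"paired": true, "device": {...}}

    # Ping the modem over its configured IP address
    r = client.get(f"{BASE}/MODEM-12/ping")
    print(r.json())  # {"status": "success", "response_time_ms": ...} when reachable
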
backend/routers/project_locations.py (new file, 1002 lines, collapsed)
backend/routers/projects.py (new file, 4720 lines, collapsed)
backend/routers/recurring_schedules.py (new file, 522 lines)
@@ -0,0 +1,522 @@
"""
Recurring Schedules Router

API endpoints for managing recurring monitoring schedules.
"""

from fastapi import APIRouter, Request, Depends, HTTPException, Query
from fastapi.responses import HTMLResponse, JSONResponse
from sqlalchemy.orm import Session
from typing import Optional
from datetime import datetime
import json

from backend.database import get_db
from backend.models import RecurringSchedule, MonitoringLocation, Project, RosterUnit
from backend.services.recurring_schedule_service import get_recurring_schedule_service
from backend.templates_config import templates

router = APIRouter(prefix="/api/projects/{project_id}/recurring-schedules", tags=["recurring-schedules"])


# ============================================================================
# List and Get
# ============================================================================

@router.get("/")
async def list_recurring_schedules(
    project_id: str,
    db: Session = Depends(get_db),
    enabled_only: bool = Query(False),
):
    """
    List all recurring schedules for a project.
    """
    project = db.query(Project).filter_by(id=project_id).first()
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    query = db.query(RecurringSchedule).filter_by(project_id=project_id)
    if enabled_only:
        query = query.filter_by(enabled=True)

    schedules = query.order_by(RecurringSchedule.created_at.desc()).all()

    return {
        "schedules": [
            {
                "id": s.id,
                "name": s.name,
                "schedule_type": s.schedule_type,
                "device_type": s.device_type,
                "location_id": s.location_id,
                "unit_id": s.unit_id,
                "enabled": s.enabled,
                "weekly_pattern": json.loads(s.weekly_pattern) if s.weekly_pattern else None,
                "interval_type": s.interval_type,
                "cycle_time": s.cycle_time,
                "include_download": s.include_download,
                "timezone": s.timezone,
                "next_occurrence": s.next_occurrence.isoformat() if s.next_occurrence else None,
                "last_generated_at": s.last_generated_at.isoformat() if s.last_generated_at else None,
                "created_at": s.created_at.isoformat() if s.created_at else None,
            }
            for s in schedules
        ],
        "count": len(schedules),
    }


@router.get("/{schedule_id}")
async def get_recurring_schedule(
    project_id: str,
    schedule_id: str,
    db: Session = Depends(get_db),
):
    """
    Get a specific recurring schedule.
    """
    schedule = db.query(RecurringSchedule).filter_by(
        id=schedule_id,
        project_id=project_id,
    ).first()

    if not schedule:
        raise HTTPException(status_code=404, detail="Schedule not found")

    # Get related location and unit info
    location = db.query(MonitoringLocation).filter_by(id=schedule.location_id).first()
    unit = None
    if schedule.unit_id:
        unit = db.query(RosterUnit).filter_by(id=schedule.unit_id).first()

    return {
        "id": schedule.id,
        "name": schedule.name,
        "schedule_type": schedule.schedule_type,
        "device_type": schedule.device_type,
        "location_id": schedule.location_id,
        "location_name": location.name if location else None,
        "unit_id": schedule.unit_id,
        "unit_name": unit.id if unit else None,
        "enabled": schedule.enabled,
        "weekly_pattern": json.loads(schedule.weekly_pattern) if schedule.weekly_pattern else None,
        "interval_type": schedule.interval_type,
        "cycle_time": schedule.cycle_time,
        "include_download": schedule.include_download,
        "timezone": schedule.timezone,
        "next_occurrence": schedule.next_occurrence.isoformat() if schedule.next_occurrence else None,
        "last_generated_at": schedule.last_generated_at.isoformat() if schedule.last_generated_at else None,
        "created_at": schedule.created_at.isoformat() if schedule.created_at else None,
        "updated_at": schedule.updated_at.isoformat() if schedule.updated_at else None,
    }


# ============================================================================
# Create
# ============================================================================

@router.post("/")
async def create_recurring_schedule(
    project_id: str,
    request: Request,
    db: Session = Depends(get_db),
):
    """
    Create recurring schedules for one or more locations.

    Body for weekly_calendar (supports multiple locations):
    {
        "name": "Weeknight Monitoring",
        "schedule_type": "weekly_calendar",
        "location_ids": ["uuid1", "uuid2"],  // Array of location IDs
        "weekly_pattern": {
            "monday": {"enabled": true, "start": "19:00", "end": "07:00"},
            "tuesday": {"enabled": false},
            ...
        },
        "include_download": true,
        "auto_increment_index": true,
        "timezone": "America/New_York"
    }

    Body for simple_interval (supports multiple locations):
    {
        "name": "24/7 Continuous",
        "schedule_type": "simple_interval",
        "location_ids": ["uuid1", "uuid2"],  // Array of location IDs
        "interval_type": "daily",
        "cycle_time": "00:00",
        "include_download": true,
        "auto_increment_index": true,
        "timezone": "America/New_York"
    }

    Legacy single location support (backwards compatible):
    {
        "name": "...",
        "location_id": "uuid",  // Single location ID
        ...
    }
    """
    project = db.query(Project).filter_by(id=project_id).first()
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    data = await request.json()

    # Support both location_ids (array) and location_id (single) for backwards compatibility
    location_ids = data.get("location_ids", [])
    if not location_ids and data.get("location_id"):
        location_ids = [data.get("location_id")]

    if not location_ids:
        raise HTTPException(status_code=400, detail="At least one location is required")

    # Validate all locations exist
    locations = db.query(MonitoringLocation).filter(
        MonitoringLocation.id.in_(location_ids),
        MonitoringLocation.project_id == project_id,
    ).all()

    if len(locations) != len(location_ids):
        raise HTTPException(status_code=404, detail="One or more locations not found")

    service = get_recurring_schedule_service(db)
    created_schedules = []
    base_name = data.get("name", "Unnamed Schedule")

    # Parse one-off datetime fields if applicable
    one_off_start = None
    one_off_end = None
    if data.get("schedule_type") == "one_off":
        from zoneinfo import ZoneInfo

        tz = ZoneInfo(data.get("timezone", "America/New_York"))

        start_dt_str = data.get("start_datetime")
        end_dt_str = data.get("end_datetime")

        if not start_dt_str or not end_dt_str:
            raise HTTPException(status_code=400, detail="One-off schedules require start and end date/time")

        try:
            start_local = datetime.fromisoformat(start_dt_str).replace(tzinfo=tz)
            end_local = datetime.fromisoformat(end_dt_str).replace(tzinfo=tz)
        except ValueError:
            raise HTTPException(status_code=400, detail="Invalid datetime format")

        duration = end_local - start_local
        if duration.total_seconds() < 900:
            raise HTTPException(status_code=400, detail="Duration must be at least 15 minutes")
        if duration.total_seconds() > 86400:
            raise HTTPException(status_code=400, detail="Duration cannot exceed 24 hours")

        now_local = datetime.now(tz)
        if start_local <= now_local:
            raise HTTPException(status_code=400, detail="Start time must be in the future")

        # Convert to UTC for storage
        one_off_start = start_local.astimezone(ZoneInfo("UTC")).replace(tzinfo=None)
        one_off_end = end_local.astimezone(ZoneInfo("UTC")).replace(tzinfo=None)

    # Create a schedule for each location
    for location in locations:
        # Determine device type from location
        device_type = "slm" if location.location_type == "sound" else "seismograph"

        # Append location name if multiple locations
        schedule_name = f"{base_name} - {location.name}" if len(locations) > 1 else base_name

        schedule = service.create_schedule(
            project_id=project_id,
            location_id=location.id,
            name=schedule_name,
            schedule_type=data.get("schedule_type", "weekly_calendar"),
            device_type=device_type,
            unit_id=data.get("unit_id"),
            weekly_pattern=data.get("weekly_pattern"),
            interval_type=data.get("interval_type"),
            cycle_time=data.get("cycle_time"),
            include_download=data.get("include_download", True),
            auto_increment_index=data.get("auto_increment_index", True),
            timezone=data.get("timezone", "America/New_York"),
            start_datetime=one_off_start,
            end_datetime=one_off_end,
        )

        # Generate actions immediately so they appear right away
        generated_actions = service.generate_actions_for_schedule(schedule, horizon_days=7)

        created_schedules.append({
            "schedule_id": schedule.id,
            "location_id": location.id,
            "location_name": location.name,
            "actions_generated": len(generated_actions),
        })

    total_actions = sum(s.get("actions_generated", 0) for s in created_schedules)

    return JSONResponse({
        "success": True,
        "schedules": created_schedules,
        "count": len(created_schedules),
        "actions_generated": total_actions,
        "message": f"Created {len(created_schedules)} recurring schedule(s) with {total_actions} upcoming actions",
    })


# ============================================================================
# Update
# ============================================================================

@router.put("/{schedule_id}")
async def update_recurring_schedule(
    project_id: str,
    schedule_id: str,
    request: Request,
    db: Session = Depends(get_db),
):
    """
    Update a recurring schedule.
    """
    schedule = db.query(RecurringSchedule).filter_by(
        id=schedule_id,
        project_id=project_id,
    ).first()

    if not schedule:
        raise HTTPException(status_code=404, detail="Schedule not found")

    data = await request.json()
    service = get_recurring_schedule_service(db)

    # Build update kwargs
    update_kwargs = {}
    for field in ["name", "weekly_pattern", "interval_type", "cycle_time",
                  "include_download", "auto_increment_index", "timezone", "unit_id"]:
        if field in data:
            update_kwargs[field] = data[field]

    updated = service.update_schedule(schedule_id, **update_kwargs)

    return {
        "success": True,
        "schedule_id": updated.id,
        "message": "Schedule updated successfully",
    }


# ============================================================================
# Delete
# ============================================================================

@router.delete("/{schedule_id}")
async def delete_recurring_schedule(
    project_id: str,
    schedule_id: str,
    db: Session = Depends(get_db),
):
    """
    Delete a recurring schedule.
    """
    service = get_recurring_schedule_service(db)
    deleted = service.delete_schedule(schedule_id)

    if not deleted:
        raise HTTPException(status_code=404, detail="Schedule not found")

    return {
        "success": True,
        "message": "Schedule deleted successfully",
    }


# ============================================================================
# Enable/Disable
# ============================================================================

@router.post("/{schedule_id}/enable")
async def enable_schedule(
    project_id: str,
    schedule_id: str,
    db: Session = Depends(get_db),
):
    """
    Enable a disabled schedule.
    """
    service = get_recurring_schedule_service(db)
    schedule = service.enable_schedule(schedule_id)

    if not schedule:
        raise HTTPException(status_code=404, detail="Schedule not found")

    return {
        "success": True,
        "schedule_id": schedule.id,
        "enabled": schedule.enabled,
        "message": "Schedule enabled",
    }


@router.post("/{schedule_id}/disable")
async def disable_schedule(
    project_id: str,
    schedule_id: str,
    db: Session = Depends(get_db),
):
    """
    Disable a schedule and cancel all its pending actions.
    """
    service = get_recurring_schedule_service(db)

    # Count pending actions before disabling (for response message)
    from sqlalchemy import and_
    from backend.models import ScheduledAction
    pending_count = db.query(ScheduledAction).filter(
        and_(
            ScheduledAction.execution_status == "pending",
            ScheduledAction.notes.like(f'%"schedule_id": "{schedule_id}"%'),
        )
    ).count()

    schedule = service.disable_schedule(schedule_id)

    if not schedule:
        raise HTTPException(status_code=404, detail="Schedule not found")

    message = "Schedule disabled"
    if pending_count > 0:
        message += f" and {pending_count} pending action(s) cancelled"

    return {
        "success": True,
        "schedule_id": schedule.id,
        "enabled": schedule.enabled,
        "cancelled_actions": pending_count,
        "message": message,
    }


# ============================================================================
# Preview Generated Actions
# ============================================================================

@router.post("/{schedule_id}/generate-preview")
async def preview_generated_actions(
    project_id: str,
    schedule_id: str,
    db: Session = Depends(get_db),
    days: int = Query(7, ge=1, le=30),
):
    """
    Preview what actions would be generated without saving them.
    """
    schedule = db.query(RecurringSchedule).filter_by(
        id=schedule_id,
        project_id=project_id,
    ).first()

    if not schedule:
        raise HTTPException(status_code=404, detail="Schedule not found")

    service = get_recurring_schedule_service(db)
    actions = service.generate_actions_for_schedule(
        schedule,
        horizon_days=days,
        preview_only=True,
    )

    return {
        "schedule_id": schedule_id,
        "schedule_name": schedule.name,
        "preview_days": days,
        "actions": [
            {
                "action_type": a.action_type,
                "scheduled_time": a.scheduled_time.isoformat(),
                "notes": a.notes,
            }
            for a in actions
        ],
        "action_count": len(actions),
    }


# ============================================================================
# Manual Generation Trigger
# ============================================================================

@router.post("/{schedule_id}/generate")
async def generate_actions_now(
    project_id: str,
    schedule_id: str,
    db: Session = Depends(get_db),
    days: int = Query(7, ge=1, le=30),
):
    """
    Manually trigger action generation for a schedule.
    """
    schedule = db.query(RecurringSchedule).filter_by(
        id=schedule_id,
        project_id=project_id,
    ).first()

    if not schedule:
        raise HTTPException(status_code=404, detail="Schedule not found")

    if not schedule.enabled:
        raise HTTPException(status_code=400, detail="Schedule is disabled")

    service = get_recurring_schedule_service(db)
    actions = service.generate_actions_for_schedule(
        schedule,
        horizon_days=days,
        preview_only=False,
    )

    return {
        "success": True,
        "schedule_id": schedule_id,
        "generated_count": len(actions),
        "message": f"Generated {len(actions)} scheduled actions",
    }


# ============================================================================
# HTML Partials
# ============================================================================

@router.get("/partials/list", response_class=HTMLResponse)
async def get_schedule_list_partial(
    project_id: str,
    request: Request,
    db: Session = Depends(get_db),
):
    """
    Return HTML partial for schedule list.
    """
    project = db.query(Project).filter_by(id=project_id).first()
    project_status = project.status if project else "active"

    schedules = db.query(RecurringSchedule).filter_by(
        project_id=project_id
    ).order_by(RecurringSchedule.created_at.desc()).all()

    # Enrich with location info
    schedule_data = []
    for s in schedules:
        location = db.query(MonitoringLocation).filter_by(id=s.location_id).first()
        schedule_data.append({
            "schedule": s,
            "location": location,
            "pattern": json.loads(s.weekly_pattern) if s.weekly_pattern else None,
        })

    return templates.TemplateResponse("partials/projects/recurring_schedule_list.html", {
        "request": request,
        "project_id": project_id,
        "schedules": schedule_data,
        "project_status": project_status,
    })
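
A minimal sketch of creating a weekly schedule through the endpoint above, following the body format documented in the docstring; the base URL, project ID ("PROJ-1"), and location UUIDs are hypothetical:

import httpx

payload = {
    "name": "Weeknight Monitoring",
    "schedule_type": "weekly_calendar",
    "location_ids": ["3f2a-example-uuid", "9c1b-example-uuid"],  # hypothetical IDs
    "weekly_pattern": {
        "monday": {"enabled": True, "start": "19:00", "end": "07:00"},
        "tuesday": {"enabled": False},
    },
    "include_download": True,
    "auto_increment_index": True,
    "timezone": "America/New_York",
}

r = httpx.post(
    "http://localhost:8000/api/projects/PROJ-1/recurring-schedules/",
    json=payload,
)
print(r.json())  # {"success": true, "schedules": [...], "actions_generated": ...}
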
backend/routers/report_templates.py (new file, 187 lines)
@@ -0,0 +1,187 @@
"""
Report Templates Router

CRUD operations for report template management.
Templates store time filter presets and report configuration for reuse.
"""

from fastapi import APIRouter, Depends, HTTPException
from fastapi.responses import JSONResponse
from sqlalchemy.orm import Session
from datetime import datetime
from typing import Optional
import uuid

from backend.database import get_db
from backend.models import ReportTemplate

router = APIRouter(prefix="/api/report-templates", tags=["report-templates"])


@router.get("")
async def list_templates(
    project_id: Optional[str] = None,
    db: Session = Depends(get_db),
):
    """
    List all report templates.
    Optionally filter by project_id (includes global templates with project_id=None).
    """
    query = db.query(ReportTemplate)

    if project_id:
        # Include global templates (project_id=None) AND project-specific templates
        query = query.filter(
            (ReportTemplate.project_id == None) | (ReportTemplate.project_id == project_id)
        )

    templates = query.order_by(ReportTemplate.name).all()

    return [
        {
            "id": t.id,
            "name": t.name,
            "project_id": t.project_id,
            "report_title": t.report_title,
            "start_time": t.start_time,
            "end_time": t.end_time,
            "start_date": t.start_date,
            "end_date": t.end_date,
            "created_at": t.created_at.isoformat() if t.created_at else None,
            "updated_at": t.updated_at.isoformat() if t.updated_at else None,
        }
        for t in templates
    ]


@router.post("")
async def create_template(
    data: dict,
    db: Session = Depends(get_db),
):
    """
    Create a new report template.

    Request body:
    - name: Template name (required)
    - project_id: Optional project ID for project-specific template
    - report_title: Default report title
    - start_time: Start time filter (HH:MM format)
    - end_time: End time filter (HH:MM format)
    - start_date: Start date filter (YYYY-MM-DD format)
    - end_date: End date filter (YYYY-MM-DD format)
    """
    name = data.get("name")
    if not name:
        raise HTTPException(status_code=400, detail="Template name is required")

    template = ReportTemplate(
        id=str(uuid.uuid4()),
        name=name,
        project_id=data.get("project_id"),
        report_title=data.get("report_title", "Background Noise Study"),
        start_time=data.get("start_time"),
        end_time=data.get("end_time"),
        start_date=data.get("start_date"),
        end_date=data.get("end_date"),
    )

    db.add(template)
    db.commit()
    db.refresh(template)

    return {
        "id": template.id,
        "name": template.name,
        "project_id": template.project_id,
        "report_title": template.report_title,
        "start_time": template.start_time,
        "end_time": template.end_time,
        "start_date": template.start_date,
        "end_date": template.end_date,
        "created_at": template.created_at.isoformat() if template.created_at else None,
    }


@router.get("/{template_id}")
async def get_template(
    template_id: str,
    db: Session = Depends(get_db),
):
    """Get a specific report template by ID."""
    template = db.query(ReportTemplate).filter_by(id=template_id).first()
    if not template:
        raise HTTPException(status_code=404, detail="Template not found")

    return {
        "id": template.id,
        "name": template.name,
        "project_id": template.project_id,
        "report_title": template.report_title,
        "start_time": template.start_time,
        "end_time": template.end_time,
        "start_date": template.start_date,
        "end_date": template.end_date,
        "created_at": template.created_at.isoformat() if template.created_at else None,
        "updated_at": template.updated_at.isoformat() if template.updated_at else None,
    }


@router.put("/{template_id}")
async def update_template(
    template_id: str,
    data: dict,
    db: Session = Depends(get_db),
):
    """Update an existing report template."""
    template = db.query(ReportTemplate).filter_by(id=template_id).first()
    if not template:
        raise HTTPException(status_code=404, detail="Template not found")

    # Update fields if provided
    if "name" in data:
        template.name = data["name"]
    if "project_id" in data:
        template.project_id = data["project_id"]
    if "report_title" in data:
        template.report_title = data["report_title"]
    if "start_time" in data:
        template.start_time = data["start_time"]
    if "end_time" in data:
        template.end_time = data["end_time"]
    if "start_date" in data:
        template.start_date = data["start_date"]
    if "end_date" in data:
        template.end_date = data["end_date"]

    template.updated_at = datetime.utcnow()
    db.commit()
    db.refresh(template)

    return {
        "id": template.id,
        "name": template.name,
        "project_id": template.project_id,
        "report_title": template.report_title,
        "start_time": template.start_time,
        "end_time": template.end_time,
        "start_date": template.start_date,
        "end_date": template.end_date,
        "updated_at": template.updated_at.isoformat() if template.updated_at else None,
    }


@router.delete("/{template_id}")
async def delete_template(
    template_id: str,
    db: Session = Depends(get_db),
):
    """Delete a report template."""
    template = db.query(ReportTemplate).filter_by(id=template_id).first()
    if not template:
        raise HTTPException(status_code=404, detail="Template not found")

    db.delete(template)
    db.commit()

    return JSONResponse({"status": "success", "message": "Template deleted"})
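
A minimal sketch of the template round trip (create, update, delete); the base URL is an assumption:

import httpx

BASE = "http://localhost:8000/api/report-templates"

created = httpx.post(BASE, json={
    "name": "Nighttime preset",
    "start_time": "22:00",
    "end_time": "07:00",
}).json()

# Rename the template, leaving the time filters untouched
httpx.put(f"{BASE}/{created['id']}", json={"name": "Nighttime preset (v2)"})

# Remove it again
httpx.delete(f"{BASE}/{created['id']}")
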
@@ -2,20 +2,32 @@ from fastapi import APIRouter, Depends
 from sqlalchemy.orm import Session
 from datetime import datetime, timedelta
 from typing import Dict, Any
+import asyncio
+import logging
 import random
 
 from backend.database import get_db
 from backend.services.snapshot import emit_status_snapshot
+from backend.services.slm_status_sync import sync_slm_status_to_emitters
 
 router = APIRouter(prefix="/api", tags=["roster"])
+logger = logging.getLogger(__name__)
 
 
 @router.get("/status-snapshot")
-def get_status_snapshot(db: Session = Depends(get_db)):
+async def get_status_snapshot(db: Session = Depends(get_db)):
     """
     Calls emit_status_snapshot() to get current fleet status.
-    This will be replaced with real Series3 emitter logic later.
+    Syncs SLM status from SLMM before generating snapshot.
     """
+    # Sync SLM status from SLMM (with timeout to prevent blocking)
+    try:
+        await asyncio.wait_for(sync_slm_status_to_emitters(), timeout=2.0)
+    except asyncio.TimeoutError:
+        logger.warning("SLM status sync timed out, using cached data")
+    except Exception as e:
+        logger.warning(f"SLM status sync failed: {e}")
+
     return emit_status_snapshot()
 
 
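
The change above wraps the cross-service sync in a hard deadline, so a slow or unreachable SLMM can never stall the snapshot endpoint; stale cached data is served instead. A minimal, self-contained sketch of the same guard pattern (sync_from_peer stands in for sync_slm_status_to_emitters):

import asyncio
import logging

logger = logging.getLogger(__name__)

async def sync_from_peer() -> None:
    await asyncio.sleep(5)  # simulate a peer that is too slow to answer

async def snapshot() -> dict:
    try:
        # Bound the refresh: give the peer at most 2 seconds
        await asyncio.wait_for(sync_from_peer(), timeout=2.0)
    except asyncio.TimeoutError:
        logger.warning("peer sync timed out, serving cached data")
    return {"status": "ok"}  # always answer, fresh or cached

print(asyncio.run(snapshot()))
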
backend/routers/roster_rename.py (new file, 139 lines)
@@ -0,0 +1,139 @@
"""
Roster Unit Rename Router

Provides endpoint for safely renaming unit IDs across all database tables.
"""

from fastapi import APIRouter, Depends, HTTPException, Form
from sqlalchemy.orm import Session
from datetime import datetime
import logging

from backend.database import get_db
from backend.models import RosterUnit, Emitter, UnitHistory
from backend.routers.roster_edit import record_history, sync_slm_to_slmm_cache

router = APIRouter(prefix="/api/roster", tags=["roster-rename"])
logger = logging.getLogger(__name__)


@router.post("/rename")
async def rename_unit(
    old_id: str = Form(...),
    new_id: str = Form(...),
    db: Session = Depends(get_db)
):
    """
    Rename a unit ID across all tables.
    Updates the unit ID in roster, emitters, unit_history, and all foreign key references.

    IMPORTANT: This operation updates the primary key, which affects all relationships.
    """
    # Validate input
    if not old_id or not new_id:
        raise HTTPException(status_code=400, detail="Both old_id and new_id are required")

    if old_id == new_id:
        raise HTTPException(status_code=400, detail="New ID must be different from old ID")

    # Check if old unit exists
    old_unit = db.query(RosterUnit).filter(RosterUnit.id == old_id).first()
    if not old_unit:
        raise HTTPException(status_code=404, detail=f"Unit '{old_id}' not found")

    # Check if new ID already exists
    existing_unit = db.query(RosterUnit).filter(RosterUnit.id == new_id).first()
    if existing_unit:
        raise HTTPException(status_code=409, detail=f"Unit ID '{new_id}' already exists")

    device_type = old_unit.device_type

    try:
        # Record history for the rename operation (using old_id since that's still valid)
        record_history(
            db=db,
            unit_id=old_id,
            change_type="id_change",
            field_name="id",
            old_value=old_id,
            new_value=new_id,
            source="manual",
            notes=f"Unit renamed from '{old_id}' to '{new_id}'"
        )

        # Update roster table (primary)
        old_unit.id = new_id
        old_unit.last_updated = datetime.utcnow()

        # Update emitters table
        emitter = db.query(Emitter).filter(Emitter.id == old_id).first()
        if emitter:
            emitter.id = new_id

        # Update unit_history table (all entries for this unit)
        db.query(UnitHistory).filter(UnitHistory.unit_id == old_id).update(
            {"unit_id": new_id},
            synchronize_session=False
        )

        # Update deployed_with_modem_id references (units that reference this as modem)
        db.query(RosterUnit).filter(RosterUnit.deployed_with_modem_id == old_id).update(
            {"deployed_with_modem_id": new_id},
            synchronize_session=False
        )

        # Update unit_assignments table (if exists)
        try:
            from backend.models import UnitAssignment
            db.query(UnitAssignment).filter(UnitAssignment.unit_id == old_id).update(
                {"unit_id": new_id},
                synchronize_session=False
            )
        except Exception as e:
            logger.warning(f"Could not update unit_assignments: {e}")

        # Update monitoring_sessions table (if exists)
        try:
            from backend.models import MonitoringSession
            db.query(MonitoringSession).filter(MonitoringSession.unit_id == old_id).update(
                {"unit_id": new_id},
                synchronize_session=False
            )
        except Exception as e:
            logger.warning(f"Could not update monitoring_sessions: {e}")

        # Commit all changes
        db.commit()

        # If sound level meter, sync updated config to SLMM cache
        if device_type == "slm":
            logger.info(f"Syncing renamed SLM {new_id} (was {old_id}) config to SLMM cache...")
            result = await sync_slm_to_slmm_cache(
                unit_id=new_id,
                host=old_unit.slm_host,
                tcp_port=old_unit.slm_tcp_port,
                ftp_port=old_unit.slm_ftp_port,
                deployed_with_modem_id=old_unit.deployed_with_modem_id,
                db=db
            )

            if not result["success"]:
                logger.warning(f"SLMM cache sync warning for renamed unit {new_id}: {result['message']}")

        logger.info(f"Successfully renamed unit '{old_id}' to '{new_id}'")

        return {
            "success": True,
            "message": f"Successfully renamed unit from '{old_id}' to '{new_id}'",
            "old_id": old_id,
            "new_id": new_id,
            "device_type": device_type
        }

    except Exception as e:
        db.rollback()
        logger.error(f"Error renaming unit '{old_id}' to '{new_id}': {e}")
        raise HTTPException(
            status_code=500,
            detail=f"Failed to rename unit: {str(e)}"
        )
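
A minimal sketch of invoking the rename endpoint above; the base URL and unit IDs are hypothetical. Note that the endpoint takes form fields, not JSON:

import httpx

r = httpx.post(
    "http://localhost:8000/api/roster/rename",
    data={"old_id": "SEIS-04", "new_id": "SEIS-104"},  # form-encoded body
)
print(r.json())  # {"success": true, "old_id": "SEIS-04", "new_id": "SEIS-104", ...}
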
backend/routers/scheduler.py (new file, 408 lines)
@@ -0,0 +1,408 @@
"""
Scheduler Router

Handles scheduled actions for automated recording control.
"""

from fastapi import APIRouter, Request, Depends, HTTPException, Query
from fastapi.responses import HTMLResponse, JSONResponse
from sqlalchemy.orm import Session
from sqlalchemy import and_, or_
from datetime import datetime, timedelta
from typing import Optional
import uuid
import json

from backend.database import get_db
from backend.models import (
    Project,
    ScheduledAction,
    MonitoringLocation,
    UnitAssignment,
    RosterUnit,
)
from backend.services.scheduler import get_scheduler
from backend.templates_config import templates

router = APIRouter(prefix="/api/projects/{project_id}/scheduler", tags=["scheduler"])


# ============================================================================
# Scheduled Actions List
# ============================================================================

@router.get("/actions", response_class=HTMLResponse)
async def get_scheduled_actions(
    project_id: str,
    request: Request,
    db: Session = Depends(get_db),
    status: Optional[str] = Query(None),
    start_date: Optional[str] = Query(None),
    end_date: Optional[str] = Query(None),
):
    """
    Get scheduled actions for a project.
    Returns HTML partial with agenda/calendar view.
    """
    query = db.query(ScheduledAction).filter_by(project_id=project_id)

    # Filter by status
    if status:
        query = query.filter_by(execution_status=status)
    else:
        # By default, show pending and upcoming completed/failed
        query = query.filter(
            or_(
                ScheduledAction.execution_status == "pending",
                and_(
                    ScheduledAction.execution_status.in_(["completed", "failed"]),
                    ScheduledAction.scheduled_time >= datetime.utcnow() - timedelta(days=7),
                ),
            )
        )

    # Filter by date range
    if start_date:
        query = query.filter(ScheduledAction.scheduled_time >= datetime.fromisoformat(start_date))
    if end_date:
        query = query.filter(ScheduledAction.scheduled_time <= datetime.fromisoformat(end_date))

    actions = query.order_by(ScheduledAction.scheduled_time).all()

    # Enrich with location and unit details
    actions_data = []
    for action in actions:
        location = db.query(MonitoringLocation).filter_by(id=action.location_id).first()

        unit = None
        if action.unit_id:
            unit = db.query(RosterUnit).filter_by(id=action.unit_id).first()
        else:
            # Get from assignment
            assignment = db.query(UnitAssignment).filter(
                and_(
                    UnitAssignment.location_id == action.location_id,
                    UnitAssignment.status == "active",
                )
            ).first()
            if assignment:
                unit = db.query(RosterUnit).filter_by(id=assignment.unit_id).first()

        actions_data.append({
            "action": action,
            "location": location,
            "unit": unit,
        })

    return templates.TemplateResponse("partials/projects/scheduler_agenda.html", {
        "request": request,
        "project_id": project_id,
        "actions": actions_data,
    })


# ============================================================================
# Create Scheduled Action
# ============================================================================

@router.post("/actions/create")
async def create_scheduled_action(
    project_id: str,
    request: Request,
    db: Session = Depends(get_db),
):
    """
    Create a new scheduled action.
    """
    project = db.query(Project).filter_by(id=project_id).first()
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    form_data = await request.form()

    location_id = form_data.get("location_id")
    location = db.query(MonitoringLocation).filter_by(
        id=location_id,
        project_id=project_id,
    ).first()

    if not location:
        raise HTTPException(status_code=404, detail="Location not found")

    # Determine device type from location
    device_type = "slm" if location.location_type == "sound" else "seismograph"

    # Get unit_id (optional - can be determined from assignment at execution time)
    unit_id = form_data.get("unit_id")

    action = ScheduledAction(
        id=str(uuid.uuid4()),
        project_id=project_id,
        location_id=location_id,
        unit_id=unit_id,
        action_type=form_data.get("action_type"),
        device_type=device_type,
        scheduled_time=datetime.fromisoformat(form_data.get("scheduled_time")),
        execution_status="pending",
        notes=form_data.get("notes"),
    )

    db.add(action)
    db.commit()
    db.refresh(action)

    return JSONResponse({
        "success": True,
        "action_id": action.id,
        "message": f"Scheduled action '{action.action_type}' created for {action.scheduled_time}",
    })


# ============================================================================
# Schedule Recording Session
# ============================================================================

@router.post("/schedule-session")
async def schedule_recording_session(
    project_id: str,
    request: Request,
    db: Session = Depends(get_db),
):
    """
    Schedule a complete recording session (start + stop).
    Creates two scheduled actions: start and stop.
    """
    project = db.query(Project).filter_by(id=project_id).first()
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    form_data = await request.form()

    location_id = form_data.get("location_id")
    location = db.query(MonitoringLocation).filter_by(
        id=location_id,
        project_id=project_id,
    ).first()

    if not location:
        raise HTTPException(status_code=404, detail="Location not found")

    device_type = "slm" if location.location_type == "sound" else "seismograph"
    unit_id = form_data.get("unit_id")

    start_time = datetime.fromisoformat(form_data.get("start_time"))
    duration_minutes = int(form_data.get("duration_minutes", 60))
    stop_time = start_time + timedelta(minutes=duration_minutes)

    # Create START action
    start_action = ScheduledAction(
        id=str(uuid.uuid4()),
        project_id=project_id,
        location_id=location_id,
        unit_id=unit_id,
        action_type="start",
        device_type=device_type,
        scheduled_time=start_time,
        execution_status="pending",
        notes=form_data.get("notes"),
    )

    # Create STOP action
    stop_action = ScheduledAction(
        id=str(uuid.uuid4()),
        project_id=project_id,
        location_id=location_id,
        unit_id=unit_id,
        action_type="stop",
        device_type=device_type,
        scheduled_time=stop_time,
        execution_status="pending",
        notes=f"Auto-stop after {duration_minutes} minutes",
    )

    db.add(start_action)
    db.add(stop_action)
    db.commit()

    return JSONResponse({
        "success": True,
        "start_action_id": start_action.id,
        "stop_action_id": stop_action.id,
        "message": f"Recording session scheduled from {start_time} to {stop_time}",
    })


# ============================================================================
# Update/Cancel Scheduled Action
# ============================================================================

@router.put("/actions/{action_id}")
async def update_scheduled_action(
    project_id: str,
    action_id: str,
    request: Request,
    db: Session = Depends(get_db),
):
    """
    Update a scheduled action (only if not yet executed).
    """
    action = db.query(ScheduledAction).filter_by(
        id=action_id,
        project_id=project_id,
    ).first()

    if not action:
        raise HTTPException(status_code=404, detail="Action not found")

    if action.execution_status != "pending":
        raise HTTPException(
            status_code=400,
            detail="Cannot update action that has already been executed",
        )

    data = await request.json()

    if "scheduled_time" in data:
        action.scheduled_time = datetime.fromisoformat(data["scheduled_time"])
    if "notes" in data:
        action.notes = data["notes"]

    db.commit()

    return {"success": True, "message": "Action updated successfully"}


@router.post("/actions/{action_id}/cancel")
async def cancel_scheduled_action(
    project_id: str,
    action_id: str,
    db: Session = Depends(get_db),
):
    """
    Cancel a pending scheduled action.
    """
    action = db.query(ScheduledAction).filter_by(
        id=action_id,
        project_id=project_id,
    ).first()

    if not action:
        raise HTTPException(status_code=404, detail="Action not found")

    if action.execution_status != "pending":
        raise HTTPException(
            status_code=400,
            detail="Can only cancel pending actions",
        )

    action.execution_status = "cancelled"
    db.commit()

    return {"success": True, "message": "Action cancelled successfully"}


@router.delete("/actions/{action_id}")
async def delete_scheduled_action(
    project_id: str,
    action_id: str,
    db: Session = Depends(get_db),
):
    """
    Delete a scheduled action (only if pending or cancelled).
    """
    action = db.query(ScheduledAction).filter_by(
        id=action_id,
        project_id=project_id,
    ).first()

    if not action:
        raise HTTPException(status_code=404, detail="Action not found")

    if action.execution_status not in ["pending", "cancelled"]:
        raise HTTPException(
            status_code=400,
            detail="Cannot delete action that has been executed",
        )

    db.delete(action)
    db.commit()

    return {"success": True, "message": "Action deleted successfully"}
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Manual Execution
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
@router.post("/actions/{action_id}/execute")
|
||||||
|
async def execute_action_now(
|
||||||
|
project_id: str,
|
||||||
|
action_id: str,
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Manually trigger execution of a scheduled action (for testing/debugging).
|
||||||
|
"""
|
||||||
|
action = db.query(ScheduledAction).filter_by(
|
||||||
|
id=action_id,
|
||||||
|
project_id=project_id,
|
||||||
|
).first()
|
||||||
|
|
||||||
|
if not action:
|
||||||
|
raise HTTPException(status_code=404, detail="Action not found")
|
||||||
|
|
||||||
|
if action.execution_status != "pending":
|
||||||
|
raise HTTPException(
|
||||||
|
status_code=400,
|
||||||
|
detail="Action is not pending",
|
||||||
|
)
|
||||||
|
|
||||||
|
# Execute via scheduler service
|
||||||
|
scheduler = get_scheduler()
|
||||||
|
result = await scheduler.execute_action_by_id(action_id)
|
||||||
|
|
||||||
|
# Refresh from DB to get updated status
|
||||||
|
db.refresh(action)
|
||||||
|
|
||||||
|
return JSONResponse({
|
||||||
|
"success": result.get("success", False),
|
||||||
|
"result": result,
|
||||||
|
"action": {
|
||||||
|
"id": action.id,
|
||||||
|
"execution_status": action.execution_status,
|
||||||
|
"executed_at": action.executed_at.isoformat() if action.executed_at else None,
|
||||||
|
"error_message": action.error_message,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Scheduler Status
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
@router.get("/status")
|
||||||
|
async def get_scheduler_status():
|
||||||
|
"""
|
||||||
|
Get scheduler service status.
|
||||||
|
"""
|
||||||
|
scheduler = get_scheduler()
|
||||||
|
|
||||||
|
return {
|
||||||
|
"running": scheduler.running,
|
||||||
|
"check_interval": scheduler.check_interval,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/execute-pending")
|
||||||
|
async def trigger_pending_execution():
|
||||||
|
"""
|
||||||
|
Manually trigger execution of all pending actions (for testing).
|
||||||
|
"""
|
||||||
|
scheduler = get_scheduler()
|
||||||
|
results = await scheduler.execute_pending_actions()
|
||||||
|
|
||||||
|
return {
|
||||||
|
"success": True,
|
||||||
|
"executed_count": len(results),
|
||||||
|
"results": results,
|
||||||
|
}
|
||||||
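A minimal client sketch for the schedule-session endpoint above. The base URL and the project-scoped route prefix are assumptions; the router's actual prefix is defined earlier in this file and is not shown in this hunk.

    import httpx

    # Hypothetical mount point: /api/projects/{project_id}/schedule
    resp = httpx.post(
        "http://localhost:8000/api/projects/PROJ-1/schedule/schedule-session",
        data={
            "location_id": "LOC-1",                # must belong to the project
            "start_time": "2025-06-01T09:00:00",   # ISO 8601, parsed by fromisoformat
            "duration_minutes": "120",             # stop action = start + duration
            "notes": "Pile driving day 1",
        },
    )
    print(resp.json())  # start_action_id, stop_action_id, message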
backend/routers/seismo_dashboard.py (new file, 228 lines)
@@ -0,0 +1,228 @@
"""
Seismograph Dashboard API Router
Provides endpoints for the seismograph-specific dashboard
"""

from datetime import date, datetime, timedelta

from fastapi import APIRouter, Request, Depends, Query, Form, HTTPException
from fastapi.responses import HTMLResponse
from sqlalchemy.orm import Session
from backend.database import get_db
from backend.models import RosterUnit, UnitHistory, UserPreferences
from backend.templates_config import templates

router = APIRouter(prefix="/api/seismo-dashboard", tags=["seismo-dashboard"])


@router.get("/stats", response_class=HTMLResponse)
async def get_seismo_stats(request: Request, db: Session = Depends(get_db)):
    """
    Returns HTML partial with seismograph statistics summary
    """
    # Get all seismograph units
    seismos = db.query(RosterUnit).filter_by(
        device_type="seismograph",
        retired=False
    ).all()

    total = len(seismos)
    deployed = sum(1 for s in seismos if s.deployed)
    benched = sum(1 for s in seismos if not s.deployed and not s.out_for_calibration)
    out_for_calibration = sum(1 for s in seismos if s.out_for_calibration)

    # Count modems assigned to deployed seismographs
    with_modem = sum(1 for s in seismos if s.deployed and s.deployed_with_modem_id)
    without_modem = deployed - with_modem

    return templates.TemplateResponse(
        "partials/seismo_stats.html",
        {
            "request": request,
            "total": total,
            "deployed": deployed,
            "benched": benched,
            "out_for_calibration": out_for_calibration,
            "with_modem": with_modem,
            "without_modem": without_modem
        }
    )


@router.get("/units", response_class=HTMLResponse)
async def get_seismo_units(
    request: Request,
    db: Session = Depends(get_db),
    search: str = Query(None),
    sort: str = Query("id"),
    order: str = Query("asc"),
    status: str = Query(None),
    modem: str = Query(None)
):
    """
    Returns HTML partial with filterable and sortable seismograph unit list
    """
    query = db.query(RosterUnit).filter_by(
        device_type="seismograph",
        retired=False
    )

    # Apply search filter
    if search:
        query = query.filter(
            (RosterUnit.id.ilike(f"%{search}%")) |
            (RosterUnit.note.ilike(f"%{search}%")) |
            (RosterUnit.address.ilike(f"%{search}%"))
        )

    # Apply status filter
    if status == "deployed":
        query = query.filter(RosterUnit.deployed == True)
    elif status == "benched":
        query = query.filter(RosterUnit.deployed == False, RosterUnit.out_for_calibration == False)
    elif status == "out_for_calibration":
        query = query.filter(RosterUnit.out_for_calibration == True)

    # Apply modem filter
    if modem == "with":
        query = query.filter(RosterUnit.deployed_with_modem_id.isnot(None))
    elif modem == "without":
        query = query.filter(RosterUnit.deployed_with_modem_id.is_(None))

    # Apply sorting
    sort_column_map = {
        "id": RosterUnit.id,
        "status": RosterUnit.deployed,
        "modem": RosterUnit.deployed_with_modem_id,
        "location": RosterUnit.address,
        "last_calibrated": RosterUnit.last_calibrated,
        "notes": RosterUnit.note
    }
    sort_column = sort_column_map.get(sort, RosterUnit.id)

    if order == "desc":
        query = query.order_by(sort_column.desc())
    else:
        query = query.order_by(sort_column.asc())

    seismos = query.all()

    return templates.TemplateResponse(
        "partials/seismo_unit_list.html",
        {
            "request": request,
            "units": seismos,
            "search": search or "",
            "sort": sort,
            "order": order,
            "status": status or "",
            "modem": modem or "",
            "today": date.today()
        }
    )


def _get_calibration_interval(db: Session) -> int:
    prefs = db.query(UserPreferences).first()
    if prefs and prefs.calibration_interval_days:
        return prefs.calibration_interval_days
    return 365


def _row_context(request: Request, unit: RosterUnit) -> dict:
    return {"request": request, "unit": unit, "today": date.today()}


@router.get("/unit/{unit_id}/view-row", response_class=HTMLResponse)
async def get_seismo_view_row(unit_id: str, request: Request, db: Session = Depends(get_db)):
    unit = db.query(RosterUnit).filter(RosterUnit.id == unit_id).first()
    if not unit:
        raise HTTPException(status_code=404, detail="Unit not found")
    return templates.TemplateResponse("partials/seismo_row_view.html", _row_context(request, unit))


@router.get("/unit/{unit_id}/edit-row", response_class=HTMLResponse)
async def get_seismo_edit_row(unit_id: str, request: Request, db: Session = Depends(get_db)):
    unit = db.query(RosterUnit).filter(RosterUnit.id == unit_id).first()
    if not unit:
        raise HTTPException(status_code=404, detail="Unit not found")
    return templates.TemplateResponse("partials/seismo_row_edit.html", _row_context(request, unit))


@router.post("/unit/{unit_id}/quick-update", response_class=HTMLResponse)
async def quick_update_seismo_unit(
    unit_id: str,
    request: Request,
    db: Session = Depends(get_db),
    status: str = Form(...),
    last_calibrated: str = Form(""),
    note: str = Form(""),
):
    unit = db.query(RosterUnit).filter(RosterUnit.id == unit_id).first()
    if not unit:
        raise HTTPException(status_code=404, detail="Unit not found")

    # --- Status ---
    old_deployed = unit.deployed
    old_out_for_cal = unit.out_for_calibration
    if status == "deployed":
        unit.deployed = True
        unit.out_for_calibration = False
    elif status == "out_for_calibration":
        unit.deployed = False
        unit.out_for_calibration = True
    else:
        unit.deployed = False
        unit.out_for_calibration = False

    if unit.deployed != old_deployed or unit.out_for_calibration != old_out_for_cal:
        old_status = "deployed" if old_deployed else ("out_for_calibration" if old_out_for_cal else "benched")
        db.add(UnitHistory(
            unit_id=unit_id,
            change_type="deployed_change",
            field_name="status",
            old_value=old_status,
            new_value=status,
            source="manual",
        ))

    # --- Last calibrated ---
    old_cal = unit.last_calibrated
    if last_calibrated:
        try:
            new_cal = datetime.strptime(last_calibrated, "%Y-%m-%d").date()
        except ValueError:
            raise HTTPException(status_code=400, detail="Invalid date format. Use YYYY-MM-DD")
        unit.last_calibrated = new_cal
        unit.next_calibration_due = new_cal + timedelta(days=_get_calibration_interval(db))
    else:
        unit.last_calibrated = None
        unit.next_calibration_due = None

    if unit.last_calibrated != old_cal:
        db.add(UnitHistory(
            unit_id=unit_id,
            change_type="calibration_status_change",
            field_name="last_calibrated",
            old_value=old_cal.strftime("%Y-%m-%d") if old_cal else None,
            new_value=last_calibrated or None,
            source="manual",
        ))

    # --- Note ---
    old_note = unit.note
    unit.note = note or None
    if unit.note != old_note:
        db.add(UnitHistory(
            unit_id=unit_id,
            change_type="note_change",
            field_name="note",
            old_value=old_note,
            new_value=unit.note,
            source="manual",
        ))

    db.commit()
    db.refresh(unit)

    return templates.TemplateResponse("partials/seismo_row_view.html", _row_context(request, unit))
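The /units endpoint above drives the dashboard's filter and sort controls and returns a ready-to-swap HTML partial. A small client sketch, assuming the app serves on http://localhost:8000:

    import httpx

    # Deployed seismographs, most recently calibrated first
    html = httpx.get(
        "http://localhost:8000/api/seismo-dashboard/units",
        params={"status": "deployed", "sort": "last_calibrated", "order": "desc"},
    ).text  # HTML partial, intended for htmx-style swaps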
@@ -100,6 +100,14 @@ def get_all_roster_units(db: Session = Depends(get_db)):
        "ip_address": unit.ip_address or "",
        "phone_number": unit.phone_number or "",
        "hardware_model": unit.hardware_model or "",
        "slm_host": unit.slm_host or "",
        "slm_tcp_port": unit.slm_tcp_port,
        "slm_model": unit.slm_model or "",
        "slm_serial_number": unit.slm_serial_number or "",
        "slm_frequency_weighting": unit.slm_frequency_weighting or "",
        "slm_time_weighting": unit.slm_time_weighting or "",
        "slm_measurement_range": unit.slm_measurement_range or "",
        "slm_last_check": unit.slm_last_check.isoformat() if unit.slm_last_check else None,
        "last_updated": unit.last_updated.isoformat() if unit.last_updated else None
    } for unit in units]
@@ -469,3 +477,75 @@ async def upload_snapshot(file: UploadFile = File(...)):

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Upload failed: {str(e)}")


# ============================================================================
# SLMM SYNC ENDPOINTS
# ============================================================================

@router.post("/slmm/sync-all")
async def sync_all_slms(db: Session = Depends(get_db)):
    """
    Manually trigger full sync of all SLM devices from Terra-View roster to SLMM.

    This ensures the SLMM database matches the Terra-View roster (source of truth).
    Also cleans up orphaned devices in SLMM that are not in Terra-View.
    """
    from backend.services.slmm_sync import sync_all_slms_to_slmm, cleanup_orphaned_slmm_devices

    try:
        # Sync all SLMs
        sync_results = await sync_all_slms_to_slmm(db)

        # Clean up orphaned devices
        cleanup_results = await cleanup_orphaned_slmm_devices(db)

        return {
            "status": "ok",
            "sync": sync_results,
            "cleanup": cleanup_results
        }

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Sync failed: {str(e)}")


@router.get("/slmm/status")
async def get_slmm_sync_status(db: Session = Depends(get_db)):
    """
    Get status of SLMM synchronization.

    Shows which devices are in the Terra-View roster vs the SLMM database.
    """
    from backend.services.slmm_sync import get_slmm_devices

    try:
        # Get devices from both systems
        roster_slms = db.query(RosterUnit).filter_by(device_type="slm").all()
        slmm_devices = await get_slmm_devices()

        if slmm_devices is None:
            raise HTTPException(status_code=503, detail="SLMM service unavailable")

        # SLMM keys devices by roster unit id
        roster_unit_ids = {unit.id for unit in roster_slms}
        slmm_unit_ids = set(slmm_devices)

        # Find differences
        in_roster_only = roster_unit_ids - slmm_unit_ids
        in_slmm_only = slmm_unit_ids - roster_unit_ids
        in_both = roster_unit_ids & slmm_unit_ids

        return {
            "status": "ok",
            "terra_view_total": len(roster_unit_ids),
            "slmm_total": len(slmm_unit_ids),
            "synced": len(in_both),
            "missing_from_slmm": list(in_roster_only),
            "orphaned_in_slmm": list(in_slmm_only),
            "in_sync": len(in_roster_only) == 0 and len(in_slmm_only) == 0
        }

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Status check failed: {str(e)}")
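A sketch of an operator workflow against the two sync endpoints above. The http://localhost:8000 base URL and the /api/roster mount prefix are assumptions; this router's actual prefix is defined earlier in the file.

    import httpx

    # Hypothetical prefix /api/roster; drift check first, full sync only if needed
    status = httpx.get("http://localhost:8000/api/roster/slmm/status").json()
    if not status["in_sync"]:
        result = httpx.post("http://localhost:8000/api/roster/slmm/sync-all").json()
        print(result["sync"], result["cleanup"])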
backend/routers/slm_dashboard.py (new file, 363 lines)
@@ -0,0 +1,363 @@
"""
SLM Dashboard Router

Provides API endpoints for the Sound Level Meters dashboard page.
"""

from fastapi import APIRouter, Request, Depends, Query
from fastapi.responses import HTMLResponse
from sqlalchemy.orm import Session
from sqlalchemy import func
from datetime import datetime, timedelta
import asyncio
import httpx
import logging
import os

from backend.database import get_db
from backend.models import RosterUnit
from backend.routers.roster_edit import sync_slm_to_slmm_cache
from backend.templates_config import templates

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/api/slm-dashboard", tags=["slm-dashboard"])

# SLMM backend URL - configurable via environment variable
SLMM_BASE_URL = os.getenv("SLMM_BASE_URL", "http://localhost:8100")


@router.get("/stats", response_class=HTMLResponse)
async def get_slm_stats(request: Request, db: Session = Depends(get_db)):
    """
    Get summary statistics for the SLM dashboard.
    Returns HTML partial with stat cards.
    """
    # Query all SLMs
    all_slms = db.query(RosterUnit).filter_by(device_type="slm").all()

    # Count deployed vs benched
    deployed_count = sum(1 for slm in all_slms if slm.deployed and not slm.retired)
    benched_count = sum(1 for slm in all_slms if not slm.deployed and not slm.retired)
    retired_count = sum(1 for slm in all_slms if slm.retired)

    # Count recently active (checked in last hour)
    one_hour_ago = datetime.utcnow() - timedelta(hours=1)
    active_count = sum(1 for slm in all_slms
                       if slm.slm_last_check and slm.slm_last_check > one_hour_ago)

    return templates.TemplateResponse("partials/slm_stats.html", {
        "request": request,
        "total_count": len(all_slms),
        "deployed_count": deployed_count,
        "benched_count": benched_count,
        "active_count": active_count,
        "retired_count": retired_count
    })


@router.get("/units", response_class=HTMLResponse)
async def get_slm_units(
    request: Request,
    db: Session = Depends(get_db),
    search: str = Query(None),
    project: str = Query(None),
    include_measurement: bool = Query(False),
):
    """
    Get list of SLM units for the sidebar.
    Returns HTML partial with unit cards.
    """
    query = db.query(RosterUnit).filter_by(device_type="slm")

    # Filter by project if provided
    if project:
        query = query.filter(RosterUnit.project_id == project)

    # Filter by search term if provided
    if search:
        search_term = f"%{search}%"
        query = query.filter(
            (RosterUnit.id.like(search_term)) |
            (RosterUnit.slm_model.like(search_term)) |
            (RosterUnit.address.like(search_term))
        )

    units = query.order_by(
        RosterUnit.retired.asc(),
        RosterUnit.deployed.desc(),
        RosterUnit.id.asc()
    ).all()

    one_hour_ago = datetime.utcnow() - timedelta(hours=1)
    for unit in units:
        unit.is_recent = bool(unit.slm_last_check and unit.slm_last_check > one_hour_ago)

    if include_measurement:
        async def fetch_measurement_state(client: httpx.AsyncClient, unit_id: str) -> str | None:
            try:
                response = await client.get(f"{SLMM_BASE_URL}/api/nl43/{unit_id}/measurement-state")
                if response.status_code == 200:
                    return response.json().get("measurement_state")
            except Exception:
                return None
            return None

        deployed_units = [unit for unit in units if unit.deployed and not unit.retired]
        if deployed_units:
            async with httpx.AsyncClient(timeout=3.0) as client:
                tasks = [fetch_measurement_state(client, unit.id) for unit in deployed_units]
                results = await asyncio.gather(*tasks, return_exceptions=True)

            for unit, state in zip(deployed_units, results):
                if isinstance(state, Exception):
                    unit.measurement_state = None
                else:
                    unit.measurement_state = state

    return templates.TemplateResponse("partials/slm_device_list.html", {
        "request": request,
        "units": units
    })


@router.get("/live-view/{unit_id}", response_class=HTMLResponse)
async def get_live_view(request: Request, unit_id: str, db: Session = Depends(get_db)):
    """
    Get live view panel for a specific SLM unit.
    Returns HTML partial with live metrics and chart.
    """
    # Get unit from database
    unit = db.query(RosterUnit).filter_by(id=unit_id, device_type="slm").first()

    if not unit:
        return templates.TemplateResponse("partials/slm_live_view_error.html", {
            "request": request,
            "error": f"Unit {unit_id} not found"
        })

    # Get modem information if assigned
    modem = None
    modem_ip = None
    if unit.deployed_with_modem_id:
        modem = db.query(RosterUnit).filter_by(id=unit.deployed_with_modem_id, device_type="modem").first()
        if modem:
            modem_ip = modem.ip_address
        else:
            logger.warning(f"SLM {unit_id} is assigned to modem {unit.deployed_with_modem_id} but modem not found")

    # Fall back to direct slm_host if no modem assigned (backward compatibility)
    if not modem_ip and unit.slm_host:
        modem_ip = unit.slm_host
        logger.info(f"Using legacy slm_host for {unit_id}: {modem_ip}")

    # Try to get current status from SLMM
    current_status = None
    measurement_state = None
    is_measuring = False

    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            # Get measurement state
            state_response = await client.get(
                f"{SLMM_BASE_URL}/api/nl43/{unit_id}/measurement-state"
            )
            if state_response.status_code == 200:
                state_data = state_response.json()
                measurement_state = state_data.get("measurement_state", "Unknown")
                is_measuring = state_data.get("is_measuring", False)

            # Get live status (measurement_start_time is already stored in SLMM database)
            status_response = await client.get(
                f"{SLMM_BASE_URL}/api/nl43/{unit_id}/live"
            )
            if status_response.status_code == 200:
                status_data = status_response.json()
                current_status = status_data.get("data", {})
    except Exception as e:
        logger.error(f"Failed to get status for {unit_id}: {e}")

    return templates.TemplateResponse("partials/slm_live_view.html", {
        "request": request,
        "unit": unit,
        "modem": modem,
        "modem_ip": modem_ip,
        "current_status": current_status,
        "measurement_state": measurement_state,
        "is_measuring": is_measuring
    })


@router.post("/control/{unit_id}/{action}")
async def control_slm(unit_id: str, action: str):
    """
    Send control commands to SLM (start, stop, pause, resume, reset).
    Proxies to SLMM backend.
    """
    valid_actions = ["start", "stop", "pause", "resume", "reset"]

    if action not in valid_actions:
        return {"status": "error", "detail": f"Invalid action. Must be one of: {valid_actions}"}

    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            response = await client.post(
                f"{SLMM_BASE_URL}/api/nl43/{unit_id}/{action}"
            )

            if response.status_code == 200:
                return response.json()
            else:
                return {
                    "status": "error",
                    "detail": f"SLMM returned status {response.status_code}"
                }
    except Exception as e:
        logger.error(f"Failed to control {unit_id}: {e}")
        return {
            "status": "error",
            "detail": str(e)
        }


@router.get("/config/{unit_id}", response_class=HTMLResponse)
async def get_slm_config(request: Request, unit_id: str, db: Session = Depends(get_db)):
    """
    Get configuration form for a specific SLM unit.
    Returns HTML partial with configuration form.
    """
    unit = db.query(RosterUnit).filter_by(id=unit_id, device_type="slm").first()

    if not unit:
        return HTMLResponse(
            content=f'<div class="text-red-500">Unit {unit_id} not found</div>',
            status_code=404
        )

    return templates.TemplateResponse("partials/slm_config_form.html", {
        "request": request,
        "unit": unit
    })


@router.post("/config/{unit_id}")
async def save_slm_config(request: Request, unit_id: str, db: Session = Depends(get_db)):
    """
    Save SLM configuration.
    Updates unit parameters in the database.
    """
    unit = db.query(RosterUnit).filter_by(id=unit_id, device_type="slm").first()

    if not unit:
        return {"status": "error", "detail": f"Unit {unit_id} not found"}

    try:
        # Get form data
        form_data = await request.form()

        # Update SLM-specific fields
        unit.slm_model = form_data.get("slm_model") or None
        unit.slm_serial_number = form_data.get("slm_serial_number") or None
        unit.slm_frequency_weighting = form_data.get("slm_frequency_weighting") or None
        unit.slm_time_weighting = form_data.get("slm_time_weighting") or None
        unit.slm_measurement_range = form_data.get("slm_measurement_range") or None

        # Update network configuration
        modem_id = form_data.get("deployed_with_modem_id")
        unit.deployed_with_modem_id = modem_id if modem_id else None

        # Always update TCP and FTP ports (used regardless of modem assignment)
        unit.slm_tcp_port = int(form_data.get("slm_tcp_port")) if form_data.get("slm_tcp_port") else None
        unit.slm_ftp_port = int(form_data.get("slm_ftp_port")) if form_data.get("slm_ftp_port") else None

        # Only update direct IP if no modem is assigned
        if not modem_id:
            unit.slm_host = form_data.get("slm_host") or None
        else:
            # Clear legacy direct IP field when modem is assigned
            unit.slm_host = None

        db.commit()
        logger.info(f"Updated configuration for SLM {unit_id}")

        # Sync updated configuration to SLMM cache
        logger.info(f"Syncing SLM {unit_id} config changes to SLMM cache...")
        result = await sync_slm_to_slmm_cache(
            unit_id=unit_id,
            host=unit.slm_host,  # Use the updated host from Terra-View
            tcp_port=unit.slm_tcp_port,
            ftp_port=unit.slm_ftp_port,
            deployed_with_modem_id=unit.deployed_with_modem_id,  # Resolve modem IP if assigned
            db=db
        )

        if not result["success"]:
            logger.warning(f"SLMM cache sync warning for {unit_id}: {result['message']}")
            # Config is still saved in Terra-View (source of truth)

        return {"status": "success", "unit_id": unit_id}

    except Exception as e:
        db.rollback()
        logger.error(f"Failed to save config for {unit_id}: {e}")
        return {"status": "error", "detail": str(e)}


@router.get("/test-modem/{modem_id}")
async def test_modem_connection(modem_id: str, db: Session = Depends(get_db)):
    """
    Test modem connectivity with a simple ping/health check.
    Returns response time and connection status.
    """
    import subprocess
    import time

    # Get modem from database
    modem = db.query(RosterUnit).filter_by(id=modem_id, device_type="modem").first()

    if not modem:
        return {"status": "error", "detail": f"Modem {modem_id} not found"}

    if not modem.ip_address:
        return {"status": "error", "detail": f"Modem {modem_id} has no IP address configured"}

    try:
        # Ping the modem (1 packet, 2 second timeout)
        start_time = time.time()
        result = subprocess.run(
            ["ping", "-c", "1", "-W", "2", modem.ip_address],
            capture_output=True,
            text=True,
            timeout=3
        )
        response_time = int((time.time() - start_time) * 1000)  # Convert to milliseconds

        if result.returncode == 0:
            return {
                "status": "success",
                "modem_id": modem_id,
                "ip_address": modem.ip_address,
                "response_time": response_time,
                "message": "Modem is responding to ping"
            }
        else:
            return {
                "status": "error",
                "modem_id": modem_id,
                "ip_address": modem.ip_address,
                "detail": "Modem not responding to ping"
            }

    except subprocess.TimeoutExpired:
        return {
            "status": "error",
            "modem_id": modem_id,
            "ip_address": modem.ip_address,
            "detail": "Ping timeout (> 2 seconds)"
        }
    except Exception as e:
        logger.error(f"Failed to ping modem {modem_id}: {e}")
        return {
            "status": "error",
            "modem_id": modem_id,
            "detail": str(e)
        }
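The include_measurement branch above fans out one short-timeout SLMM request per deployed unit with asyncio.gather, so one offline meter cannot stall the whole list. A standalone sketch of the same pattern; the unit ids and base URL are placeholders:

    import asyncio
    import httpx

    SLMM_BASE_URL = "http://localhost:8100"  # assumed SLMM address

    async def measurement_states(unit_ids: list[str]) -> dict[str, str | None]:
        async def fetch(client: httpx.AsyncClient, uid: str) -> str | None:
            try:
                r = await client.get(f"{SLMM_BASE_URL}/api/nl43/{uid}/measurement-state")
                return r.json().get("measurement_state") if r.status_code == 200 else None
            except httpx.HTTPError:
                return None  # one slow/offline meter must not break the page

        async with httpx.AsyncClient(timeout=3.0) as client:
            states = await asyncio.gather(*(fetch(client, u) for u in unit_ids))
        return dict(zip(unit_ids, states))

    # asyncio.run(measurement_states(["NL43-001", "NL43-002"]))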
backend/routers/slm_ui.py (new file, 122 lines)
@@ -0,0 +1,122 @@
"""
Sound Level Meter UI Router

Provides endpoints for SLM dashboard cards, detail pages, and real-time data.
"""

from fastapi import APIRouter, Depends, HTTPException, Request
from fastapi.responses import HTMLResponse
from sqlalchemy.orm import Session
from datetime import datetime
import httpx
import logging
import os

from backend.database import get_db
from backend.models import RosterUnit
from backend.templates_config import templates

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/slm", tags=["slm-ui"])

SLMM_BASE_URL = os.getenv("SLMM_BASE_URL", "http://172.19.0.1:8100")


@router.get("/{unit_id}", response_class=HTMLResponse)
async def slm_detail_page(request: Request, unit_id: str, db: Session = Depends(get_db)):
    """Sound level meter detail page with controls."""

    # Get roster unit
    unit = db.query(RosterUnit).filter_by(id=unit_id).first()
    if not unit or unit.device_type != "slm":
        raise HTTPException(status_code=404, detail="Sound level meter not found")

    return templates.TemplateResponse("slm_detail.html", {
        "request": request,
        "unit": unit,
        "unit_id": unit_id
    })


@router.get("/api/{unit_id}/summary")
async def get_slm_summary(unit_id: str, db: Session = Depends(get_db)):
    """Get SLM summary data for dashboard card."""

    # Get roster unit
    unit = db.query(RosterUnit).filter_by(id=unit_id).first()
    if not unit or unit.device_type != "slm":
        raise HTTPException(status_code=404, detail="Sound level meter not found")

    # Try to get live status from SLMM
    status_data = None
    try:
        async with httpx.AsyncClient(timeout=3.0) as client:
            response = await client.get(f"{SLMM_BASE_URL}/api/nl43/{unit_id}/status")
            if response.status_code == 200:
                status_data = response.json().get("data")
    except Exception as e:
        logger.warning(f"Failed to get SLM status for {unit_id}: {e}")

    return {
        "unit_id": unit_id,
        "device_type": "slm",
        "deployed": unit.deployed,
        "model": unit.slm_model or "NL-43",
        "location": unit.address or unit.location,
        "coordinates": unit.coordinates,
        "note": unit.note,
        "status": status_data,
        "last_check": unit.slm_last_check.isoformat() if unit.slm_last_check else None,
    }


@router.get("/partials/{unit_id}/card", response_class=HTMLResponse)
async def slm_dashboard_card(request: Request, unit_id: str, db: Session = Depends(get_db)):
    """Render SLM dashboard card partial."""

    summary = await get_slm_summary(unit_id, db)

    return templates.TemplateResponse("partials/slm_card.html", {
        "request": request,
        "slm": summary
    })


@router.get("/partials/{unit_id}/controls", response_class=HTMLResponse)
async def slm_controls_partial(request: Request, unit_id: str, db: Session = Depends(get_db)):
    """Render SLM control panel partial."""

    unit = db.query(RosterUnit).filter_by(id=unit_id).first()
    if not unit or unit.device_type != "slm":
        raise HTTPException(status_code=404, detail="Sound level meter not found")

    # Get current status from SLMM
    measurement_state = None
    battery_level = None
    try:
        async with httpx.AsyncClient(timeout=3.0) as client:
            # Get measurement state
            state_response = await client.get(
                f"{SLMM_BASE_URL}/api/nl43/{unit_id}/measurement-state"
            )
            if state_response.status_code == 200:
                measurement_state = state_response.json().get("measurement_state")

            # Get battery level
            battery_response = await client.get(
                f"{SLMM_BASE_URL}/api/nl43/{unit_id}/battery"
            )
            if battery_response.status_code == 200:
                battery_level = battery_response.json().get("battery_level")
    except Exception as e:
        logger.warning(f"Failed to get SLM control data for {unit_id}: {e}")

    return templates.TemplateResponse("partials/slm_controls.html", {
        "request": request,
        "unit_id": unit_id,
        "unit": unit,
        "measurement_state": measurement_state,
        "battery_level": battery_level,
        "is_measuring": measurement_state == "Start"
    })
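The card partial above is just the summary JSON rendered through a template, and the JSON is also reachable directly. A sketch assuming the app serves on http://localhost:8000 and a unit "NL43-001" exists:

    import httpx

    summary = httpx.get("http://localhost:8000/slm/api/NL43-001/summary").json()
    print(summary["model"], summary["deployed"], summary["last_check"])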
backend/routers/slmm.py (new file, 301 lines)
@@ -0,0 +1,301 @@
"""
SLMM (Sound Level Meter Manager) Proxy Router

Proxies requests from SFM to the standalone SLMM backend service.
SLMM runs on port 8100 and handles NL43/NL53 sound level meter communication.
"""

from fastapi import APIRouter, HTTPException, Request, Response, WebSocket, WebSocketDisconnect
from fastapi.responses import StreamingResponse
import httpx
import websockets
import asyncio
import logging
import os

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/api/slmm", tags=["slmm"])

# SLMM backend URL - configurable via environment variable
SLMM_BASE_URL = os.getenv("SLMM_BASE_URL", "http://localhost:8100")
# WebSocket URL derived from HTTP URL
SLMM_WS_BASE_URL = SLMM_BASE_URL.replace("http://", "ws://").replace("https://", "wss://")


@router.get("/health")
async def check_slmm_health():
    """
    Check if the SLMM backend service is reachable and healthy.
    """
    try:
        async with httpx.AsyncClient(timeout=5.0) as client:
            response = await client.get(f"{SLMM_BASE_URL}/health")

            if response.status_code == 200:
                data = response.json()
                return {
                    "status": "ok",
                    "slmm_status": "connected",
                    "slmm_url": SLMM_BASE_URL,
                    "slmm_version": data.get("version", "unknown"),
                    "slmm_response": data
                }
            else:
                return {
                    "status": "degraded",
                    "slmm_status": "error",
                    "slmm_url": SLMM_BASE_URL,
                    "detail": f"SLMM returned status {response.status_code}"
                }

    except httpx.ConnectError:
        return {
            "status": "error",
            "slmm_status": "unreachable",
            "slmm_url": SLMM_BASE_URL,
            "detail": "Cannot connect to SLMM backend. Is it running?"
        }
    except Exception as e:
        return {
            "status": "error",
            "slmm_status": "error",
            "slmm_url": SLMM_BASE_URL,
            "detail": str(e)
        }


# WebSocket routes MUST come before the catch-all route
@router.websocket("/{unit_id}/stream")
async def proxy_websocket_stream(websocket: WebSocket, unit_id: str):
    """
    Proxy WebSocket connections to SLMM's /stream endpoint.

    This allows real-time streaming of measurement data from NL43 devices
    through the SFM unified interface.
    """
    await websocket.accept()
    logger.info(f"WebSocket connection accepted for SLMM unit {unit_id}")

    # Build target WebSocket URL
    target_ws_url = f"{SLMM_WS_BASE_URL}/api/nl43/{unit_id}/stream"
    logger.info(f"Connecting to SLMM WebSocket: {target_ws_url}")

    backend_ws = None

    try:
        # Connect to SLMM backend WebSocket
        backend_ws = await websockets.connect(target_ws_url)
        logger.info(f"Connected to SLMM backend WebSocket for {unit_id}")

        # Create tasks for bidirectional communication
        async def forward_to_backend():
            """Forward messages from client to SLMM backend"""
            try:
                while True:
                    data = await websocket.receive_text()
                    await backend_ws.send(data)
            except WebSocketDisconnect:
                logger.info(f"Client WebSocket disconnected for {unit_id}")
            except Exception as e:
                logger.error(f"Error forwarding to backend: {e}")

        async def forward_to_client():
            """Forward messages from SLMM backend to client"""
            try:
                async for message in backend_ws:
                    await websocket.send_text(message)
            except websockets.exceptions.ConnectionClosed:
                logger.info(f"Backend WebSocket closed for {unit_id}")
            except Exception as e:
                logger.error(f"Error forwarding to client: {e}")

        # Run both forwarding tasks concurrently
        await asyncio.gather(
            forward_to_backend(),
            forward_to_client(),
            return_exceptions=True
        )

    except websockets.exceptions.WebSocketException as e:
        logger.error(f"WebSocket error connecting to SLMM backend: {e}")
        try:
            await websocket.send_json({
                "error": "Failed to connect to SLMM backend",
                "detail": str(e)
            })
        except Exception:
            pass
    except Exception as e:
        logger.error(f"Unexpected error in WebSocket proxy for {unit_id}: {e}")
        try:
            await websocket.send_json({
                "error": "Internal server error",
                "detail": str(e)
            })
        except Exception:
            pass
    finally:
        # Clean up connections
        if backend_ws:
            try:
                await backend_ws.close()
            except Exception:
                pass
        try:
            await websocket.close()
        except Exception:
            pass
        logger.info(f"WebSocket proxy closed for {unit_id}")


@router.websocket("/{unit_id}/live")
async def proxy_websocket_live(websocket: WebSocket, unit_id: str):
    """
    Proxy WebSocket connections to SLMM's /live endpoint.

    Alternative WebSocket endpoint that may be used by some frontend components.
    """
    await websocket.accept()
    logger.info(f"WebSocket connection accepted for SLMM unit {unit_id} (live endpoint)")

    # Build target WebSocket URL - use /stream, since that is where SLMM serves WebSocket traffic
    target_ws_url = f"{SLMM_WS_BASE_URL}/api/nl43/{unit_id}/stream"
    logger.info(f"Connecting to SLMM WebSocket: {target_ws_url}")

    backend_ws = None

    try:
        # Connect to SLMM backend WebSocket
        backend_ws = await websockets.connect(target_ws_url)
        logger.info(f"Connected to SLMM backend WebSocket for {unit_id} (live endpoint)")

        # Create tasks for bidirectional communication
        async def forward_to_backend():
            """Forward messages from client to SLMM backend"""
            try:
                while True:
                    data = await websocket.receive_text()
                    await backend_ws.send(data)
            except WebSocketDisconnect:
                logger.info(f"Client WebSocket disconnected for {unit_id} (live)")
            except Exception as e:
                logger.error(f"Error forwarding to backend (live): {e}")

        async def forward_to_client():
            """Forward messages from SLMM backend to client"""
            try:
                async for message in backend_ws:
                    await websocket.send_text(message)
            except websockets.exceptions.ConnectionClosed:
                logger.info(f"Backend WebSocket closed for {unit_id} (live)")
            except Exception as e:
                logger.error(f"Error forwarding to client (live): {e}")

        # Run both forwarding tasks concurrently
        await asyncio.gather(
            forward_to_backend(),
            forward_to_client(),
            return_exceptions=True
        )

    except websockets.exceptions.WebSocketException as e:
        logger.error(f"WebSocket error connecting to SLMM backend (live): {e}")
        try:
            await websocket.send_json({
                "error": "Failed to connect to SLMM backend",
                "detail": str(e)
            })
        except Exception:
            pass
    except Exception as e:
        logger.error(f"Unexpected error in WebSocket proxy for {unit_id} (live): {e}")
        try:
            await websocket.send_json({
                "error": "Internal server error",
                "detail": str(e)
            })
        except Exception:
            pass
    finally:
        # Clean up connections
        if backend_ws:
            try:
                await backend_ws.close()
            except Exception:
                pass
        try:
            await websocket.close()
        except Exception:
            pass
        logger.info(f"WebSocket proxy closed for {unit_id} (live)")


# HTTP catch-all route MUST come after specific routes (including WebSocket routes)
@router.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE", "PATCH"])
async def proxy_to_slmm(path: str, request: Request):
    """
    Proxy all requests to the SLMM backend service.

    This allows SFM to act as a unified frontend for all device types,
    while SLMM remains a standalone backend service.
    """
    # Build target URL
    target_url = f"{SLMM_BASE_URL}/api/nl43/{path}"

    # Get query parameters
    query_params = dict(request.query_params)

    # Get request body if present
    body = None
    if request.method in ["POST", "PUT", "PATCH"]:
        try:
            body = await request.body()
        except Exception as e:
            logger.error(f"Failed to read request body: {e}")
            body = None

    # Get headers (exclude host and other proxy-specific headers)
    headers = dict(request.headers)
    headers_to_exclude = ["host", "content-length", "transfer-encoding", "connection"]
    proxy_headers = {k: v for k, v in headers.items() if k.lower() not in headers_to_exclude}

    logger.info(f"Proxying {request.method} request to SLMM: {target_url}")

    try:
        async with httpx.AsyncClient(timeout=30.0) as client:
            # Forward the request to SLMM
            response = await client.request(
                method=request.method,
                url=target_url,
                params=query_params,
                headers=proxy_headers,
                content=body
            )

            # Return the response from SLMM
            return Response(
                content=response.content,
                status_code=response.status_code,
                headers=dict(response.headers),
                media_type=response.headers.get("content-type")
            )

    except httpx.ConnectError:
        logger.error(f"Failed to connect to SLMM backend at {SLMM_BASE_URL}")
        raise HTTPException(
            status_code=503,
            detail=f"SLMM backend service unavailable. Is SLMM running on {SLMM_BASE_URL}?"
        )
    except httpx.TimeoutException:
        logger.error(f"Timeout connecting to SLMM backend at {SLMM_BASE_URL}")
        raise HTTPException(
            status_code=504,
            detail="SLMM backend timeout"
        )
    except Exception as e:
        logger.error(f"Error proxying to SLMM: {e}")
        raise HTTPException(
            status_code=500,
            detail=f"Failed to proxy request to SLMM: {str(e)}"
        )
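A client-side sketch of consuming the proxied stream above; the /api/slmm/{unit_id}/stream path matches the router prefix and route in this file, while the localhost:8000 host and the unit id are placeholders:

    import asyncio
    import websockets

    async def tail_stream(unit_id: str):
        uri = f"ws://localhost:8000/api/slmm/{unit_id}/stream"
        async with websockets.connect(uri) as ws:
            async for message in ws:  # frames forwarded verbatim from SLMM
                print(message)

    # asyncio.run(tail_stream("NL43-001"))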
@@ -5,6 +5,7 @@ from typing import Dict, Any

from backend.database import get_db
from backend.services.snapshot import emit_status_snapshot
from backend.models import RosterUnit

router = APIRouter(prefix="/api", tags=["units"])
@@ -42,3 +43,32 @@ def get_unit_detail(unit_id: str, db: Session = Depends(get_db)):
        "note": unit_data.get("note", ""),
        "coordinates": coords
    }


@router.get("/units/{unit_id}")
def get_unit_by_id(unit_id: str, db: Session = Depends(get_db)):
    """
    Get unit data directly from the roster (for settings/configuration).
    """
    unit = db.query(RosterUnit).filter_by(id=unit_id).first()

    if not unit:
        raise HTTPException(status_code=404, detail=f"Unit {unit_id} not found")

    return {
        "id": unit.id,
        "unit_type": unit.unit_type,
        "device_type": unit.device_type,
        "deployed": unit.deployed,
        "retired": unit.retired,
        "note": unit.note,
        "location": unit.location,
        "address": unit.address,
        "coordinates": unit.coordinates,
        "slm_host": unit.slm_host,
        "slm_tcp_port": unit.slm_tcp_port,
        "slm_ftp_port": unit.slm_ftp_port,
        "slm_model": unit.slm_model,
        "slm_serial_number": unit.slm_serial_number,
        "deployed_with_modem_id": unit.deployed_with_modem_id
    }
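Example: read a unit's stored network settings before editing them. The /api/units/{unit_id} path follows from the router prefix shown in the hunk above; the host and unit id are placeholders:

    import httpx

    unit = httpx.get("http://localhost:8000/api/units/NL43-001").json()
    print(unit["slm_host"], unit["slm_tcp_port"], unit["deployed_with_modem_id"])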
backend/routers/watcher_manager.py (new file, 133 lines)
@@ -0,0 +1,133 @@
|
|||||||
|
"""
|
||||||
|
Watcher Manager — admin API for series3-watcher and thor-watcher agents.
|
||||||
|
|
||||||
|
Endpoints:
|
||||||
|
GET /api/admin/watchers — list all watcher agents
|
||||||
|
GET /api/admin/watchers/{agent_id} — get single agent detail
|
||||||
|
POST /api/admin/watchers/{agent_id}/trigger-update — flag agent for update
|
||||||
|
POST /api/admin/watchers/{agent_id}/clear-update — clear update flag
GET /api/admin/watchers/{agent_id}/update-check — polled by watcher on heartbeat

Page:
GET /admin/watchers — HTML admin page
"""

from datetime import datetime, timezone

from fastapi import APIRouter, Depends, HTTPException, Request
from fastapi.responses import HTMLResponse
from pydantic import BaseModel
from sqlalchemy.orm import Session
from typing import Optional

from backend.database import get_db
from backend.models import WatcherAgent
from backend.templates_config import templates

router = APIRouter(tags=["admin"])


# ── helpers ──────────────────────────────────────────────────────────────────

def _agent_to_dict(agent: WatcherAgent) -> dict:
    last_seen = agent.last_seen
    if last_seen:
        now_utc = datetime.utcnow()
        age_minutes = int((now_utc - last_seen).total_seconds() // 60)
        if age_minutes > 60:
            status = "missing"
        else:
            status = "ok"
    else:
        age_minutes = None
        status = "missing"

    return {
        "id": agent.id,
        "source_type": agent.source_type,
        "version": agent.version,
        "last_seen": last_seen.isoformat() if last_seen else None,
        "age_minutes": age_minutes,
        "status": status,
        "ip_address": agent.ip_address,
        "log_tail": agent.log_tail,
        "update_pending": bool(agent.update_pending),
        "update_version": agent.update_version,
    }


# ── API routes ────────────────────────────────────────────────────────────────

@router.get("/api/admin/watchers")
def list_watchers(db: Session = Depends(get_db)):
    agents = db.query(WatcherAgent).order_by(WatcherAgent.last_seen.desc()).all()
    return [_agent_to_dict(a) for a in agents]


@router.get("/api/admin/watchers/{agent_id}")
def get_watcher(agent_id: str, db: Session = Depends(get_db)):
    agent = db.query(WatcherAgent).filter(WatcherAgent.id == agent_id).first()
    if not agent:
        raise HTTPException(status_code=404, detail="Watcher agent not found")
    return _agent_to_dict(agent)


class TriggerUpdateRequest(BaseModel):
    version: Optional[str] = None  # target version label (informational)


@router.post("/api/admin/watchers/{agent_id}/trigger-update")
def trigger_update(agent_id: str, body: TriggerUpdateRequest, db: Session = Depends(get_db)):
    agent = db.query(WatcherAgent).filter(WatcherAgent.id == agent_id).first()
    if not agent:
        raise HTTPException(status_code=404, detail="Watcher agent not found")
    agent.update_pending = True
    agent.update_version = body.version
    db.commit()
    return {"ok": True, "agent_id": agent_id, "update_pending": True}


@router.post("/api/admin/watchers/{agent_id}/clear-update")
def clear_update(agent_id: str, db: Session = Depends(get_db)):
    agent = db.query(WatcherAgent).filter(WatcherAgent.id == agent_id).first()
    if not agent:
        raise HTTPException(status_code=404, detail="Watcher agent not found")
    agent.update_pending = False
    agent.update_version = None
    db.commit()
    return {"ok": True, "agent_id": agent_id, "update_pending": False}


@router.get("/api/admin/watchers/{agent_id}/update-check")
def update_check(agent_id: str, db: Session = Depends(get_db)):
"""
|
||||||
|
Polled by watcher agents on each heartbeat cycle.
|
||||||
|
Returns update_available=True when an update has been triggered via the UI.
|
||||||
|
Automatically clears the flag after the watcher acknowledges it.
|
||||||
|
"""
    agent = db.query(WatcherAgent).filter(WatcherAgent.id == agent_id).first()
    if not agent:
        return {"update_available": False}

    pending = bool(agent.update_pending)

    if pending:
        # Clear the flag — the watcher will now self-update
        agent.update_pending = False
        db.commit()

    return {
        "update_available": pending,
        "version": agent.update_version,
    }


# ── HTML page ─────────────────────────────────────────────────────────────────

@router.get("/admin/watchers", response_class=HTMLResponse)
def admin_watchers_page(request: Request, db: Session = Depends(get_db)):
    agents = db.query(WatcherAgent).order_by(WatcherAgent.last_seen.desc()).all()
    agents_data = [_agent_to_dict(a) for a in agents]
    return templates.TemplateResponse("admin_watchers.html", {
        "request": request,
        "agents": agents_data,
    })
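
Taken together, these endpoints form a simple one-shot update channel. A minimal sketch of the round trip, assuming the service is reachable at http://localhost:8000 and using a hypothetical agent id (neither value comes from this repo):

    import httpx

    BASE = "http://localhost:8000"    # assumed server address
    AGENT = "series3_watcher_site42"  # hypothetical agent id

    # Operator (or the admin UI) flags the agent for update
    httpx.post(f"{BASE}/api/admin/watchers/{AGENT}/trigger-update",
               json={"version": "v0.5.0"}).raise_for_status()

    # Watcher side, once per heartbeat cycle
    resp = httpx.get(f"{BASE}/api/admin/watchers/{AGENT}/update-check").json()
    if resp["update_available"]:
        print("self-updating to", resp["version"])

    # A second poll returns update_available=False: the flag is one-shot.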
@@ -5,7 +5,7 @@ from datetime import datetime
 from typing import Optional, List

 from backend.database import get_db
-from backend.models import Emitter
+from backend.models import Emitter, WatcherAgent

 router = APIRouter()

@@ -107,6 +107,35 @@ def get_fleet_status(db: Session = Depends(get_db)):
     emitters = db.query(Emitter).all()
     return emitters

+
+# ── Watcher agent upsert helper ───────────────────────────────────────────────
+
+def _upsert_watcher_agent(db: Session, source_id: str, source_type: str,
+                          version: str, ip_address: str, log_tail: str,
+                          status: str) -> None:
+    """Create or update the WatcherAgent row for a given source_id."""
+    agent = db.query(WatcherAgent).filter(WatcherAgent.id == source_id).first()
+    if agent:
+        agent.source_type = source_type
+        agent.version = version
+        agent.last_seen = datetime.utcnow()
+        agent.status = status
+        if ip_address:
+            agent.ip_address = ip_address
+        if log_tail is not None:
+            agent.log_tail = log_tail
+    else:
+        agent = WatcherAgent(
+            id=source_id,
+            source_type=source_type,
+            version=version,
+            last_seen=datetime.utcnow(),
+            status=status,
+            ip_address=ip_address,
+            log_tail=log_tail,
+        )
+        db.add(agent)
+
+
 # series3v1.1 Standardized Heartbeat Schema (multi-unit)
 from fastapi import Request

@@ -120,6 +149,11 @@ async def series3_heartbeat(request: Request, db: Session = Depends(get_db)):

     source = payload.get("source_id")
     units = payload.get("units", [])
+    version = payload.get("version")
+    log_tail = payload.get("log_tail")  # list of strings or None
+    import json as _json
+    log_tail_str = _json.dumps(log_tail) if log_tail is not None else None
+    client_ip = request.client.host if request.client else None

     print("\n=== Series 3 Heartbeat ===")
     print("Source:", source)
@@ -182,13 +216,27 @@ async def series3_heartbeat(request: Request, db: Session = Depends(get_db)):

         results.append({"unit": uid, "status": status})

+    if source:
+        _upsert_watcher_agent(db, source, "series3_watcher", version,
+                              client_ip, log_tail_str, "ok")
+
     db.commit()

+    # Check if an update has been triggered for this agent
+    update_available = False
+    if source:
+        agent = db.query(WatcherAgent).filter(WatcherAgent.id == source).first()
+        if agent and agent.update_pending:
+            update_available = True
+            agent.update_pending = False
+            db.commit()
+
     return {
         "message": "Heartbeat processed",
         "source": source,
         "units_processed": len(results),
-        "results": results
+        "results": results,
+        "update_available": update_available,
     }


@@ -219,8 +267,14 @@ async def series4_heartbeat(request: Request, db: Session = Depends(get_db)):
     """
     payload = await request.json()

-    source = payload.get("source", "series4_emitter")
+    # Accept source_id (new standard field) with fallback to legacy "source" key
+    source = payload.get("source_id") or payload.get("source", "series4_emitter")
     units = payload.get("units", [])
+    version = payload.get("version")
+    log_tail = payload.get("log_tail")
+    import json as _json
+    log_tail_str = _json.dumps(log_tail) if log_tail is not None else None
+    client_ip = request.client.host if request.client else None

     print("\n=== Series 4 Heartbeat ===")
     print("Source:", source)
@@ -276,11 +330,25 @@ async def series4_heartbeat(request: Request, db: Session = Depends(get_db)):

         results.append({"unit": uid, "status": status})

+    if source:
+        _upsert_watcher_agent(db, source, "series4_watcher", version,
+                              client_ip, log_tail_str, "ok")
+
     db.commit()

+    # Check if an update has been triggered for this agent
+    update_available = False
+    if source:
+        agent = db.query(WatcherAgent).filter(WatcherAgent.id == source).first()
+        if agent and agent.update_pending:
+            update_available = True
+            agent.update_pending = False
+            db.commit()
+
     return {
         "message": "Heartbeat processed",
         "source": source,
         "units_processed": len(results),
-        "results": results
+        "results": results,
+        "update_available": update_available,
     }
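
Seen from the agent side, the heartbeat contract above is: POST source_id, version, units, and an optional log_tail, then act on update_available in the response. A minimal sketch of that loop; the route path and all payload values are assumptions for illustration:

    import time
    import httpx

    SERVER = "http://localhost:8000"  # assumed server address

    payload = {
        "source_id": "series3_watcher_site42",  # hypothetical agent id
        "version": "v0.4.0",
        "units": [],                            # per-unit readings omitted here
        "log_tail": ["started", "poll ok"],     # optional list of strings
    }

    while True:
        # Route path is assumed; use whatever path this router is mounted on
        resp = httpx.post(f"{SERVER}/heartbeat/series3", json=payload).json()
        if resp.get("update_available"):
            print("update triggered; agent would self-update here")
        time.sleep(60)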
backend/services/alert_service.py (new file, 462 lines)
@@ -0,0 +1,462 @@
"""
Alert Service

Manages in-app alerts for device status changes and system events.
Provides foundation for future notification channels (email, webhook).
"""

import json
import uuid
import logging
from datetime import datetime, timedelta
from typing import Optional, List, Dict, Any

from sqlalchemy.orm import Session
from sqlalchemy import and_, or_

from backend.models import Alert, RosterUnit

logger = logging.getLogger(__name__)


class AlertService:
    """
    Service for managing alerts.

    Handles alert lifecycle:
    - Create alerts from various triggers
    - Query active alerts
    - Acknowledge/resolve/dismiss alerts
    - (Future) Dispatch to notification channels
    """

    def __init__(self, db: Session):
        self.db = db

    def create_alert(
        self,
        alert_type: str,
        title: str,
        message: str = None,
        severity: str = "warning",
        unit_id: str = None,
        project_id: str = None,
        location_id: str = None,
        schedule_id: str = None,
        metadata: dict = None,
        expires_hours: int = 24,
    ) -> Alert:
        """
        Create a new alert.

        Args:
            alert_type: Type of alert (device_offline, device_online, schedule_failed)
            title: Short alert title
            message: Detailed description
            severity: info, warning, or critical
            unit_id: Related unit ID (optional)
            project_id: Related project ID (optional)
            location_id: Related location ID (optional)
            schedule_id: Related schedule ID (optional)
            metadata: Additional JSON data
            expires_hours: Hours until auto-expiry (default 24)

        Returns:
            Created Alert instance
        """
        alert = Alert(
            id=str(uuid.uuid4()),
            alert_type=alert_type,
            title=title,
            message=message,
            severity=severity,
            unit_id=unit_id,
            project_id=project_id,
            location_id=location_id,
            schedule_id=schedule_id,
            alert_metadata=json.dumps(metadata) if metadata else None,
            status="active",
            expires_at=datetime.utcnow() + timedelta(hours=expires_hours),
        )

        self.db.add(alert)
        self.db.commit()
        self.db.refresh(alert)

        logger.info(f"Created alert: {alert.title} ({alert.alert_type})")
        return alert

    def create_device_offline_alert(
        self,
        unit_id: str,
        consecutive_failures: int = 0,
        last_error: str = None,
    ) -> Optional[Alert]:
        """
        Create alert when device becomes unreachable.

        Only creates if no active offline alert exists for this device.

        Args:
            unit_id: The unit that went offline
            consecutive_failures: Number of consecutive poll failures
            last_error: Last error message from polling

        Returns:
            Created Alert or None if alert already exists
        """
        # Check if active offline alert already exists
        existing = self.db.query(Alert).filter(
            and_(
                Alert.unit_id == unit_id,
                Alert.alert_type == "device_offline",
                Alert.status == "active",
            )
        ).first()

        if existing:
            logger.debug(f"Offline alert already exists for {unit_id}")
            return None

        # Get unit info for title
        unit = self.db.query(RosterUnit).filter_by(id=unit_id).first()
        unit_name = unit.id if unit else unit_id

        # Determine severity based on failure count
        severity = "critical" if consecutive_failures >= 5 else "warning"

        return self.create_alert(
            alert_type="device_offline",
            title=f"{unit_name} is offline",
            message=f"Device has been unreachable after {consecutive_failures} failed connection attempts."
                    + (f" Last error: {last_error}" if last_error else ""),
            severity=severity,
            unit_id=unit_id,
            metadata={
                "consecutive_failures": consecutive_failures,
                "last_error": last_error,
            },
            expires_hours=48,  # Offline alerts stay longer
        )

    def resolve_device_offline_alert(self, unit_id: str) -> Optional[Alert]:
"""
|
||||||
|
Auto-resolve offline alert when device comes back online.
|
||||||
|
|
||||||
|
Also creates an "device_online" info alert to notify user.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
unit_id: The unit that came back online
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
The resolved Alert or None if no alert existed
|
||||||
|
"""
        # Find active offline alert
        alert = self.db.query(Alert).filter(
            and_(
                Alert.unit_id == unit_id,
                Alert.alert_type == "device_offline",
                Alert.status == "active",
            )
        ).first()

        if not alert:
            return None

        # Resolve the offline alert
        alert.status = "resolved"
        alert.resolved_at = datetime.utcnow()
        self.db.commit()

        logger.info(f"Resolved offline alert for {unit_id}")

        # Create online notification
        unit = self.db.query(RosterUnit).filter_by(id=unit_id).first()
        unit_name = unit.id if unit else unit_id

        self.create_alert(
            alert_type="device_online",
            title=f"{unit_name} is back online",
            message="Device connection has been restored.",
            severity="info",
            unit_id=unit_id,
            expires_hours=6,  # Info alerts expire quickly
        )

        return alert

    def create_schedule_failed_alert(
        self,
        schedule_id: str,
        action_type: str,
        unit_id: str = None,
        error_message: str = None,
        project_id: str = None,
        location_id: str = None,
    ) -> Alert:
        """
        Create alert when a scheduled action fails.

        Args:
            schedule_id: The ScheduledAction or RecurringSchedule ID
            action_type: start, stop, download, cycle
            unit_id: Related unit
            error_message: Error from execution
            project_id: Related project
            location_id: Related location

        Returns:
            Created Alert
        """
        return self.create_alert(
            alert_type="schedule_failed",
            title=f"Scheduled {action_type} failed",
            message=error_message or f"The scheduled {action_type} action did not complete successfully.",
            severity="warning",
            unit_id=unit_id,
            project_id=project_id,
            location_id=location_id,
            schedule_id=schedule_id,
            metadata={"action_type": action_type},
            expires_hours=24,
        )

    def create_schedule_completed_alert(
        self,
        schedule_id: str,
        action_type: str,
        unit_id: str = None,
        project_id: str = None,
        location_id: str = None,
        metadata: dict = None,
    ) -> Alert:
        """
        Create alert when a scheduled action completes successfully.

        Args:
            schedule_id: The ScheduledAction ID
            action_type: start, stop, download, cycle
            unit_id: Related unit
            project_id: Related project
            location_id: Related location
            metadata: Additional info (e.g., downloaded folder, index numbers)

        Returns:
            Created Alert
        """
        # Build descriptive message based on action type and metadata
        if action_type == "stop" and metadata:
            download_folder = metadata.get("downloaded_folder")
            download_success = metadata.get("download_success", False)
            if download_success and download_folder:
                message = f"Measurement stopped and data downloaded ({download_folder})"
            elif download_success is False and metadata.get("download_attempted"):
                message = "Measurement stopped but download failed"
            else:
                message = "Measurement stopped successfully"
        elif action_type == "start" and metadata:
            new_index = metadata.get("new_index")
            if new_index is not None:
                message = f"Measurement started (index {new_index:04d})"
            else:
                message = "Measurement started successfully"
        else:
            message = f"Scheduled {action_type} completed successfully"

        return self.create_alert(
            alert_type="schedule_completed",
            title=f"Scheduled {action_type} completed",
            message=message,
            severity="info",
            unit_id=unit_id,
            project_id=project_id,
            location_id=location_id,
            schedule_id=schedule_id,
            metadata={"action_type": action_type, **(metadata or {})},
            expires_hours=12,  # Info alerts expire quickly
        )

    def get_active_alerts(
        self,
        project_id: str = None,
        unit_id: str = None,
        alert_type: str = None,
        min_severity: str = None,
        limit: int = 50,
    ) -> List[Alert]:
        """
        Query active alerts with optional filters.

        Args:
            project_id: Filter by project
            unit_id: Filter by unit
            alert_type: Filter by alert type
            min_severity: Minimum severity (info, warning, critical)
            limit: Maximum results

        Returns:
            List of matching alerts
        """
        query = self.db.query(Alert).filter(Alert.status == "active")

        if project_id:
            query = query.filter(Alert.project_id == project_id)

        if unit_id:
            query = query.filter(Alert.unit_id == unit_id)

        if alert_type:
            query = query.filter(Alert.alert_type == alert_type)

        if min_severity:
            # Map severity to numeric for comparison
            severity_levels = {"info": 1, "warning": 2, "critical": 3}
            min_level = severity_levels.get(min_severity, 1)

            if min_level == 2:
                query = query.filter(Alert.severity.in_(["warning", "critical"]))
            elif min_level == 3:
                query = query.filter(Alert.severity == "critical")

        return query.order_by(Alert.created_at.desc()).limit(limit).all()

    def get_all_alerts(
        self,
        status: str = None,
        project_id: str = None,
        unit_id: str = None,
        alert_type: str = None,
        limit: int = 50,
        offset: int = 0,
    ) -> List[Alert]:
        """
        Query all alerts with optional filters (includes non-active).

        Args:
            status: Filter by status (active, acknowledged, resolved, dismissed)
            project_id: Filter by project
            unit_id: Filter by unit
            alert_type: Filter by alert type
            limit: Maximum results
            offset: Pagination offset

        Returns:
            List of matching alerts
        """
        query = self.db.query(Alert)

        if status:
            query = query.filter(Alert.status == status)

        if project_id:
            query = query.filter(Alert.project_id == project_id)

        if unit_id:
            query = query.filter(Alert.unit_id == unit_id)

        if alert_type:
            query = query.filter(Alert.alert_type == alert_type)

        return (
            query.order_by(Alert.created_at.desc())
            .offset(offset)
            .limit(limit)
            .all()
        )

    def get_active_alert_count(self) -> int:
        """Get count of active alerts for badge display."""
        return self.db.query(Alert).filter(Alert.status == "active").count()

    def acknowledge_alert(self, alert_id: str) -> Optional[Alert]:
        """
        Mark alert as acknowledged.

        Args:
            alert_id: Alert to acknowledge

        Returns:
            Updated Alert or None if not found
        """
        alert = self.db.query(Alert).filter_by(id=alert_id).first()
        if not alert:
            return None

        alert.status = "acknowledged"
        alert.acknowledged_at = datetime.utcnow()
        self.db.commit()

        logger.info(f"Acknowledged alert: {alert.title}")
        return alert

    def dismiss_alert(self, alert_id: str) -> Optional[Alert]:
        """
        Dismiss alert (user chose to ignore).

        Args:
            alert_id: Alert to dismiss

        Returns:
            Updated Alert or None if not found
        """
        alert = self.db.query(Alert).filter_by(id=alert_id).first()
        if not alert:
            return None

        alert.status = "dismissed"
        self.db.commit()

        logger.info(f"Dismissed alert: {alert.title}")
        return alert

    def resolve_alert(self, alert_id: str) -> Optional[Alert]:
        """
        Manually resolve an alert.

        Args:
            alert_id: Alert to resolve

        Returns:
            Updated Alert or None if not found
        """
        alert = self.db.query(Alert).filter_by(id=alert_id).first()
        if not alert:
            return None

        alert.status = "resolved"
        alert.resolved_at = datetime.utcnow()
        self.db.commit()

        logger.info(f"Resolved alert: {alert.title}")
        return alert

    def cleanup_expired_alerts(self) -> int:
        """
        Remove alerts past their expiration time.

        Returns:
            Number of alerts cleaned up
        """
        now = datetime.utcnow()
        expired = self.db.query(Alert).filter(
            and_(
                Alert.expires_at.isnot(None),
                Alert.expires_at < now,
                Alert.status == "active",
            )
        ).all()

        count = len(expired)
        for alert in expired:
            alert.status = "dismissed"

        if count > 0:
            self.db.commit()
            logger.info(f"Cleaned up {count} expired alerts")

        return count


def get_alert_service(db: Session) -> AlertService:
    """Get an AlertService instance with the given database session."""
    return AlertService(db)
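
To make the lifecycle concrete, a short sketch of the offline/online round trip, using the SessionLocal factory seen elsewhere in this codebase; the unit id and error text are hypothetical:

    from backend.database import SessionLocal
    from backend.services.alert_service import get_alert_service

    db = SessionLocal()
    try:
        svc = get_alert_service(db)

        # Poller saw 5 consecutive failures, so severity comes out "critical"
        svc.create_device_offline_alert("nl43-001", consecutive_failures=5,
                                        last_error="timeout")  # hypothetical unit
        # While that alert is active, a second call is a no-op
        assert svc.create_device_offline_alert("nl43-001") is None

        # Device recovers: offline alert resolved, "device_online" info alert created
        svc.resolve_device_offline_alert("nl43-001")
        print(svc.get_active_alert_count())
    finally:
        db.close()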
backend/services/device_controller.py (new file, 603 lines)
@@ -0,0 +1,603 @@
"""
Device Controller Service

Routes device operations to the appropriate backend module:
- SLMM for sound level meters
- SFM for seismographs (future implementation)

This abstraction allows the Projects system to work with any device type
without knowing the underlying communication protocol.
"""

from typing import Dict, Any, Optional, List
from backend.services.slmm_client import get_slmm_client, SLMMClientError


class DeviceControllerError(Exception):
    """Base exception for device controller errors."""
    pass


class UnsupportedDeviceTypeError(DeviceControllerError):
    """Raised when device type is not supported."""
    pass


class DeviceController:
    """
    Unified interface for controlling all device types.

    Routes commands to appropriate backend module based on device_type.

    Usage:
        controller = DeviceController()
        await controller.start_recording("nl43-001", "slm", config={})
        await controller.stop_recording("seismo-042", "seismograph")
    """

    def __init__(self):
        self.slmm_client = get_slmm_client()

    # ========================================================================
    # Recording Control
    # ========================================================================

    async def start_recording(
        self,
        unit_id: str,
        device_type: str,
        config: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        """
        Start recording on a device.

        Args:
            unit_id: Unit identifier
            device_type: "slm" | "seismograph"
            config: Device-specific recording configuration

        Returns:
            Response dict from device module

        Raises:
            UnsupportedDeviceTypeError: Device type not supported
            DeviceControllerError: Operation failed
        """
        if device_type == "slm":
            try:
                return await self.slmm_client.start_recording(unit_id, config)
            except SLMMClientError as e:
                raise DeviceControllerError(f"SLMM error: {str(e)}")

        elif device_type == "seismograph":
            # TODO: Implement SFM client for seismograph control
            # For now, return a placeholder response
            return {
                "status": "not_implemented",
                "message": "Seismograph recording control not yet implemented",
                "unit_id": unit_id,
            }

        else:
            raise UnsupportedDeviceTypeError(
                f"Device type '{device_type}' is not supported. "
                f"Supported types: slm, seismograph"
            )

    async def stop_recording(
        self,
        unit_id: str,
        device_type: str,
    ) -> Dict[str, Any]:
        """
        Stop recording on a device.

        Args:
            unit_id: Unit identifier
            device_type: "slm" | "seismograph"

        Returns:
            Response dict from device module
        """
        if device_type == "slm":
            try:
                return await self.slmm_client.stop_recording(unit_id)
            except SLMMClientError as e:
                raise DeviceControllerError(f"SLMM error: {str(e)}")

        elif device_type == "seismograph":
            # TODO: Implement SFM client
            return {
                "status": "not_implemented",
                "message": "Seismograph recording control not yet implemented",
                "unit_id": unit_id,
            }

        else:
            raise UnsupportedDeviceTypeError(f"Unsupported device type: {device_type}")

    async def pause_recording(
        self,
        unit_id: str,
        device_type: str,
    ) -> Dict[str, Any]:
        """
        Pause recording on a device.

        Args:
            unit_id: Unit identifier
            device_type: "slm" | "seismograph"

        Returns:
            Response dict from device module
        """
        if device_type == "slm":
            try:
                return await self.slmm_client.pause_recording(unit_id)
            except SLMMClientError as e:
                raise DeviceControllerError(f"SLMM error: {str(e)}")

        elif device_type == "seismograph":
            return {
                "status": "not_implemented",
                "message": "Seismograph pause not yet implemented",
                "unit_id": unit_id,
            }

        else:
            raise UnsupportedDeviceTypeError(f"Unsupported device type: {device_type}")

    async def resume_recording(
        self,
        unit_id: str,
        device_type: str,
    ) -> Dict[str, Any]:
        """
        Resume paused recording on a device.

        Args:
            unit_id: Unit identifier
            device_type: "slm" | "seismograph"

        Returns:
            Response dict from device module
        """
        if device_type == "slm":
            try:
                return await self.slmm_client.resume_recording(unit_id)
            except SLMMClientError as e:
                raise DeviceControllerError(f"SLMM error: {str(e)}")

        elif device_type == "seismograph":
            return {
                "status": "not_implemented",
                "message": "Seismograph resume not yet implemented",
                "unit_id": unit_id,
            }

        else:
            raise UnsupportedDeviceTypeError(f"Unsupported device type: {device_type}")

    # ========================================================================
    # Status & Monitoring
    # ========================================================================

    async def get_device_status(
        self,
        unit_id: str,
        device_type: str,
    ) -> Dict[str, Any]:
        """
        Get current device status.

        Args:
            unit_id: Unit identifier
            device_type: "slm" | "seismograph"

        Returns:
            Status dict from device module
        """
        if device_type == "slm":
            try:
                return await self.slmm_client.get_unit_status(unit_id)
            except SLMMClientError as e:
                raise DeviceControllerError(f"SLMM error: {str(e)}")

        elif device_type == "seismograph":
            # TODO: Implement SFM status check
            return {
                "status": "not_implemented",
                "message": "Seismograph status not yet implemented",
                "unit_id": unit_id,
            }

        else:
            raise UnsupportedDeviceTypeError(f"Unsupported device type: {device_type}")

    async def get_live_data(
        self,
        unit_id: str,
        device_type: str,
    ) -> Dict[str, Any]:
        """
        Get live data from device.

        Args:
            unit_id: Unit identifier
            device_type: "slm" | "seismograph"

        Returns:
            Live data dict from device module
        """
        if device_type == "slm":
            try:
                return await self.slmm_client.get_live_data(unit_id)
            except SLMMClientError as e:
                raise DeviceControllerError(f"SLMM error: {str(e)}")

        elif device_type == "seismograph":
            return {
                "status": "not_implemented",
                "message": "Seismograph live data not yet implemented",
                "unit_id": unit_id,
            }

        else:
            raise UnsupportedDeviceTypeError(f"Unsupported device type: {device_type}")

    # ========================================================================
    # Data Download
    # ========================================================================

    async def download_files(
        self,
        unit_id: str,
        device_type: str,
        destination_path: str,
        files: Optional[List[str]] = None,
    ) -> Dict[str, Any]:
        """
        Download data files from device.

        Args:
            unit_id: Unit identifier
            device_type: "slm" | "seismograph"
            destination_path: Local path to save files
            files: List of filenames, or None for all

        Returns:
            Download result with file list
        """
        if device_type == "slm":
            try:
                return await self.slmm_client.download_files(
                    unit_id,
                    destination_path,
                    files,
                )
            except SLMMClientError as e:
                raise DeviceControllerError(f"SLMM error: {str(e)}")

        elif device_type == "seismograph":
            # TODO: Implement SFM file download
            return {
                "status": "not_implemented",
                "message": "Seismograph file download not yet implemented",
                "unit_id": unit_id,
            }

        else:
            raise UnsupportedDeviceTypeError(f"Unsupported device type: {device_type}")

    # ========================================================================
    # FTP Control
    # ========================================================================

    async def enable_ftp(
        self,
        unit_id: str,
        device_type: str,
    ) -> Dict[str, Any]:
        """
        Enable FTP server on device.

        Must be called before downloading files.

        Args:
            unit_id: Unit identifier
            device_type: "slm" | "seismograph"

        Returns:
            Response dict with status
        """
        if device_type == "slm":
            try:
                return await self.slmm_client.enable_ftp(unit_id)
            except SLMMClientError as e:
                raise DeviceControllerError(f"SLMM error: {str(e)}")

        elif device_type == "seismograph":
            return {
                "status": "not_implemented",
                "message": "Seismograph FTP not yet implemented",
                "unit_id": unit_id,
            }

        else:
            raise UnsupportedDeviceTypeError(f"Unsupported device type: {device_type}")

    async def disable_ftp(
        self,
        unit_id: str,
        device_type: str,
    ) -> Dict[str, Any]:
        """
        Disable FTP server on device.

        Args:
            unit_id: Unit identifier
            device_type: "slm" | "seismograph"

        Returns:
            Response dict with status
        """
        if device_type == "slm":
            try:
                return await self.slmm_client.disable_ftp(unit_id)
            except SLMMClientError as e:
                raise DeviceControllerError(f"SLMM error: {str(e)}")

        elif device_type == "seismograph":
            return {
                "status": "not_implemented",
                "message": "Seismograph FTP not yet implemented",
                "unit_id": unit_id,
            }

        else:
            raise UnsupportedDeviceTypeError(f"Unsupported device type: {device_type}")

    # ========================================================================
    # Device Configuration
    # ========================================================================

    async def update_device_config(
        self,
        unit_id: str,
        device_type: str,
        config: Dict[str, Any],
    ) -> Dict[str, Any]:
        """
        Update device configuration.

        Args:
            unit_id: Unit identifier
            device_type: "slm" | "seismograph"
            config: Configuration parameters

        Returns:
            Updated config from device module
        """
        if device_type == "slm":
            try:
                return await self.slmm_client.update_unit_config(
                    unit_id,
                    host=config.get("host"),
                    tcp_port=config.get("tcp_port"),
                    ftp_port=config.get("ftp_port"),
                    ftp_username=config.get("ftp_username"),
                    ftp_password=config.get("ftp_password"),
                )
            except SLMMClientError as e:
                raise DeviceControllerError(f"SLMM error: {str(e)}")

        elif device_type == "seismograph":
            return {
                "status": "not_implemented",
                "message": "Seismograph config update not yet implemented",
                "unit_id": unit_id,
            }

        else:
            raise UnsupportedDeviceTypeError(f"Unsupported device type: {device_type}")

    # ========================================================================
    # Store/Index Management
    # ========================================================================

    async def increment_index(
        self,
        unit_id: str,
        device_type: str,
    ) -> Dict[str, Any]:
        """
        Increment the store/index number on a device.

        For SLMs, this increments the store name to prevent "overwrite data?" prompts.
        Should be called before starting a new measurement if auto_increment_index is enabled.

        Args:
            unit_id: Unit identifier
            device_type: "slm" | "seismograph"

        Returns:
            Response dict with old_index and new_index
        """
        if device_type == "slm":
            try:
                return await self.slmm_client.increment_index(unit_id)
            except SLMMClientError as e:
                raise DeviceControllerError(f"SLMM error: {str(e)}")

        elif device_type == "seismograph":
            # Seismographs may not have the same concept of store index
            return {
                "status": "not_applicable",
                "message": "Index increment not applicable for seismographs",
                "unit_id": unit_id,
            }

        else:
            raise UnsupportedDeviceTypeError(f"Unsupported device type: {device_type}")

    async def get_index_number(
        self,
        unit_id: str,
        device_type: str,
    ) -> Dict[str, Any]:
        """
        Get current store/index number from device.

        Args:
            unit_id: Unit identifier
            device_type: "slm" | "seismograph"

        Returns:
            Response dict with current index_number
        """
        if device_type == "slm":
            try:
                return await self.slmm_client.get_index_number(unit_id)
            except SLMMClientError as e:
                raise DeviceControllerError(f"SLMM error: {str(e)}")

        elif device_type == "seismograph":
            return {
                "status": "not_applicable",
                "message": "Index number not applicable for seismographs",
                "unit_id": unit_id,
            }

        else:
            raise UnsupportedDeviceTypeError(f"Unsupported device type: {device_type}")

    # ========================================================================
    # Cycle Commands (for scheduled automation)
    # ========================================================================

    async def start_cycle(
        self,
        unit_id: str,
        device_type: str,
        sync_clock: bool = True,
    ) -> Dict[str, Any]:
        """
        Execute complete start cycle for scheduled automation.

        This handles the full pre-recording workflow:
        1. Sync device clock to server time
        2. Find next safe index (with overwrite protection)
        3. Start measurement

        Args:
            unit_id: Unit identifier
            device_type: "slm" | "seismograph"
            sync_clock: Whether to sync device clock to server time

        Returns:
            Response dict from device module
        """
        if device_type == "slm":
            try:
                return await self.slmm_client.start_cycle(unit_id, sync_clock)
            except SLMMClientError as e:
                raise DeviceControllerError(f"SLMM error: {str(e)}")

        elif device_type == "seismograph":
            return {
                "status": "not_implemented",
                "message": "Seismograph start cycle not yet implemented",
                "unit_id": unit_id,
            }

        else:
            raise UnsupportedDeviceTypeError(f"Unsupported device type: {device_type}")

    async def stop_cycle(
        self,
        unit_id: str,
        device_type: str,
        download: bool = True,
    ) -> Dict[str, Any]:
        """
        Execute complete stop cycle for scheduled automation.

        This handles the full post-recording workflow:
        1. Stop measurement
        2. Enable FTP
        3. Download measurement folder
        4. Verify download

        Args:
            unit_id: Unit identifier
            device_type: "slm" | "seismograph"
            download: Whether to download measurement data

        Returns:
            Response dict from device module
        """
        if device_type == "slm":
            try:
                return await self.slmm_client.stop_cycle(unit_id, download)
            except SLMMClientError as e:
                raise DeviceControllerError(f"SLMM error: {str(e)}")

        elif device_type == "seismograph":
            return {
                "status": "not_implemented",
                "message": "Seismograph stop cycle not yet implemented",
                "unit_id": unit_id,
            }

        else:
            raise UnsupportedDeviceTypeError(f"Unsupported device type: {device_type}")

    # ========================================================================
    # Health Check
    # ========================================================================

    async def check_device_connectivity(
        self,
        unit_id: str,
        device_type: str,
    ) -> bool:
        """
        Check if device is reachable.

        Args:
            unit_id: Unit identifier
            device_type: "slm" | "seismograph"

        Returns:
            True if device is reachable, False otherwise
        """
        if device_type == "slm":
            try:
                status = await self.slmm_client.get_unit_status(unit_id)
                return status.get("last_seen") is not None
            except Exception:
                # Treat any failure (network or SLMM error) as "unreachable";
                # a bare except here would also swallow task cancellation
                return False

        elif device_type == "seismograph":
            # TODO: Implement SFM connectivity check
            return False

        else:
            return False


# Singleton instance
_default_controller: Optional[DeviceController] = None


def get_device_controller() -> DeviceController:
    """
    Get the default device controller instance.

    Returns:
        DeviceController instance
    """
    global _default_controller
    if _default_controller is None:
        _default_controller = DeviceController()
    return _default_controller
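
A short sketch of driving the controller from an async context; the unit ids are hypothetical and the SLM path assumes a reachable SLMM backend:

    import asyncio
    from backend.services.device_controller import (
        get_device_controller,
        DeviceControllerError,
    )

    async def main():
        controller = get_device_controller()

        try:
            # SLM operations are routed through the SLMM client
            await controller.start_cycle("nl43-001", "slm")  # hypothetical unit
        except DeviceControllerError as e:
            print("device operation failed:", e)

        # Seismograph operations currently return a stub
        result = await controller.stop_recording("seismo-042", "seismograph")
        print(result["status"])  # "not_implemented"

    asyncio.run(main())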
backend/services/device_status_monitor.py (new file, 184 lines)
@@ -0,0 +1,184 @@
"""
Device Status Monitor

Background task that monitors device reachability via SLMM polling status
and triggers alerts when devices go offline or come back online.

This service bridges SLMM's device polling with Terra-View's alert system.
"""

import asyncio
import logging
from datetime import datetime
from typing import Optional, Dict

from backend.database import SessionLocal
from backend.services.slmm_client import get_slmm_client, SLMMClientError
from backend.services.alert_service import get_alert_service

logger = logging.getLogger(__name__)


class DeviceStatusMonitor:
    """
    Monitors device reachability via SLMM's polling status endpoint.

    Detects state transitions (online→offline, offline→online) and
    triggers AlertService to create/resolve alerts.

    Usage:
        monitor = DeviceStatusMonitor()
        await monitor.start()  # Start background monitoring
        monitor.stop()         # Stop monitoring
    """

    def __init__(self, check_interval: int = 60):
        """
        Initialize the monitor.

        Args:
            check_interval: Seconds between status checks (default: 60)
        """
        self.check_interval = check_interval
        self.running = False
        self.task: Optional[asyncio.Task] = None
        self.slmm_client = get_slmm_client()

        # Track previous device states to detect transitions
        self._device_states: Dict[str, bool] = {}

    async def start(self):
        """Start the monitoring background task."""
        if self.running:
            logger.warning("DeviceStatusMonitor is already running")
            return

        self.running = True
        self.task = asyncio.create_task(self._monitor_loop())
        logger.info(f"DeviceStatusMonitor started (checking every {self.check_interval}s)")

    def stop(self):
        """Stop the monitoring background task."""
        self.running = False
        if self.task:
            self.task.cancel()
        logger.info("DeviceStatusMonitor stopped")

    async def _monitor_loop(self):
        """Main monitoring loop."""
        while self.running:
            try:
                await self._check_all_devices()
            except Exception as e:
                logger.error(f"Error in device status monitor: {e}", exc_info=True)

            # Sleep in small intervals for graceful shutdown
            for _ in range(self.check_interval):
                if not self.running:
                    break
                await asyncio.sleep(1)

        logger.info("DeviceStatusMonitor loop exited")

    async def _check_all_devices(self):
        """
        Fetch polling status from SLMM and detect state transitions.

        Uses GET /api/slmm/_polling/status (proxied to SLMM)
        """
        try:
            # Get status from SLMM
            status_response = await self.slmm_client.get_polling_status()
            devices = status_response.get("devices", [])

            if not devices:
                logger.debug("No devices in polling status response")
                return

            db = SessionLocal()
            try:
                alert_service = get_alert_service(db)

                for device in devices:
                    unit_id = device.get("unit_id")
                    if not unit_id:
                        continue

                    is_reachable = device.get("is_reachable", True)
                    previous_reachable = self._device_states.get(unit_id)

                    # Skip if this is the first check (no previous state)
                    if previous_reachable is None:
                        self._device_states[unit_id] = is_reachable
                        logger.debug(f"Initial state for {unit_id}: reachable={is_reachable}")
                        continue

                    # Detect offline transition (was online, now offline)
                    if previous_reachable and not is_reachable:
                        logger.warning(f"Device {unit_id} went OFFLINE")
                        alert_service.create_device_offline_alert(
                            unit_id=unit_id,
                            consecutive_failures=device.get("consecutive_failures", 0),
                            last_error=device.get("last_error"),
                        )

                    # Detect online transition (was offline, now online)
                    elif not previous_reachable and is_reachable:
                        logger.info(f"Device {unit_id} came back ONLINE")
                        alert_service.resolve_device_offline_alert(unit_id)

                    # Update tracked state
                    self._device_states[unit_id] = is_reachable

                # Cleanup expired alerts while we're here
                alert_service.cleanup_expired_alerts()

            finally:
                db.close()

        except SLMMClientError as e:
            logger.warning(f"Could not reach SLMM for status check: {e}")
        except Exception as e:
            logger.error(f"Error checking device status: {e}", exc_info=True)

    def get_tracked_devices(self) -> Dict[str, bool]:
        """
        Get the current tracked device states.

        Returns:
            Dict mapping unit_id to is_reachable status
        """
        return dict(self._device_states)

    def clear_tracked_devices(self):
        """Clear all tracked device states (useful for testing)."""
        self._device_states.clear()


# Singleton instance
_monitor_instance: Optional[DeviceStatusMonitor] = None


def get_device_status_monitor() -> DeviceStatusMonitor:
    """
    Get the device status monitor singleton instance.

    Returns:
        DeviceStatusMonitor instance
    """
    global _monitor_instance
    if _monitor_instance is None:
        _monitor_instance = DeviceStatusMonitor()
    return _monitor_instance


async def start_device_status_monitor():
    """Start the global device status monitor."""
    monitor = get_device_status_monitor()
    await monitor.start()


def stop_device_status_monitor():
    """Stop the global device status monitor."""
    monitor = get_device_status_monitor()
    monitor.stop()
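
The module-level start/stop helpers are shaped for application startup hooks. A minimal sketch of wiring them into a FastAPI lifespan handler (the surrounding app module is an assumption):

    from contextlib import asynccontextmanager

    from fastapi import FastAPI

    from backend.services.device_status_monitor import (
        start_device_status_monitor,
        stop_device_status_monitor,
    )

    @asynccontextmanager
    async def lifespan(app: FastAPI):
        await start_device_status_monitor()  # begins the polling loop (60s default)
        yield
        stop_device_status_monitor()         # cancels the background task

    app = FastAPI(lifespan=lifespan)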
backend/services/fleet_calendar_service.py (new file, 725 lines)
@@ -0,0 +1,725 @@
"""
Fleet Calendar Service

Business logic for:
- Calculating unit availability on any given date
- Calibration status tracking (valid, expiring soon, expired)
- Job reservation management
- Conflict detection (calibration expires mid-job)
"""

from datetime import date, datetime, timedelta
from typing import Dict, List, Optional, Tuple
from sqlalchemy.orm import Session
from sqlalchemy import and_, or_

from backend.models import (
    RosterUnit, JobReservation, JobReservationUnit,
    UserPreferences, Project, DeploymentRecord
)


def get_calibration_status(
    unit: RosterUnit,
    check_date: date,
    warning_days: int = 30
) -> str:
    """
    Determine calibration status for a unit on a specific date.

    Returns:
        "valid" - Calibration is good on this date
        "expiring_soon" - Within warning_days of expiry
        "expired" - Calibration has expired
        "needs_calibration" - No calibration date set
    """
    if not unit.last_calibrated:
        return "needs_calibration"

    # Calculate expiry date (1 year from last calibration)
    expiry_date = unit.last_calibrated + timedelta(days=365)

    if check_date >= expiry_date:
        return "expired"
    elif check_date >= expiry_date - timedelta(days=warning_days):
        return "expiring_soon"
    else:
        return "valid"
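
# Worked example of the thresholds above (illustrative dates that mirror the
# logic rather than calling the function):
#
#     from datetime import date, timedelta
#     last_calibrated = date(2024, 1, 10)
#     expiry = last_calibrated + timedelta(days=365)   # 2025-01-09
#     # check 2024-06-01 -> "valid"          (before the warning window)
#     # check 2024-12-20 -> "expiring_soon"  (within 30 days of expiry)
#     # check 2025-01-09 -> "expired"        (check_date >= expiry_date)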

def get_unit_reservations_on_date(
    db: Session,
    unit_id: str,
    check_date: date
) -> List[JobReservation]:
    """Get all reservations that include this unit on the given date."""

    # Get reservation IDs that have this unit assigned
    assigned_reservation_ids = db.query(JobReservationUnit.reservation_id).filter(
        JobReservationUnit.unit_id == unit_id
    ).subquery()

    # Get reservations that:
    # 1. Have this unit assigned AND date is within range
    reservations = db.query(JobReservation).filter(
        JobReservation.id.in_(assigned_reservation_ids),
        JobReservation.start_date <= check_date,
        JobReservation.end_date >= check_date
    ).all()

    return reservations


def get_active_deployment(db: Session, unit_id: str) -> Optional[DeploymentRecord]:
    """Return the active (unreturned) deployment record for a unit, or None."""
    return (
        db.query(DeploymentRecord)
        .filter(
            DeploymentRecord.unit_id == unit_id,
            DeploymentRecord.actual_removal_date.is_(None)
        )
        .order_by(DeploymentRecord.created_at.desc())
        .first()
    )


def is_unit_available_on_date(
    db: Session,
    unit: RosterUnit,
    check_date: date,
    warning_days: int = 30
) -> Tuple[bool, str, Optional[str]]:
    """
    Check if a unit is available on a specific date.

    Returns:
        (is_available, status, reservation_name)
        - is_available: True if unit can be assigned to new work
        - status: "available", "reserved", "expired", "retired", "needs_calibration", "in_field"
        - reservation_name: Name of blocking reservation or project ref (if any)
    """
    # Check if retired
    if unit.retired:
        return False, "retired", None

    # Check calibration status
    cal_status = get_calibration_status(unit, check_date, warning_days)
    if cal_status == "expired":
        return False, "expired", None
    if cal_status == "needs_calibration":
        return False, "needs_calibration", None

    # Check for an active deployment record (unit is physically in the field)
    active_deployment = get_active_deployment(db, unit.id)
    if active_deployment:
        label = active_deployment.project_ref or "Field deployment"
        return False, "in_field", label

    # Check if already reserved
    reservations = get_unit_reservations_on_date(db, unit.id, check_date)
    if reservations:
        return False, "reserved", reservations[0].name

    # Unit is available (even if expiring soon - that's just a warning)
    return True, "available", None
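
# Usage sketch (hypothetical unit id; session handling as elsewhere in the app):
#
#     from datetime import date
#     unit = db.query(RosterUnit).filter_by(id="seismo-042").first()
#     ok, status, blocker = is_unit_available_on_date(db, unit, date(2025, 3, 1))
#     # e.g. (False, "reserved", "Downtown bridge job") when a reservation blocks it
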
def get_day_summary(
    db: Session,
    check_date: date,
    device_type: str = "seismograph"
) -> Dict:
    """
    Get a complete summary of fleet status for a specific day.

    Returns dict with:
        - available_units: List of available unit IDs with calibration info
        - in_field_units: List of units with an active field deployment
        - reserved_units: List of reserved unit IDs with reservation info
        - expired_units: List of units with expired calibration
        - expiring_soon_units: List of units expiring within warning period
        - needs_calibration_units: List of units with no valid calibration
        - cal_expiring_today: List of units whose calibration expires on this day
        - reservations: List of active reservations on this date
        - counts: Summary counts
    """
    # Get user preferences for warning days
    prefs = db.query(UserPreferences).filter_by(id=1).first()
    warning_days = prefs.calibration_warning_days if prefs else 30

    # Get all non-retired units of the specified device type
    units = db.query(RosterUnit).filter(
        RosterUnit.device_type == device_type,
        RosterUnit.retired == False
    ).all()

    available_units = []
    reserved_units = []
    expired_units = []
    expiring_soon_units = []
    needs_calibration_units = []
    in_field_units = []
    cal_expiring_today = []  # Units whose calibration expires ON this day

    for unit in units:
        is_avail, status, reservation_name = is_unit_available_on_date(
            db, unit, check_date, warning_days
        )

        cal_status = get_calibration_status(unit, check_date, warning_days)
        expiry_date = None
        if unit.last_calibrated:
            expiry_date = (unit.last_calibrated + timedelta(days=365)).isoformat()

        unit_info = {
            "id": unit.id,
            "last_calibrated": unit.last_calibrated.isoformat() if unit.last_calibrated else None,
            "expiry_date": expiry_date,
            "calibration_status": cal_status,
            "deployed": unit.deployed,
            "note": unit.note or ""
        }

        # Check if calibration expires ON this specific day
        if unit.last_calibrated:
            unit_expiry_date = unit.last_calibrated + timedelta(days=365)
            if unit_expiry_date == check_date:
                cal_expiring_today.append(unit_info)

        if status == "available":
            available_units.append(unit_info)
            if cal_status == "expiring_soon":
                expiring_soon_units.append(unit_info)
        elif status == "in_field":
            unit_info["project_ref"] = reservation_name
            in_field_units.append(unit_info)
        elif status == "reserved":
            unit_info["reservation_name"] = reservation_name
            reserved_units.append(unit_info)
            if cal_status == "expiring_soon":
                expiring_soon_units.append(unit_info)
        elif status == "expired":
            expired_units.append(unit_info)
        elif status == "needs_calibration":
            needs_calibration_units.append(unit_info)

    # Get active reservations on this date
    reservations = db.query(JobReservation).filter(
        JobReservation.device_type == device_type,
        JobReservation.start_date <= check_date,
        JobReservation.end_date >= check_date
    ).all()

    reservation_list = []
    for res in reservations:
        # Count assigned units for this reservation
        assigned_count = db.query(JobReservationUnit).filter(
            JobReservationUnit.reservation_id == res.id
        ).count()

        reservation_list.append({
            "id": res.id,
            "name": res.name,
            "start_date": res.start_date.isoformat(),
            "end_date": res.end_date.isoformat(),
            "assignment_type": res.assignment_type,
            "quantity_needed": res.quantity_needed,
            "assigned_count": assigned_count,
            "color": res.color,
            "project_id": res.project_id
        })

    return {
        "date": check_date.isoformat(),
        "device_type": device_type,
        "available_units": available_units,
        "in_field_units": in_field_units,
        "reserved_units": reserved_units,
        "expired_units": expired_units,
        "expiring_soon_units": expiring_soon_units,
        "needs_calibration_units": needs_calibration_units,
        "cal_expiring_today": cal_expiring_today,
        "reservations": reservation_list,
        "counts": {
            "available": len(available_units),
            "in_field": len(in_field_units),
            "reserved": len(reserved_units),
            "expired": len(expired_units),
            "expiring_soon": len(expiring_soon_units),
            "needs_calibration": len(needs_calibration_units),
            "cal_expiring_today": len(cal_expiring_today),
            "total": len(units)
        }
    }

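# Illustrative read of the summary counts (names assumed, not part of this diff):
#
#     summary = get_day_summary(db, date.today())
#     c = summary["counts"]
#     print(f'{c["available"]}/{c["total"]} available, {c["reserved"]} reserved')
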
def get_calendar_year_data(
    db: Session,
    year: int,
    device_type: str = "seismograph"
) -> Dict:
    """
    Get calendar data for an entire year.

    For performance, this returns summary counts per day rather than
    full unit lists. Use get_day_summary() for detailed day data.
    """
    # Get user preferences
    prefs = db.query(UserPreferences).filter_by(id=1).first()
    warning_days = prefs.calibration_warning_days if prefs else 30

    # Get all units
    units = db.query(RosterUnit).filter(
        RosterUnit.device_type == device_type,
        RosterUnit.retired == False
    ).all()

    # Get all reservations that overlap with this year
    # Include TBD reservations (end_date is null) that started before year end
    year_start = date(year, 1, 1)
    year_end = date(year, 12, 31)

    reservations = db.query(JobReservation).filter(
        JobReservation.device_type == device_type,
        JobReservation.start_date <= year_end,
        or_(
            JobReservation.end_date >= year_start,
            JobReservation.end_date == None  # TBD reservations
        )
    ).all()

    # Get all unit assignments for these reservations
    reservation_ids = [r.id for r in reservations]
    assignments = db.query(JobReservationUnit).filter(
        JobReservationUnit.reservation_id.in_(reservation_ids)
    ).all() if reservation_ids else []

    # Build a lookup: unit_id -> list of (start_date, end_date, reservation_name)
    # For TBD reservations, use estimated_end_date if available, or a far future date
    unit_reservations = {}
    for res in reservations:
        res_assignments = [a for a in assignments if a.reservation_id == res.id]
        for assignment in res_assignments:
            unit_id = assignment.unit_id
            # Use unit-specific dates if set, otherwise use reservation dates
            start_d = assignment.unit_start_date or res.start_date
            if assignment.unit_end_tbd or (assignment.unit_end_date is None and res.end_date_tbd):
                # TBD: use estimated date or far future for availability calculation
                end_d = res.estimated_end_date or date(year + 5, 12, 31)
            else:
                end_d = assignment.unit_end_date or res.end_date or date(year + 5, 12, 31)

            if unit_id not in unit_reservations:
                unit_reservations[unit_id] = []
            unit_reservations[unit_id].append((start_d, end_d, res.name))

    # Build set of unit IDs that have an active deployment record (still in the field)
    unit_ids = [u.id for u in units]
    active_deployments = db.query(DeploymentRecord.unit_id).filter(
        DeploymentRecord.unit_id.in_(unit_ids),
        DeploymentRecord.actual_removal_date == None
    ).all()
    unit_in_field = {row.unit_id for row in active_deployments}

    # Generate data for each month
    months_data = {}

    for month in range(1, 13):
        # Get first and last day of month
        first_day = date(year, month, 1)
        if month == 12:
            last_day = date(year, 12, 31)
        else:
            last_day = date(year, month + 1, 1) - timedelta(days=1)

        days_data = {}
        current_day = first_day

        while current_day <= last_day:
            available = 0
            in_field = 0
            reserved = 0
            expired = 0
            expiring_soon = 0
            needs_cal = 0
            cal_expiring_on_day = 0  # Units whose calibration expires ON this day
            cal_expired_on_day = 0   # Units whose calibration expired ON this day

            for unit in units:
                # Check calibration
                cal_status = get_calibration_status(unit, current_day, warning_days)

                # Check if calibration expires/expired ON this specific day
                if unit.last_calibrated:
                    unit_expiry = unit.last_calibrated + timedelta(days=365)
                    if unit_expiry == current_day:
                        cal_expiring_on_day += 1
                    # Check if expired yesterday (first day of being expired)
                    elif unit_expiry == current_day - timedelta(days=1):
                        cal_expired_on_day += 1

                if cal_status == "expired":
                    expired += 1
                    continue
                if cal_status == "needs_calibration":
                    needs_cal += 1
                    continue

                # Check active deployment record (in field)
                if unit.id in unit_in_field:
                    in_field += 1
                    continue

                # Check if reserved
                is_reserved = False
                if unit.id in unit_reservations:
                    for start_d, end_d, _ in unit_reservations[unit.id]:
                        if start_d <= current_day <= end_d:
                            is_reserved = True
                            break

                if is_reserved:
                    reserved += 1
                else:
                    available += 1

                if cal_status == "expiring_soon":
                    expiring_soon += 1

            days_data[current_day.day] = {
                "available": available,
                "in_field": in_field,
                "reserved": reserved,
                "expired": expired,
                "expiring_soon": expiring_soon,
                "needs_calibration": needs_cal,
                "cal_expiring_on_day": cal_expiring_on_day,
                "cal_expired_on_day": cal_expired_on_day
            }

            current_day += timedelta(days=1)

        months_data[month] = {
            "name": first_day.strftime("%B"),
            "short_name": first_day.strftime("%b"),
            "days": days_data,
            "first_weekday": first_day.weekday(),  # 0=Monday, 6=Sunday
            "num_days": last_day.day
        }

    # Also include reservation summary for the year
    reservation_list = []
    for res in reservations:
        assigned_count = len([a for a in assignments if a.reservation_id == res.id])
        reservation_list.append({
            "id": res.id,
            "name": res.name,
            "start_date": res.start_date.isoformat(),
            # TBD reservations have no end_date; guard against None
            "end_date": res.end_date.isoformat() if res.end_date else None,
            "quantity_needed": res.quantity_needed,
            "assigned_count": assigned_count,
            "color": res.color
        })

    return {
        "year": year,
        "device_type": device_type,
        "months": months_data,
        "reservations": reservation_list,
        "total_units": len(units)
    }

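# Illustrative consumption of the per-day counts (names assumed, not in this diff):
#
#     cal = get_calendar_year_data(db, 2025)
#     june = cal["months"][6]
#     print(june["name"], june["days"][15]["available"], "available on June 15")
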
def get_rolling_calendar_data(
    db: Session,
    start_year: int,
    start_month: int,
    device_type: str = "seismograph"
) -> Dict:
    """
    Get calendar data for 12 months starting from a specific month/year.

    This supports the rolling calendar view where users can scroll through
    months one at a time, viewing any 12-month window.
    """
    # Get user preferences
    prefs = db.query(UserPreferences).filter_by(id=1).first()
    warning_days = prefs.calibration_warning_days if prefs else 30

    # Get all units
    units = db.query(RosterUnit).filter(
        RosterUnit.device_type == device_type,
        RosterUnit.retired == False
    ).all()

    # Calculate the date range for 12 months: the window ends 11 months
    # after the start month
    first_date = date(start_year, start_month, 1)
    end_year = start_year + ((start_month + 10) // 12)
    end_month = ((start_month + 10) % 12) + 1
    if end_month == 12:
        last_date = date(end_year, 12, 31)
    else:
        last_date = date(end_year, end_month + 1, 1) - timedelta(days=1)

    # Get all reservations that overlap with this 12-month range
    reservations = db.query(JobReservation).filter(
        JobReservation.device_type == device_type,
        JobReservation.start_date <= last_date,
        or_(
            JobReservation.end_date >= first_date,
            JobReservation.end_date == None  # TBD reservations
        )
    ).all()

    # Get all unit assignments for these reservations
    reservation_ids = [r.id for r in reservations]
    assignments = db.query(JobReservationUnit).filter(
        JobReservationUnit.reservation_id.in_(reservation_ids)
    ).all() if reservation_ids else []

    # Build a lookup: unit_id -> list of (start_date, end_date, reservation_name)
    unit_reservations = {}
    for res in reservations:
        res_assignments = [a for a in assignments if a.reservation_id == res.id]
        for assignment in res_assignments:
            unit_id = assignment.unit_id
            start_d = assignment.unit_start_date or res.start_date
            if assignment.unit_end_tbd or (assignment.unit_end_date is None and res.end_date_tbd):
                end_d = res.estimated_end_date or date(start_year + 5, 12, 31)
            else:
                end_d = assignment.unit_end_date or res.end_date or date(start_year + 5, 12, 31)

            if unit_id not in unit_reservations:
                unit_reservations[unit_id] = []
            unit_reservations[unit_id].append((start_d, end_d, res.name))

    # Build set of unit IDs that have an active deployment record (still in the field)
    # (not currently used by the rolling day counts below)
    unit_ids = [u.id for u in units]
    active_deployments = db.query(DeploymentRecord.unit_id).filter(
        DeploymentRecord.unit_id.in_(unit_ids),
        DeploymentRecord.actual_removal_date == None
    ).all()
    unit_in_field = {row.unit_id for row in active_deployments}

    # Generate data for each of the 12 months
    months_data = []

    for i in range(12):
        # Calculate this month's year and month
        m_year = start_year + ((start_month - 1 + i) // 12)
        m_month = ((start_month - 1 + i) % 12) + 1

        first_day = date(m_year, m_month, 1)
        if m_month == 12:
            last_day = date(m_year, 12, 31)
        else:
            last_day = date(m_year, m_month + 1, 1) - timedelta(days=1)

        days_data = {}
        current_day = first_day

        while current_day <= last_day:
            available = 0
            reserved = 0
            expired = 0
            expiring_soon = 0
            needs_cal = 0
            cal_expiring_on_day = 0
            cal_expired_on_day = 0

            for unit in units:
                cal_status = get_calibration_status(unit, current_day, warning_days)

                if unit.last_calibrated:
                    unit_expiry = unit.last_calibrated + timedelta(days=365)
                    if unit_expiry == current_day:
                        cal_expiring_on_day += 1
                    elif unit_expiry == current_day - timedelta(days=1):
                        cal_expired_on_day += 1

                if cal_status == "expired":
                    expired += 1
                    continue
                if cal_status == "needs_calibration":
                    needs_cal += 1
                    continue

                is_reserved = False
                if unit.id in unit_reservations:
                    for start_d, end_d, _ in unit_reservations[unit.id]:
                        if start_d <= current_day <= end_d:
                            is_reserved = True
                            break

                if is_reserved:
                    reserved += 1
                else:
                    available += 1

                if cal_status == "expiring_soon":
                    expiring_soon += 1

            days_data[current_day.day] = {
                "available": available,
                "reserved": reserved,
                "expired": expired,
                "expiring_soon": expiring_soon,
                "needs_calibration": needs_cal,
                "cal_expiring_on_day": cal_expiring_on_day,
                "cal_expired_on_day": cal_expired_on_day
            }

            current_day += timedelta(days=1)

        months_data.append({
            "year": m_year,
            "month": m_month,
            "name": first_day.strftime("%B"),
            "short_name": first_day.strftime("%b"),
            "year_short": first_day.strftime("%y"),
            "days": days_data,
            "first_weekday": first_day.weekday(),
            "num_days": last_day.day
        })

    return {
        "start_year": start_year,
        "start_month": start_month,
        "device_type": device_type,
        "months": months_data,
        "total_units": len(units)
    }

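# The month-window arithmetic above in one worked example: a window starting
# in November 2025 (start_month=11) places iteration i=3 at
#   m_year  = 2025 + ((11 - 1 + 3) // 12) = 2026
#   m_month = ((11 - 1 + 3) % 12) + 1     = 2
# i.e. February 2026, the fourth month of the rolling view.
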
def check_calibration_conflicts(
    db: Session,
    reservation_id: str
) -> List[Dict]:
    """
    Check if any units assigned to a reservation will have their
    calibration expire during the reservation period.

    Returns list of conflicts with unit info and expiry date.
    """
    reservation = db.query(JobReservation).filter_by(id=reservation_id).first()
    if not reservation:
        return []

    # Get assigned units
    assigned = db.query(JobReservationUnit).filter_by(
        reservation_id=reservation_id
    ).all()

    conflicts = []
    for assignment in assigned:
        unit = db.query(RosterUnit).filter_by(id=assignment.unit_id).first()
        if not unit or not unit.last_calibrated:
            continue

        expiry_date = unit.last_calibrated + timedelta(days=365)

        # Check if expiry falls within reservation period
        # (TBD reservations have no end_date, so skip the comparison for them)
        if reservation.end_date and reservation.start_date < expiry_date <= reservation.end_date:
            conflicts.append({
                "unit_id": unit.id,
                "last_calibrated": unit.last_calibrated.isoformat(),
                "expiry_date": expiry_date.isoformat(),
                "reservation_name": reservation.name,
                "days_into_job": (expiry_date - reservation.start_date).days
            })

    return conflicts

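# Illustrative check before confirming an assignment (names assumed):
#
#     for c in check_calibration_conflicts(db, reservation.id):
#         print(f'{c["unit_id"]} expires {c["expiry_date"]}, '
#               f'{c["days_into_job"]} days into "{c["reservation_name"]}"')
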
def get_available_units_for_period(
    db: Session,
    start_date: date,
    end_date: date,
    device_type: str = "seismograph",
    exclude_reservation_id: Optional[str] = None
) -> List[Dict]:
    """
    Get units that are available for the entire specified period.

    A unit is included if it is:
    - Not retired
    - Not assigned to any other reservation that overlaps the period
    - Not currently deployed in the field

    Each returned unit carries its calibration status as of the end date,
    so callers can warn about or filter out units that would expire mid-job.
    """
    prefs = db.query(UserPreferences).filter_by(id=1).first()
    warning_days = prefs.calibration_warning_days if prefs else 30

    units = db.query(RosterUnit).filter(
        RosterUnit.device_type == device_type,
        RosterUnit.retired == False
    ).all()

    # Get reservations that overlap with this period
    overlapping_reservations = db.query(JobReservation).filter(
        JobReservation.device_type == device_type,
        JobReservation.start_date <= end_date,
        JobReservation.end_date >= start_date
    )

    if exclude_reservation_id:
        overlapping_reservations = overlapping_reservations.filter(
            JobReservation.id != exclude_reservation_id
        )

    overlapping_reservations = overlapping_reservations.all()

    # Get all units assigned to overlapping reservations
    reserved_unit_ids = set()
    for res in overlapping_reservations:
        assigned = db.query(JobReservationUnit).filter_by(
            reservation_id=res.id
        ).all()
        for a in assigned:
            reserved_unit_ids.add(a.unit_id)

    # Get units with active deployment records (still in the field)
    unit_ids = [u.id for u in units]
    active_deps = db.query(DeploymentRecord.unit_id).filter(
        DeploymentRecord.unit_id.in_(unit_ids),
        DeploymentRecord.actual_removal_date == None
    ).all()
    in_field_unit_ids = {row.unit_id for row in active_deps}

    available_units = []
    for unit in units:
        # Check if already reserved
        if unit.id in reserved_unit_ids:
            continue
        # Check if currently in the field
        if unit.id in in_field_unit_ids:
            continue

        if unit.last_calibrated:
            expiry_date = unit.last_calibrated + timedelta(days=365)
            cal_status = get_calibration_status(unit, end_date, warning_days)
        else:
            expiry_date = None
            cal_status = "needs_calibration"

        available_units.append({
            "id": unit.id,
            "last_calibrated": unit.last_calibrated.isoformat() if unit.last_calibrated else None,
            "expiry_date": expiry_date.isoformat() if expiry_date else None,
            "calibration_status": cal_status,
            "deployed": unit.deployed,
            "out_for_calibration": unit.out_for_calibration or False,
            "note": unit.note or ""
        })

    return available_units
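# Illustrative filtering on the returned calibration status (names assumed):
#
#     candidates = get_available_units_for_period(db, job_start, job_end)
#     usable = [u for u in candidates
#               if u["calibration_status"] not in ("expired", "needs_calibration")]
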
backend/services/recurring_schedule_service.py (new file, 611 lines)
@@ -0,0 +1,611 @@
"""
Recurring Schedule Service

Manages recurring schedule definitions and generates ScheduledAction
instances based on patterns (weekly calendar, simple interval).
"""

import json
import uuid
import logging
from datetime import datetime, timedelta, date, time
from typing import Optional, List, Dict, Any, Tuple
from zoneinfo import ZoneInfo

from sqlalchemy.orm import Session
from sqlalchemy import and_

from backend.models import RecurringSchedule, ScheduledAction, MonitoringLocation, UnitAssignment, Project

logger = logging.getLogger(__name__)

# Day name mapping (index matches date.weekday(): 0=Monday)
DAY_NAMES = ["monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"]

class RecurringScheduleService:
    """
    Service for managing recurring schedules and generating ScheduledActions.

    Supports three schedule types:
    - weekly_calendar: Specific days with start/end times
    - simple_interval: Daily stop/download/restart cycles for 24/7 monitoring
    - one_off: A single start/stop pair at explicit datetimes
    """

    def __init__(self, db: Session):
        self.db = db

    def create_schedule(
        self,
        project_id: str,
        location_id: str,
        name: str,
        schedule_type: str,
        device_type: str = "slm",
        unit_id: Optional[str] = None,
        weekly_pattern: Optional[dict] = None,
        interval_type: Optional[str] = None,
        cycle_time: Optional[str] = None,
        include_download: bool = True,
        auto_increment_index: bool = True,
        timezone: str = "America/New_York",
        start_datetime: Optional[datetime] = None,
        end_datetime: Optional[datetime] = None,
    ) -> RecurringSchedule:
        """
        Create a new recurring schedule.

        Args:
            project_id: Project ID
            location_id: Monitoring location ID
            name: Schedule name
            schedule_type: "weekly_calendar", "simple_interval", or "one_off"
            device_type: "slm" or "seismograph"
            unit_id: Specific unit (optional, can use assignment)
            weekly_pattern: Dict of day patterns for weekly_calendar
            interval_type: "daily" or "hourly" for simple_interval
            cycle_time: Time string "HH:MM" for cycle
            include_download: Whether to download data on cycle
            auto_increment_index: Whether to auto-increment store index before start
            timezone: Timezone for schedule times
            start_datetime: Start date+time in UTC (one_off only)
            end_datetime: End date+time in UTC (one_off only)

        Returns:
            Created RecurringSchedule
        """
        schedule = RecurringSchedule(
            id=str(uuid.uuid4()),
            project_id=project_id,
            location_id=location_id,
            unit_id=unit_id,
            name=name,
            schedule_type=schedule_type,
            device_type=device_type,
            weekly_pattern=json.dumps(weekly_pattern) if weekly_pattern else None,
            interval_type=interval_type,
            cycle_time=cycle_time,
            include_download=include_download,
            auto_increment_index=auto_increment_index,
            enabled=True,
            timezone=timezone,
            start_datetime=start_datetime,
            end_datetime=end_datetime,
        )

        # Calculate next occurrence
        schedule.next_occurrence = self._calculate_next_occurrence(schedule)

        self.db.add(schedule)
        self.db.commit()
        self.db.refresh(schedule)

        logger.info(f"Created recurring schedule: {name} ({schedule_type})")
        return schedule
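
    # Illustrative creation of an overnight weekly schedule (IDs here are
    # assumptions, not part of this diff):
    #
    #     svc = RecurringScheduleService(db)
    #     svc.create_schedule(
    #         project_id="proj-1", location_id="loc-1",
    #         name="Night shifts", schedule_type="weekly_calendar",
    #         weekly_pattern={"monday": {"enabled": True, "start": "19:00", "end": "07:00"}},
    #     )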
    def update_schedule(
        self,
        schedule_id: str,
        **kwargs,
    ) -> Optional[RecurringSchedule]:
        """
        Update a recurring schedule.

        Args:
            schedule_id: Schedule to update
            **kwargs: Fields to update

        Returns:
            Updated schedule or None
        """
        schedule = self.db.query(RecurringSchedule).filter_by(id=schedule_id).first()
        if not schedule:
            return None

        for key, value in kwargs.items():
            if hasattr(schedule, key):
                if key == "weekly_pattern" and isinstance(value, dict):
                    value = json.dumps(value)
                setattr(schedule, key, value)

        # Recalculate next occurrence
        schedule.next_occurrence = self._calculate_next_occurrence(schedule)

        self.db.commit()
        self.db.refresh(schedule)

        logger.info(f"Updated recurring schedule: {schedule.name}")
        return schedule

    def delete_schedule(self, schedule_id: str) -> bool:
        """
        Delete a recurring schedule and its pending generated actions.

        Args:
            schedule_id: Schedule to delete

        Returns:
            True if deleted, False if not found
        """
        schedule = self.db.query(RecurringSchedule).filter_by(id=schedule_id).first()
        if not schedule:
            return False

        # Delete pending generated actions for this schedule
        # The schedule_id is stored in the notes field as JSON
        pending_actions = self.db.query(ScheduledAction).filter(
            and_(
                ScheduledAction.execution_status == "pending",
                ScheduledAction.notes.like(f'%"schedule_id": "{schedule_id}"%'),
            )
        ).all()

        deleted_count = len(pending_actions)
        for action in pending_actions:
            self.db.delete(action)

        self.db.delete(schedule)
        self.db.commit()

        logger.info(f"Deleted recurring schedule: {schedule.name} (and {deleted_count} pending actions)")
        return True

    def enable_schedule(self, schedule_id: str) -> Optional[RecurringSchedule]:
        """Enable a disabled schedule."""
        return self.update_schedule(schedule_id, enabled=True)

    def disable_schedule(self, schedule_id: str) -> Optional[RecurringSchedule]:
        """Disable a schedule and cancel its pending actions."""
        schedule = self.update_schedule(schedule_id, enabled=False)
        if schedule:
            # Cancel all pending actions generated by this schedule
            pending_actions = self.db.query(ScheduledAction).filter(
                and_(
                    ScheduledAction.execution_status == "pending",
                    ScheduledAction.notes.like(f'%"schedule_id": "{schedule_id}"%'),
                )
            ).all()

            for action in pending_actions:
                action.execution_status = "cancelled"

            if pending_actions:
                self.db.commit()
                logger.info(f"Cancelled {len(pending_actions)} pending actions for disabled schedule {schedule.name}")

        return schedule
    def generate_actions_for_schedule(
        self,
        schedule: RecurringSchedule,
        horizon_days: int = 7,
        preview_only: bool = False,
    ) -> List[ScheduledAction]:
        """
        Generate ScheduledAction entries for the next N days based on pattern.

        Args:
            schedule: The recurring schedule
            horizon_days: Days ahead to generate
            preview_only: If True, don't save to DB (for preview)

        Returns:
            List of generated ScheduledAction instances
        """
        if not schedule.enabled:
            return []

        if schedule.schedule_type == "weekly_calendar":
            actions = self._generate_weekly_calendar_actions(schedule, horizon_days)
        elif schedule.schedule_type == "simple_interval":
            actions = self._generate_interval_actions(schedule, horizon_days)
        elif schedule.schedule_type == "one_off":
            actions = self._generate_one_off_actions(schedule)
        else:
            logger.warning(f"Unknown schedule type: {schedule.schedule_type}")
            return []

        if not preview_only and actions:
            for action in actions:
                self.db.add(action)

            schedule.last_generated_at = datetime.utcnow()
            schedule.next_occurrence = self._calculate_next_occurrence(schedule)

            self.db.commit()
            logger.info(f"Generated {len(actions)} actions for schedule: {schedule.name}")

        return actions
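
    # Illustrative dry run: preview the next week's actions without persisting:
    #
    #     preview = svc.generate_actions_for_schedule(schedule, horizon_days=7,
    #                                                 preview_only=True)
    #     for a in preview:
    #         print(a.action_type, a.scheduled_time)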
    def _generate_weekly_calendar_actions(
        self,
        schedule: RecurringSchedule,
        horizon_days: int,
    ) -> List[ScheduledAction]:
        """
        Generate actions from weekly calendar pattern.

        Pattern format:
        {
            "monday": {"enabled": true, "start": "19:00", "end": "07:00"},
            "tuesday": {"enabled": false},
            ...
        }
        """
        if not schedule.weekly_pattern:
            return []

        try:
            pattern = json.loads(schedule.weekly_pattern)
        except json.JSONDecodeError:
            logger.error(f"Invalid weekly_pattern JSON for schedule {schedule.id}")
            return []

        actions = []
        tz = ZoneInfo(schedule.timezone)
        now_utc = datetime.utcnow()
        now_local = now_utc.replace(tzinfo=ZoneInfo("UTC")).astimezone(tz)

        # Get unit_id (from schedule or assignment)
        unit_id = self._resolve_unit_id(schedule)

        for day_offset in range(horizon_days):
            check_date = now_local.date() + timedelta(days=day_offset)
            day_name = DAY_NAMES[check_date.weekday()]
            day_config = pattern.get(day_name, {})

            if not day_config.get("enabled", False):
                continue

            start_time_str = day_config.get("start")
            end_time_str = day_config.get("end")

            if not start_time_str or not end_time_str:
                continue

            # Parse times
            start_time = self._parse_time(start_time_str)
            end_time = self._parse_time(end_time_str)

            if not start_time or not end_time:
                continue

            # Create start datetime in local timezone
            start_local = datetime.combine(check_date, start_time, tzinfo=tz)
            start_utc = start_local.astimezone(ZoneInfo("UTC")).replace(tzinfo=None)

            # Handle overnight schedules (end time is next day)
            if end_time <= start_time:
                end_date = check_date + timedelta(days=1)
            else:
                end_date = check_date

            end_local = datetime.combine(end_date, end_time, tzinfo=tz)
            end_utc = end_local.astimezone(ZoneInfo("UTC")).replace(tzinfo=None)

            # Skip if start time has already passed
            if start_utc <= now_utc:
                continue

            # Check if action already exists
            if self._action_exists(schedule.project_id, schedule.location_id, "start", start_utc):
                continue

            # Build notes with automation metadata
            start_notes = json.dumps({
                "schedule_name": schedule.name,
                "schedule_id": schedule.id,
                "auto_increment_index": schedule.auto_increment_index,
            })

            # Create START action
            start_action = ScheduledAction(
                id=str(uuid.uuid4()),
                project_id=schedule.project_id,
                location_id=schedule.location_id,
                unit_id=unit_id,
                action_type="start",
                device_type=schedule.device_type,
                scheduled_time=start_utc,
                execution_status="pending",
                notes=start_notes,
            )
            actions.append(start_action)

            # Create STOP action (stop_cycle handles download when include_download is True)
            stop_notes = json.dumps({
                "schedule_name": schedule.name,
                "schedule_id": schedule.id,
                "schedule_type": "weekly_calendar",
                "include_download": schedule.include_download,
            })
            stop_action = ScheduledAction(
                id=str(uuid.uuid4()),
                project_id=schedule.project_id,
                location_id=schedule.location_id,
                unit_id=unit_id,
                action_type="stop",
                device_type=schedule.device_type,
                scheduled_time=end_utc,
                execution_status="pending",
                notes=stop_notes,
            )
            actions.append(stop_action)

        return actions
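
    # The overnight branch above in one worked example: a Monday block of
    # 19:00 -> 07:00 in America/New_York (EST, UTC-5) yields
    #   start_utc = Tue 00:00 UTC (Mon 19:00 local)
    #   end_utc   = Tue 12:00 UTC (Tue 07:00 local, rolled to the next day
    #               because end_time <= start_time)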
    def _generate_interval_actions(
        self,
        schedule: RecurringSchedule,
        horizon_days: int,
    ) -> List[ScheduledAction]:
        """
        Generate actions from simple interval pattern.

        For daily cycles: stop, download (optional), start at cycle_time each day.
        """
        if not schedule.cycle_time:
            return []

        cycle_time = self._parse_time(schedule.cycle_time)
        if not cycle_time:
            return []

        actions = []
        tz = ZoneInfo(schedule.timezone)
        now_utc = datetime.utcnow()
        now_local = now_utc.replace(tzinfo=ZoneInfo("UTC")).astimezone(tz)

        # Get unit_id
        unit_id = self._resolve_unit_id(schedule)

        for day_offset in range(horizon_days):
            check_date = now_local.date() + timedelta(days=day_offset)

            # Create cycle datetime in local timezone
            cycle_local = datetime.combine(check_date, cycle_time, tzinfo=tz)
            cycle_utc = cycle_local.astimezone(ZoneInfo("UTC")).replace(tzinfo=None)

            # Skip if time has passed
            if cycle_utc <= now_utc:
                continue

            # Check if cycle action already exists
            if self._action_exists(schedule.project_id, schedule.location_id, "cycle", cycle_utc):
                continue

            # Build notes with metadata for cycle action
            cycle_notes = json.dumps({
                "schedule_name": schedule.name,
                "schedule_id": schedule.id,
                "cycle_type": "daily",
                "include_download": schedule.include_download,
                "auto_increment_index": schedule.auto_increment_index,
            })

            # Create single CYCLE action that handles stop -> download -> start
            # The scheduler's _execute_cycle method handles the full workflow with delays
            cycle_action = ScheduledAction(
                id=str(uuid.uuid4()),
                project_id=schedule.project_id,
                location_id=schedule.location_id,
                unit_id=unit_id,
                action_type="cycle",
                device_type=schedule.device_type,
                scheduled_time=cycle_utc,
                execution_status="pending",
                notes=cycle_notes,
            )
            actions.append(cycle_action)

        return actions
    def _generate_one_off_actions(
        self,
        schedule: RecurringSchedule,
    ) -> List[ScheduledAction]:
        """
        Generate start and stop actions for a one-off recording.

        Unlike recurring types, this generates exactly one start and one stop action
        using the schedule's start_datetime and end_datetime directly.
        """
        if not schedule.start_datetime or not schedule.end_datetime:
            logger.warning(f"One-off schedule {schedule.id} missing start/end datetime")
            return []

        actions = []
        now_utc = datetime.utcnow()
        unit_id = self._resolve_unit_id(schedule)

        # Skip if end time has already passed
        if schedule.end_datetime <= now_utc:
            return []

        # Check if actions already exist for this schedule
        if self._action_exists(schedule.project_id, schedule.location_id, "start", schedule.start_datetime):
            return []

        # Create START action (only if start time hasn't passed)
        if schedule.start_datetime > now_utc:
            start_notes = json.dumps({
                "schedule_name": schedule.name,
                "schedule_id": schedule.id,
                "schedule_type": "one_off",
                "auto_increment_index": schedule.auto_increment_index,
            })

            start_action = ScheduledAction(
                id=str(uuid.uuid4()),
                project_id=schedule.project_id,
                location_id=schedule.location_id,
                unit_id=unit_id,
                action_type="start",
                device_type=schedule.device_type,
                scheduled_time=schedule.start_datetime,
                execution_status="pending",
                notes=start_notes,
            )
            actions.append(start_action)

        # Create STOP action
        stop_notes = json.dumps({
            "schedule_name": schedule.name,
            "schedule_id": schedule.id,
            "schedule_type": "one_off",
            "include_download": schedule.include_download,
        })

        stop_action = ScheduledAction(
            id=str(uuid.uuid4()),
            project_id=schedule.project_id,
            location_id=schedule.location_id,
            unit_id=unit_id,
            action_type="stop",
            device_type=schedule.device_type,
            scheduled_time=schedule.end_datetime,
            execution_status="pending",
            notes=stop_notes,
        )
        actions.append(stop_action)

        return actions
    def _calculate_next_occurrence(self, schedule: RecurringSchedule) -> Optional[datetime]:
        """Calculate when the next action should occur."""
        if not schedule.enabled:
            return None

        tz = ZoneInfo(schedule.timezone)
        now_utc = datetime.utcnow()
        now_local = now_utc.replace(tzinfo=ZoneInfo("UTC")).astimezone(tz)

        if schedule.schedule_type == "weekly_calendar" and schedule.weekly_pattern:
            try:
                pattern = json.loads(schedule.weekly_pattern)
            except json.JSONDecodeError:
                return None

            # Find next enabled day
            for day_offset in range(8):  # Check up to a week ahead
                check_date = now_local.date() + timedelta(days=day_offset)
                day_name = DAY_NAMES[check_date.weekday()]
                day_config = pattern.get(day_name, {})

                if day_config.get("enabled") and day_config.get("start"):
                    start_time = self._parse_time(day_config["start"])
                    if start_time:
                        start_local = datetime.combine(check_date, start_time, tzinfo=tz)
                        start_utc = start_local.astimezone(ZoneInfo("UTC")).replace(tzinfo=None)
                        if start_utc > now_utc:
                            return start_utc

        elif schedule.schedule_type == "simple_interval" and schedule.cycle_time:
            cycle_time = self._parse_time(schedule.cycle_time)
            if cycle_time:
                # Find next cycle time
                for day_offset in range(2):
                    check_date = now_local.date() + timedelta(days=day_offset)
                    cycle_local = datetime.combine(check_date, cycle_time, tzinfo=tz)
                    cycle_utc = cycle_local.astimezone(ZoneInfo("UTC")).replace(tzinfo=None)
                    if cycle_utc > now_utc:
                        return cycle_utc

        elif schedule.schedule_type == "one_off":
            if schedule.start_datetime and schedule.start_datetime > now_utc:
                return schedule.start_datetime
            elif schedule.end_datetime and schedule.end_datetime > now_utc:
                return schedule.end_datetime
            return None

        return None
    def _resolve_unit_id(self, schedule: RecurringSchedule) -> Optional[str]:
        """Get unit_id from schedule or active assignment."""
        if schedule.unit_id:
            return schedule.unit_id

        # Try to get from active assignment
        assignment = self.db.query(UnitAssignment).filter(
            and_(
                UnitAssignment.location_id == schedule.location_id,
                UnitAssignment.status == "active",
            )
        ).first()

        return assignment.unit_id if assignment else None

    def _action_exists(
        self,
        project_id: str,
        location_id: str,
        action_type: str,
        scheduled_time: datetime,
    ) -> bool:
        """Check if an action already exists for this time slot."""
        # Allow 5-minute window for duplicate detection
        time_window_start = scheduled_time - timedelta(minutes=5)
        time_window_end = scheduled_time + timedelta(minutes=5)

        exists = self.db.query(ScheduledAction).filter(
            and_(
                ScheduledAction.project_id == project_id,
                ScheduledAction.location_id == location_id,
                ScheduledAction.action_type == action_type,
                ScheduledAction.scheduled_time >= time_window_start,
                ScheduledAction.scheduled_time <= time_window_end,
                ScheduledAction.execution_status == "pending",
            )
        ).first()

        return exists is not None

    @staticmethod
    def _parse_time(time_str: str) -> Optional[time]:
        """Parse time string "HH:MM" to time object."""
        try:
            parts = time_str.split(":")
            return time(int(parts[0]), int(parts[1]))
        except (ValueError, IndexError):
            return None

    def get_schedules_for_project(self, project_id: str) -> List[RecurringSchedule]:
        """Get all recurring schedules for a project."""
        return self.db.query(RecurringSchedule).filter_by(project_id=project_id).all()

    def get_enabled_schedules(self) -> List[RecurringSchedule]:
        """Get all enabled recurring schedules for projects that are not on hold or deleted."""
        active_project_ids = [
            p.id for p in self.db.query(Project.id).filter(
                Project.status.notin_(["on_hold", "archived", "deleted"])
            ).all()
        ]
        return self.db.query(RecurringSchedule).filter(
            RecurringSchedule.enabled == True,
            RecurringSchedule.project_id.in_(active_project_ids),
        ).all()


def get_recurring_schedule_service(db: Session) -> RecurringScheduleService:
    """Get a RecurringScheduleService instance."""
    return RecurringScheduleService(db)
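
# Illustrative periodic generation pass (the session factory is an assumption):
#
#     svc = get_recurring_schedule_service(SessionLocal())
#     for sched in svc.get_enabled_schedules():
#         svc.generate_actions_for_schedule(sched, horizon_days=7)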
backend/services/scheduler.py (new file, 842 lines)
@@ -0,0 +1,842 @@
"""
Scheduler Service

Executes scheduled actions for the Projects system.
Monitors pending scheduled actions and executes them by calling device modules (SLMM/SFM).

Extended to support recurring schedules:
- Generates ScheduledActions from RecurringSchedule patterns
- Cleans up old completed/failed actions

This service runs as a background task in FastAPI, checking for pending actions
every minute and executing them when their scheduled time arrives.
"""

import asyncio
import json
import logging
from datetime import datetime, timedelta
from typing import Optional, List, Dict, Any
from sqlalchemy.orm import Session
from sqlalchemy import and_

from backend.database import SessionLocal
from backend.models import ScheduledAction, MonitoringSession, MonitoringLocation, Project, RecurringSchedule
from backend.services.device_controller import get_device_controller, DeviceControllerError
from backend.services.alert_service import get_alert_service
import uuid

logger = logging.getLogger(__name__)

class SchedulerService:
    """
    Service for executing scheduled actions.

    Usage:
        scheduler = SchedulerService()
        await scheduler.start()  # Start background loop
        scheduler.stop()         # Stop background loop
    """

    def __init__(self, check_interval: int = 60):
        """
        Initialize scheduler.

        Args:
            check_interval: Seconds between checks for pending actions (default: 60)
        """
        self.check_interval = check_interval
        self.running = False
        self.task: Optional[asyncio.Task] = None
        self.device_controller = get_device_controller()

    async def start(self):
        """Start the scheduler background task."""
        if self.running:
            logger.info("Scheduler is already running")
            return

        self.running = True
        self.task = asyncio.create_task(self._run_loop())
        logger.info(f"Scheduler started (checking every {self.check_interval}s)")

    def stop(self):
        """Stop the scheduler background task."""
        self.running = False
        if self.task:
            self.task.cancel()
        logger.info("Scheduler stopped")
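
    # A minimal FastAPI wiring sketch (illustrative; the app and lifespan names
    # are assumptions, not part of this diff):
    #
    #     from contextlib import asynccontextmanager
    #     from fastapi import FastAPI
    #
    #     scheduler = SchedulerService()
    #
    #     @asynccontextmanager
    #     async def lifespan(app: FastAPI):
    #         await scheduler.start()
    #         yield
    #         scheduler.stop()
    #
    #     app = FastAPI(lifespan=lifespan)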
    async def _run_loop(self):
        """Main scheduler loop."""
        # Track when we last generated recurring actions (do this once per hour)
        last_generation_check = datetime.utcnow() - timedelta(hours=1)

        while self.running:
            try:
                # Execute pending actions
                await self.execute_pending_actions()

                # Generate actions from recurring schedules (every hour)
                now = datetime.utcnow()
                if (now - last_generation_check).total_seconds() >= 3600:
                    await self.generate_recurring_actions()
                    last_generation_check = now

                # Cleanup old actions (also hourly; this condition only holds on
                # iterations where generation just reset last_generation_check)
                if (now - last_generation_check).total_seconds() < 60:
                    await self.cleanup_old_actions()

            except Exception as e:
                logger.error(f"Scheduler error: {e}", exc_info=True)
                # Continue running even if there's an error

            await asyncio.sleep(self.check_interval)

    async def execute_pending_actions(self) -> List[Dict[str, Any]]:
        """
        Find and execute all pending scheduled actions that are due.

        Returns:
            List of execution results
        """
        db = SessionLocal()
        results = []

        try:
            # Find pending actions that are due
            now = datetime.utcnow()

            # Only execute actions for active/completed projects (not on_hold, archived, or deleted)
            active_project_ids = [
                p.id for p in db.query(Project.id).filter(
                    Project.status.notin_(["on_hold", "archived", "deleted"])
                ).all()
            ]

            pending_actions = db.query(ScheduledAction).filter(
                and_(
                    ScheduledAction.execution_status == "pending",
                    ScheduledAction.scheduled_time <= now,
                    ScheduledAction.project_id.in_(active_project_ids),
                )
            ).order_by(ScheduledAction.scheduled_time).all()

            if not pending_actions:
                return []

            logger.info(f"Found {len(pending_actions)} pending action(s) to execute")

            for action in pending_actions:
                result = await self._execute_action(action, db)
                results.append(result)

            db.commit()

        except Exception as e:
            logger.error(f"Error executing pending actions: {e}")
            db.rollback()
        finally:
            db.close()

        return results
    async def _execute_action(
        self,
        action: ScheduledAction,
        db: Session,
    ) -> Dict[str, Any]:
        """
        Execute a single scheduled action.

        Args:
            action: ScheduledAction to execute
            db: Database session

        Returns:
            Execution result dict
        """
        logger.info(f"Executing action {action.id}: {action.action_type} for unit {action.unit_id}")

        result = {
            "action_id": action.id,
            "action_type": action.action_type,
            "unit_id": action.unit_id,
            "scheduled_time": action.scheduled_time.isoformat(),
            "success": False,
            "error": None,
        }

        # Determine which unit to use: if unit_id is specified, use it;
        # otherwise resolve it from the location assignment inside the try block.
        # Binding it before the try keeps it defined for the failure alert below.
        unit_id = action.unit_id

        try:
            if not unit_id:
                # Get assigned unit from location
                from backend.models import UnitAssignment
                assignment = db.query(UnitAssignment).filter(
                    and_(
                        UnitAssignment.location_id == action.location_id,
                        UnitAssignment.status == "active",
                    )
                ).first()

                if not assignment:
                    raise Exception(f"No active unit assigned to location {action.location_id}")

                unit_id = assignment.unit_id

            # Execute the action based on type
            if action.action_type == "start":
                response = await self._execute_start(action, unit_id, db)
            elif action.action_type == "stop":
                response = await self._execute_stop(action, unit_id, db)
            elif action.action_type == "download":
                response = await self._execute_download(action, unit_id, db)
            elif action.action_type == "cycle":
                response = await self._execute_cycle(action, unit_id, db)
            else:
                raise Exception(f"Unknown action type: {action.action_type}")

            # Mark action as completed
            action.execution_status = "completed"
            action.executed_at = datetime.utcnow()
            action.module_response = json.dumps(response)

            result["success"] = True
            result["response"] = response

            logger.info(f"✓ Action {action.id} completed successfully")

            # Create success alert
            try:
                alert_service = get_alert_service(db)
                alert_metadata = response.get("cycle_response", {}) if isinstance(response, dict) else {}
                alert_service.create_schedule_completed_alert(
                    schedule_id=action.id,
                    action_type=action.action_type,
                    unit_id=unit_id,
                    project_id=action.project_id,
                    location_id=action.location_id,
                    metadata=alert_metadata,
                )
            except Exception as alert_err:
                logger.warning(f"Failed to create success alert: {alert_err}")

        except Exception as e:
            # Mark action as failed
            action.execution_status = "failed"
            action.executed_at = datetime.utcnow()
            action.error_message = str(e)

            result["error"] = str(e)

            logger.error(f"✗ Action {action.id} failed: {e}")

            # Create failure alert
            try:
                alert_service = get_alert_service(db)
                alert_service.create_schedule_failed_alert(
                    schedule_id=action.id,
                    action_type=action.action_type,
                    unit_id=unit_id,
                    error_message=str(e),
                    project_id=action.project_id,
                    location_id=action.location_id,
                )
            except Exception as alert_err:
                logger.warning(f"Failed to create failure alert: {alert_err}")

        return result
    async def _execute_start(
        self,
        action: ScheduledAction,
        unit_id: str,
        db: Session,
    ) -> Dict[str, Any]:
        """Execute a 'start' action using the start_cycle command.

        start_cycle handles:
        1. Sync device clock to server time
        2. Find next safe index (with overwrite protection)
        3. Start measurement
        """
        # Execute the full start cycle via device controller
        # SLMM handles clock sync, index increment, and start
        cycle_response = await self.device_controller.start_cycle(
            unit_id,
            action.device_type,
            sync_clock=True,
        )

        # Create recording session
        session = MonitoringSession(
            id=str(uuid.uuid4()),
            project_id=action.project_id,
            location_id=action.location_id,
            unit_id=unit_id,
            session_type="sound" if action.device_type == "slm" else "vibration",
            started_at=datetime.utcnow(),
            status="recording",
            session_metadata=json.dumps({
                "scheduled_action_id": action.id,
                "cycle_response": cycle_response,
            }),
        )
        db.add(session)

        return {
            "status": "started",
            "session_id": session.id,
            "cycle_response": cycle_response,
        }
async def _execute_stop(
|
||||||
|
self,
|
||||||
|
action: ScheduledAction,
|
||||||
|
unit_id: str,
|
||||||
|
db: Session,
|
||||||
|
) -> Dict[str, Any]:
|
||||||
|
"""Execute a 'stop' action using the stop_cycle command.
|
||||||
|
|
||||||
|
stop_cycle handles:
|
||||||
|
1. Stop measurement
|
||||||
|
2. Enable FTP
|
||||||
|
3. Download measurement folder to SLMM local storage
|
||||||
|
|
||||||
|
After stop_cycle, if download succeeded, this method fetches the ZIP
|
||||||
|
from SLMM and extracts it into Terra-View's project directory, creating
|
||||||
|
DataFile records for each file.
|
||||||
|
"""
|
||||||
|
import hashlib
|
||||||
|
import io
|
||||||
|
import os
|
||||||
|
import zipfile
|
||||||
|
import httpx
|
||||||
|
from pathlib import Path
|
||||||
|
from backend.models import DataFile
|
||||||
|
|
||||||
|
# Parse notes for download preference
|
||||||
|
include_download = True
|
||||||
|
try:
|
||||||
|
if action.notes:
|
||||||
|
notes_data = json.loads(action.notes)
|
||||||
|
include_download = notes_data.get("include_download", True)
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
pass # Notes is plain text, not JSON
|
||||||
|
|
||||||
|
# Execute the full stop cycle via device controller
|
||||||
|
# SLMM handles stop, FTP enable, and download to SLMM-local storage
|
||||||
|
cycle_response = await self.device_controller.stop_cycle(
|
||||||
|
unit_id,
|
||||||
|
action.device_type,
|
||||||
|
download=include_download,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Find and update the active recording session
|
||||||
|
active_session = db.query(MonitoringSession).filter(
|
||||||
|
and_(
|
||||||
|
MonitoringSession.location_id == action.location_id,
|
||||||
|
MonitoringSession.unit_id == unit_id,
|
||||||
|
MonitoringSession.status == "recording",
|
||||||
|
)
|
||||||
|
).first()
|
||||||
|
|
||||||
|
if active_session:
|
||||||
|
active_session.stopped_at = datetime.utcnow()
|
||||||
|
active_session.status = "completed"
|
||||||
|
active_session.duration_seconds = int(
|
||||||
|
(active_session.stopped_at - active_session.started_at).total_seconds()
|
||||||
|
)
|
||||||
|
# Store download info in session metadata
|
||||||
|
if cycle_response.get("download_success"):
|
||||||
|
try:
|
||||||
|
metadata = json.loads(active_session.session_metadata or "{}")
|
||||||
|
metadata["downloaded_folder"] = cycle_response.get("downloaded_folder")
|
||||||
|
metadata["local_path"] = cycle_response.get("local_path")
|
||||||
|
active_session.session_metadata = json.dumps(metadata)
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
db.commit()
|
||||||
|
|
||||||
|
# If SLMM downloaded the folder successfully, fetch the ZIP from SLMM
|
||||||
|
# and extract it into Terra-View's project directory, creating DataFile records
|
||||||
|
files_created = 0
|
||||||
|
if include_download and cycle_response.get("download_success") and active_session:
|
||||||
|
folder_name = cycle_response.get("downloaded_folder") # e.g. "Auto_0058"
|
||||||
|
remote_path = f"/NL-43/{folder_name}"
|
||||||
|
|
||||||
|
try:
|
||||||
|
SLMM_BASE_URL = os.getenv("SLMM_BASE_URL", "http://localhost:8100")
|
||||||
|
async with httpx.AsyncClient(timeout=600.0) as client:
|
||||||
|
zip_response = await client.post(
|
||||||
|
f"{SLMM_BASE_URL}/api/nl43/{unit_id}/ftp/download-folder",
|
||||||
|
json={"remote_path": remote_path}
|
||||||
|
)
|
||||||
|
|
||||||
|
                if zip_response.is_success and len(zip_response.content) > 22:  # 22 bytes = an empty ZIP (EOCD record only)
                    base_dir = Path(f"data/Projects/{action.project_id}/{active_session.id}/{folder_name}")
                    base_dir.mkdir(parents=True, exist_ok=True)

                    file_type_map = {
                        '.wav': 'audio', '.mp3': 'audio',
                        '.csv': 'data', '.txt': 'data', '.json': 'data', '.dat': 'data',
                        '.rnd': 'data', '.rnh': 'data',
                        '.log': 'log',
                        '.zip': 'archive',
                        '.jpg': 'image', '.jpeg': 'image', '.png': 'image',
                        '.pdf': 'document',
                    }

                    with zipfile.ZipFile(io.BytesIO(zip_response.content)) as zf:
                        for zip_info in zf.infolist():
                            if zip_info.is_dir():
                                continue
                            file_data = zf.read(zip_info.filename)
                            file_path = base_dir / zip_info.filename
                            file_path.parent.mkdir(parents=True, exist_ok=True)
                            with open(file_path, 'wb') as f:
                                f.write(file_data)
                            checksum = hashlib.sha256(file_data).hexdigest()
                            ext = os.path.splitext(zip_info.filename)[1].lower()
                            data_file = DataFile(
                                id=str(uuid.uuid4()),
                                session_id=active_session.id,
                                file_path=str(file_path.relative_to("data")),
                                file_type=file_type_map.get(ext, 'data'),
                                file_size_bytes=len(file_data),
                                downloaded_at=datetime.utcnow(),
                                checksum=checksum,
                                file_metadata=json.dumps({
                                    "source": "stop_cycle",
                                    "remote_path": remote_path,
                                    "unit_id": unit_id,
                                    "folder_name": folder_name,
                                    "relative_path": zip_info.filename,
                                }),
                            )
                            db.add(data_file)
                            files_created += 1

                    db.commit()
                    logger.info(f"Created {files_created} DataFile records for session {active_session.id} from {folder_name}")
                else:
                    logger.warning(f"ZIP from SLMM for {folder_name} was empty or failed, skipping DataFile creation")

            except Exception as e:
                logger.error(f"Failed to extract ZIP and create DataFile records for {folder_name}: {e}")
                # Don't fail the stop action — the device was stopped successfully

        return {
            "status": "stopped",
            "session_id": active_session.id if active_session else None,
            "cycle_response": cycle_response,
            "files_created": files_created,
        }

    async def _execute_download(
        self,
        action: ScheduledAction,
        unit_id: str,
        db: Session,
    ) -> Dict[str, Any]:
        """Execute a 'download' action.

        This handles standalone download actions (not part of stop_cycle).
        The workflow is:
        1. Enable FTP on device
        2. Download current measurement folder
        3. (Optionally disable FTP - left enabled for now)
        """
        # Get project and location info for file path
        location = db.query(MonitoringLocation).filter_by(id=action.location_id).first()
        project = db.query(Project).filter_by(id=action.project_id).first()

        if not location or not project:
            raise Exception("Project or location not found")

        # Build destination path (for logging/metadata reference)
        # Actual download location is managed by SLMM (data/downloads/{unit_id}/)
        session_timestamp = datetime.utcnow().strftime("%Y-%m-%d-%H%M")
        location_type_dir = "sound" if action.device_type == "slm" else "vibration"

        destination_path = (
            f"data/Projects/{project.id}/{location_type_dir}/"
            f"{location.name}/session-{session_timestamp}/"
        )

        # Step 1: Disable FTP first to reset any stale connection state,
        # then enable FTP on the device
        logger.info(f"Resetting FTP on {unit_id} for download (disable then enable)")
        try:
            await self.device_controller.disable_ftp(unit_id, action.device_type)
        except Exception as e:
            logger.warning(f"FTP disable failed (may already be off): {e}")
        await self.device_controller.enable_ftp(unit_id, action.device_type)

        # Step 2: Download current measurement folder
        # slmm_client.download_files() automatically determines the correct
        # folder based on the device's current index number
        response = await self.device_controller.download_files(
            unit_id,
            action.device_type,
            destination_path,
            files=None,  # Download all files in current measurement folder
        )

        # TODO: Create DataFile records for downloaded files

        return {
            "status": "downloaded",
            "destination_path": destination_path,
            "device_response": response,
        }

    async def _execute_cycle(
        self,
        action: ScheduledAction,
        unit_id: str,
        db: Session,
    ) -> Dict[str, Any]:
        """Execute a full 'cycle' action: stop -> download -> start.

        This combines stop, download, and start into a single action with
        appropriate delays between steps to ensure device stability.

        Workflow:
        0. Pause background polling to prevent command conflicts
        1. Stop measurement (wait 10s)
        2. Disable FTP to reset state (wait 10s)
        3. Enable FTP (wait 10s)
        4. Download current measurement folder
        5. Wait 30s for device to settle
        6. Start new measurement cycle
        7. Re-enable background polling

        Total time: ~70-90 seconds depending on download size
        """
        logger.info(f"[CYCLE] === Starting full cycle for {unit_id} ===")

        result = {
            "status": "cycle_complete",
            "steps": {},
            "old_session_id": None,
            "new_session_id": None,
            "polling_paused": False,
        }

        # Step 0: Pause background polling for this device to prevent command conflicts
        # NL-43 devices only support one TCP connection at a time
        logger.info(f"[CYCLE] Step 0: Pausing background polling for {unit_id}")
        polling_was_enabled = False
        try:
            if action.device_type == "slm":
                # Get current polling state to restore later
                from backend.services.slmm_client import get_slmm_client
                slmm = get_slmm_client()
                try:
                    polling_config = await slmm.get_device_polling_config(unit_id)
                    polling_was_enabled = polling_config.get("poll_enabled", False)
                except Exception:
                    polling_was_enabled = True  # Assume enabled if we can't check

                # Disable polling during cycle
                await slmm.update_device_polling_config(unit_id, poll_enabled=False)
                result["polling_paused"] = True
                logger.info(f"[CYCLE] Background polling paused for {unit_id}")
        except Exception as e:
            logger.warning(f"[CYCLE] Failed to pause polling (continuing anyway): {e}")

        try:
            # Step 1: Stop measurement
            logger.info(f"[CYCLE] Step 1/7: Stopping measurement on {unit_id}")
            try:
                stop_response = await self.device_controller.stop_recording(unit_id, action.device_type)
                result["steps"]["stop"] = {"success": True, "response": stop_response}
                logger.info("[CYCLE] Measurement stopped, waiting 10s...")
            except Exception as e:
                logger.warning(f"[CYCLE] Stop failed (may already be stopped): {e}")
                result["steps"]["stop"] = {"success": False, "error": str(e)}

            await asyncio.sleep(10)

            # Step 2: Disable FTP to reset any stale state
            logger.info(f"[CYCLE] Step 2/7: Disabling FTP on {unit_id}")
            try:
                await self.device_controller.disable_ftp(unit_id, action.device_type)
                result["steps"]["ftp_disable"] = {"success": True}
                logger.info("[CYCLE] FTP disabled, waiting 10s...")
            except Exception as e:
                logger.warning(f"[CYCLE] FTP disable failed (may already be off): {e}")
                result["steps"]["ftp_disable"] = {"success": False, "error": str(e)}

            await asyncio.sleep(10)

            # Step 3: Enable FTP
            logger.info(f"[CYCLE] Step 3/7: Enabling FTP on {unit_id}")
            try:
                await self.device_controller.enable_ftp(unit_id, action.device_type)
                result["steps"]["ftp_enable"] = {"success": True}
                logger.info("[CYCLE] FTP enabled, waiting 10s...")
            except Exception as e:
                logger.error(f"[CYCLE] FTP enable failed: {e}")
                result["steps"]["ftp_enable"] = {"success": False, "error": str(e)}
                # Continue anyway - download will fail but we can still try to start

            await asyncio.sleep(10)

            # Step 4: Download current measurement folder
            logger.info(f"[CYCLE] Step 4/7: Downloading measurement data from {unit_id}")
            location = db.query(MonitoringLocation).filter_by(id=action.location_id).first()
            project = db.query(Project).filter_by(id=action.project_id).first()

            if location and project:
                session_timestamp = datetime.utcnow().strftime("%Y-%m-%d-%H%M")
                location_type_dir = "sound" if action.device_type == "slm" else "vibration"
                destination_path = (
                    f"data/Projects/{project.id}/{location_type_dir}/"
                    f"{location.name}/session-{session_timestamp}/"
                )

                try:
                    download_response = await self.device_controller.download_files(
                        unit_id,
                        action.device_type,
                        destination_path,
                        files=None,
                    )
                    result["steps"]["download"] = {"success": True, "response": download_response}
                    logger.info("[CYCLE] Download complete")
                except Exception as e:
                    logger.error(f"[CYCLE] Download failed: {e}")
                    result["steps"]["download"] = {"success": False, "error": str(e)}
            else:
                result["steps"]["download"] = {"success": False, "error": "Project or location not found"}

            # Close out the old recording session
            active_session = db.query(MonitoringSession).filter(
                and_(
                    MonitoringSession.location_id == action.location_id,
                    MonitoringSession.unit_id == unit_id,
                    MonitoringSession.status == "recording",
                )
            ).first()

            if active_session:
                active_session.stopped_at = datetime.utcnow()
                active_session.status = "completed"
                active_session.duration_seconds = int(
                    (active_session.stopped_at - active_session.started_at).total_seconds()
                )
                result["old_session_id"] = active_session.id

            # Step 5: Wait for device to settle before starting new measurement
            logger.info("[CYCLE] Step 5/7: Waiting 30s for device to settle...")
            await asyncio.sleep(30)

            # Step 6: Start new measurement cycle
            logger.info(f"[CYCLE] Step 6/7: Starting new measurement on {unit_id}")
            try:
                cycle_response = await self.device_controller.start_cycle(
                    unit_id,
                    action.device_type,
                    sync_clock=True,
                )
                result["steps"]["start"] = {"success": True, "response": cycle_response}

                # Create new recording session
                new_session = MonitoringSession(
                    id=str(uuid.uuid4()),
                    project_id=action.project_id,
                    location_id=action.location_id,
                    unit_id=unit_id,
                    session_type="sound" if action.device_type == "slm" else "vibration",
                    started_at=datetime.utcnow(),
                    status="recording",
                    session_metadata=json.dumps({
                        "scheduled_action_id": action.id,
                        "cycle_response": cycle_response,
                        "action_type": "cycle",
                    }),
                )
                db.add(new_session)
                result["new_session_id"] = new_session.id

                logger.info(f"[CYCLE] New measurement started, session {new_session.id}")

            except Exception as e:
                logger.error(f"[CYCLE] Start failed: {e}")
                result["steps"]["start"] = {"success": False, "error": str(e)}
                raise  # Re-raise to mark the action as failed

        finally:
            # Step 7: Re-enable background polling (always runs, even on failure)
            if result.get("polling_paused") and polling_was_enabled:
                logger.info(f"[CYCLE] Step 7/7: Re-enabling background polling for {unit_id}")
                try:
                    if action.device_type == "slm":
                        from backend.services.slmm_client import get_slmm_client
                        slmm = get_slmm_client()
                        await slmm.update_device_polling_config(unit_id, poll_enabled=True)
                        logger.info(f"[CYCLE] Background polling re-enabled for {unit_id}")
                except Exception as e:
                    logger.error(f"[CYCLE] Failed to re-enable polling: {e}")
                    # Don't raise - cycle completed, just log the error

        logger.info(f"[CYCLE] === Cycle complete for {unit_id} ===")
        return result

    # ========================================================================
    # Recurring Schedule Generation
    # ========================================================================

    async def generate_recurring_actions(self) -> int:
        """
        Generate ScheduledActions from all enabled recurring schedules.

        Runs once per hour to generate actions for the next 7 days.

        Returns:
            Total number of actions generated
        """
        db = SessionLocal()
        total_generated = 0

        try:
            from backend.services.recurring_schedule_service import get_recurring_schedule_service

            service = get_recurring_schedule_service(db)
            schedules = service.get_enabled_schedules()

            if not schedules:
                logger.debug("No enabled recurring schedules found")
                return 0

            logger.info(f"Generating actions for {len(schedules)} recurring schedule(s)")

            for schedule in schedules:
                try:
                    # Auto-disable one-off schedules whose end time has passed
                    if schedule.schedule_type == "one_off" and schedule.end_datetime:
                        if schedule.end_datetime <= datetime.utcnow():
                            schedule.enabled = False
                            schedule.next_occurrence = None
                            db.commit()
                            logger.info(f"Auto-disabled completed one-off schedule: {schedule.name}")
                            continue

                    actions = service.generate_actions_for_schedule(schedule, horizon_days=7)
                    total_generated += len(actions)
                except Exception as e:
                    logger.error(f"Error generating actions for schedule {schedule.id}: {e}")

            if total_generated > 0:
                logger.info(f"Generated {total_generated} scheduled actions from recurring schedules")

        except Exception as e:
            logger.error(f"Error in generate_recurring_actions: {e}", exc_info=True)
        finally:
            db.close()

        return total_generated

    async def cleanup_old_actions(self, retention_days: int = 30) -> int:
        """
        Remove old completed/failed actions to prevent database bloat.

        Args:
            retention_days: Keep actions newer than this many days

        Returns:
            Number of actions cleaned up
        """
        db = SessionLocal()
        cleaned = 0

        try:
            cutoff = datetime.utcnow() - timedelta(days=retention_days)

            old_actions = db.query(ScheduledAction).filter(
                and_(
                    ScheduledAction.execution_status.in_(["completed", "failed", "cancelled"]),
                    ScheduledAction.executed_at < cutoff,
                )
            ).all()

            cleaned = len(old_actions)
            for action in old_actions:
                db.delete(action)

            if cleaned > 0:
                db.commit()
                logger.info(f"Cleaned up {cleaned} old scheduled actions (>{retention_days} days)")

        except Exception as e:
            logger.error(f"Error cleaning up old actions: {e}")
            db.rollback()
        finally:
            db.close()

        return cleaned

    # ========================================================================
    # Manual Execution (for testing/debugging)
    # ========================================================================

    async def execute_action_by_id(self, action_id: str) -> Dict[str, Any]:
        """
        Manually execute a specific action by ID.

        Args:
            action_id: ScheduledAction ID

        Returns:
            Execution result
        """
        db = SessionLocal()
        try:
            action = db.query(ScheduledAction).filter_by(id=action_id).first()
            if not action:
                return {"success": False, "error": "Action not found"}

            result = await self._execute_action(action, db)
            db.commit()
            return result

        except Exception as e:
            db.rollback()
            return {"success": False, "error": str(e)}
        finally:
            db.close()


# Singleton instance
_scheduler_instance: Optional[SchedulerService] = None


def get_scheduler() -> SchedulerService:
    """
    Get the scheduler singleton instance.

    Returns:
        SchedulerService instance
    """
    global _scheduler_instance
    if _scheduler_instance is None:
        _scheduler_instance = SchedulerService()
    return _scheduler_instance


async def start_scheduler():
    """Start the global scheduler instance."""
    scheduler = get_scheduler()
    await scheduler.start()


def stop_scheduler():
    """Stop the global scheduler instance."""
    scheduler = get_scheduler()
    scheduler.stop()
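
The scheduler is exposed through a module-level singleton, so callers go through `get_scheduler()`, `start_scheduler()`, and `stop_scheduler()` rather than constructing `SchedulerService` themselves. A minimal wiring sketch follows; the FastAPI app, the route, and the `backend.services.scheduler_service` module path are illustrative assumptions, not part of this diff:

```python
# Sketch only: the FastAPI wiring and the module path are assumptions.
from contextlib import asynccontextmanager

from fastapi import FastAPI

from backend.services.scheduler_service import (
    get_scheduler,
    start_scheduler,
    stop_scheduler,
)


@asynccontextmanager
async def lifespan(app: FastAPI):
    await start_scheduler()   # begin executing due ScheduledActions
    yield
    stop_scheduler()          # signal the scheduler loop to exit


app = FastAPI(lifespan=lifespan)


@app.post("/api/scheduled-actions/{action_id}/run")
async def run_action_now(action_id: str):
    # Manual execution path intended for testing/debugging
    return await get_scheduler().execute_action_by_id(action_id)
```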

backend/services/slm_status_sync.py (new file, 129 lines)
@@ -0,0 +1,129 @@
"""
SLM Status Synchronization Service

Syncs SLM device status from SLMM backend to Terra-View's Emitter table.
This bridges SLMM's polling data with Terra-View's status snapshot system.

SLMM tracks device reachability via background polling. This service
fetches that data and creates/updates Emitter records so SLMs appear
correctly in the dashboard status snapshot.
"""

import logging
from datetime import datetime, timezone
from typing import Dict, Any

from backend.database import get_db_session
from backend.models import Emitter
from backend.services.slmm_client import get_slmm_client, SLMMClientError

logger = logging.getLogger(__name__)


async def sync_slm_status_to_emitters() -> Dict[str, Any]:
    """
    Fetch SLM status from SLMM and sync to Terra-View's Emitter table.

    For each device in SLMM's polling status:
    - If last_success exists, create/update Emitter with that timestamp
    - If not reachable, update Emitter with last known timestamp (or None)

    Returns:
        Dict with synced_count, error_count, errors list
    """
    client = get_slmm_client()
    synced = 0
    errors = []

    try:
        # Get polling status from SLMM
        status_response = await client.get_polling_status()

        # Handle nested response structure
        data = status_response.get("data", status_response)
        devices = data.get("devices", [])

        if not devices:
            logger.debug("No SLM devices in SLMM polling status")
            return {"synced_count": 0, "error_count": 0, "errors": []}

        db = get_db_session()
        try:
            for device in devices:
                unit_id = device.get("unit_id")
                if not unit_id:
                    continue

                try:
                    # Get or create Emitter record
                    emitter = db.query(Emitter).filter(Emitter.id == unit_id).first()

                    # Determine last_seen from SLMM data
                    last_success_str = device.get("last_success")
                    is_reachable = device.get("is_reachable", False)

                    if last_success_str:
                        # Parse ISO format timestamp
                        last_seen = datetime.fromisoformat(
                            last_success_str.replace("Z", "+00:00")
                        )
                        # Convert to naive UTC for consistency with existing code
                        if last_seen.tzinfo:
                            last_seen = last_seen.astimezone(timezone.utc).replace(tzinfo=None)
                    elif is_reachable:
                        # Device is reachable but no last_success yet (first poll or just started)
                        # Use current time so it shows as OK, not Missing
                        last_seen = datetime.utcnow()
                    else:
                        last_seen = None

                    # Status will be recalculated by snapshot.py based on time thresholds
                    # Just store a provisional status here
                    status = "OK" if is_reachable else "Missing"

                    # Store last error message if available
                    last_error = device.get("last_error") or ""

                    if emitter:
                        # Update existing record
                        emitter.last_seen = last_seen
                        emitter.status = status
                        emitter.unit_type = "slm"
                        emitter.last_file = last_error
                    else:
                        # Create new record
                        emitter = Emitter(
                            id=unit_id,
                            unit_type="slm",
                            last_seen=last_seen,
                            last_file=last_error,
                            status=status,
                        )
                        db.add(emitter)

                    synced += 1

                except Exception as e:
                    errors.append(f"{unit_id}: {str(e)}")
                    logger.error(f"Error syncing SLM {unit_id}: {e}")

            db.commit()

        finally:
            db.close()

        if synced > 0:
            logger.info(f"Synced {synced} SLM device(s) to Emitter table")

    except SLMMClientError as e:
        logger.warning(f"Could not reach SLMM for status sync: {e}")
        errors.append(f"SLMM unreachable: {str(e)}")
    except Exception as e:
        logger.error(f"Error in SLM status sync: {e}", exc_info=True)
        errors.append(str(e))

    return {
        "synced_count": synced,
        "error_count": len(errors),
        "errors": errors,
    }
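
`sync_slm_status_to_emitters()` is a one-shot coroutine that returns a summary dict, so it can be driven from any periodic runner. A minimal sketch; the 60-second interval and the standalone loop wrapper are assumptions, only the sync function above is defined in this diff:

```python
# Sketch only: the interval and loop wrapper are illustrative assumptions.
import asyncio

from backend.services.slm_status_sync import sync_slm_status_to_emitters


async def run_status_sync_loop(interval_seconds: int = 60) -> None:
    while True:
        result = await sync_slm_status_to_emitters()
        if result["error_count"]:
            # Per-device errors look like "nl43-001: <reason>"
            print("SLM status sync errors:", result["errors"])
        await asyncio.sleep(interval_seconds)


if __name__ == "__main__":
    asyncio.run(run_status_sync_loop())
```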

backend/services/slmm_client.py (new file, 857 lines)
@@ -0,0 +1,857 @@
"""
SLMM API Client Wrapper

Provides a clean interface for Terra-View to interact with the SLMM backend.
All SLM operations should go through this client instead of direct HTTP calls.

SLMM (Sound Level Meter Manager) is a separate service running on port 8100
that handles TCP/FTP communication with Rion NL-43/NL-53 devices.
"""

import httpx
import os
from typing import Optional, Dict, Any, List
from datetime import datetime
import json


# SLMM backend base URLs - use environment variable if set (for Docker)
SLMM_BASE_URL = os.environ.get("SLMM_BASE_URL", "http://localhost:8100")
SLMM_API_BASE = f"{SLMM_BASE_URL}/api/nl43"


class SLMMClientError(Exception):
    """Base exception for SLMM client errors."""
    pass


class SLMMConnectionError(SLMMClientError):
    """Raised when cannot connect to SLMM backend."""
    pass


class SLMMDeviceError(SLMMClientError):
    """Raised when device operation fails."""
    pass


class SLMMClient:
    """
    Client for interacting with SLMM backend.

    Usage:
        client = SLMMClient()
        units = await client.get_all_units()
        status = await client.get_unit_status("nl43-001")
        await client.start_recording("nl43-001", config={...})
    """

    def __init__(self, base_url: str = SLMM_BASE_URL, timeout: float = 30.0):
        self.base_url = base_url
        self.api_base = f"{base_url}/api/nl43"
        self.timeout = timeout

    async def _request(
        self,
        method: str,
        endpoint: str,
        data: Optional[Dict] = None,
        params: Optional[Dict] = None,
    ) -> Dict[str, Any]:
        """
        Make an HTTP request to SLMM backend.

        Args:
            method: HTTP method (GET, POST, PUT, DELETE)
            endpoint: API endpoint (e.g., "/units", "/{unit_id}/status")
            data: JSON body for POST/PUT requests
            params: Query parameters

        Returns:
            Response JSON as dict

        Raises:
            SLMMConnectionError: Cannot reach SLMM
            SLMMDeviceError: Device operation failed
        """
        url = f"{self.api_base}{endpoint}"

        try:
            async with httpx.AsyncClient(timeout=self.timeout) as client:
                response = await client.request(
                    method=method,
                    url=url,
                    json=data,
                    params=params,
                )
                response.raise_for_status()

                # Handle empty responses
                if not response.content:
                    return {}

                return response.json()

        except httpx.ConnectError as e:
            raise SLMMConnectionError(
                f"Cannot connect to SLMM backend at {self.base_url}. "
                f"Is SLMM running? Error: {str(e)}"
            )
        except httpx.HTTPStatusError as e:
            error_detail = "Unknown error"
            try:
                error_data = e.response.json()
                error_detail = error_data.get("detail", str(error_data))
            except Exception:  # fall back to raw text if the error body isn't JSON
                error_detail = e.response.text or str(e)

            raise SLMMDeviceError(
                f"SLMM operation failed: {error_detail}"
            )
        except Exception as e:
            error_msg = str(e) if str(e) else type(e).__name__
            raise SLMMClientError(f"Unexpected error: {error_msg}")

    async def _download_request(
        self,
        endpoint: str,
        data: Dict[str, Any],
        unit_id: str,
    ) -> Dict[str, Any]:
        """
        Make a download request to SLMM that returns binary file content (not JSON).

        Saves the file locally and returns metadata about the download.
        """
        url = f"{self.api_base}{endpoint}"

        try:
            async with httpx.AsyncClient(timeout=httpx.Timeout(300.0)) as client:
                response = await client.post(url, json=data)
                response.raise_for_status()

                # Determine filename from Content-Disposition header or generate one
                content_disp = response.headers.get("content-disposition", "")
                filename = None
                if "filename=" in content_disp:
                    filename = content_disp.split("filename=")[-1].strip('" ')

                if not filename:
                    remote_path = data.get("remote_path", "download")
                    base = os.path.basename(remote_path.rstrip("/"))
                    filename = f"{base}.zip" if not base.endswith(".zip") else base

                # Save to local downloads directory
                download_dir = os.path.join("data", "downloads", unit_id)
                os.makedirs(download_dir, exist_ok=True)
                local_path = os.path.join(download_dir, filename)

                with open(local_path, "wb") as f:
                    f.write(response.content)

                return {
                    "success": True,
                    "local_path": local_path,
                    "filename": filename,
                    "size_bytes": len(response.content),
                }

        except httpx.ConnectError as e:
            raise SLMMConnectionError(
                f"Cannot connect to SLMM backend at {self.base_url}. "
                f"Is SLMM running? Error: {str(e)}"
            )
        except httpx.HTTPStatusError as e:
            error_detail = "Unknown error"
            try:
                error_data = e.response.json()
                error_detail = error_data.get("detail", str(error_data))
            except Exception:
                error_detail = e.response.text or str(e)
            raise SLMMDeviceError(f"SLMM download failed: {error_detail}")
        except (SLMMConnectionError, SLMMDeviceError):
            raise
        except Exception as e:
            error_msg = str(e) if str(e) else type(e).__name__
            raise SLMMClientError(f"Download error: {error_msg}")

    # ========================================================================
    # Unit Management
    # ========================================================================

    async def get_all_units(self) -> List[Dict[str, Any]]:
        """
        Get all configured SLM units from SLMM.

        Returns:
            List of unit dicts with id, config, and status
        """
        # SLMM doesn't have a /units endpoint yet, so we'll need to add this
        # For now, return an empty list; implement when the SLMM endpoint is ready
        try:
            response = await self._request("GET", "/units")
            return response.get("units", [])
        except SLMMClientError:
            # Endpoint may not exist yet
            return []

    async def get_unit_config(self, unit_id: str) -> Dict[str, Any]:
        """
        Get unit configuration from SLMM cache.

        Args:
            unit_id: Unit identifier (e.g., "nl43-001")

        Returns:
            Config dict with host, tcp_port, ftp_port, etc.
        """
        return await self._request("GET", f"/{unit_id}/config")

    async def update_unit_config(
        self,
        unit_id: str,
        host: Optional[str] = None,
        tcp_port: Optional[int] = None,
        ftp_port: Optional[int] = None,
        ftp_username: Optional[str] = None,
        ftp_password: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Update unit configuration in SLMM cache.

        Args:
            unit_id: Unit identifier
            host: Device IP address
            tcp_port: TCP control port (default: 2255)
            ftp_port: FTP data port (default: 21)
            ftp_username: FTP username
            ftp_password: FTP password

        Returns:
            Updated config
        """
        config = {}
        if host is not None:
            config["host"] = host
        if tcp_port is not None:
            config["tcp_port"] = tcp_port
        if ftp_port is not None:
            config["ftp_port"] = ftp_port
        if ftp_username is not None:
            config["ftp_username"] = ftp_username
        if ftp_password is not None:
            config["ftp_password"] = ftp_password

        return await self._request("PUT", f"/{unit_id}/config", data=config)

    # ========================================================================
    # Status & Monitoring
    # ========================================================================

    async def get_unit_status(self, unit_id: str) -> Dict[str, Any]:
        """
        Get cached status snapshot from SLMM.

        Args:
            unit_id: Unit identifier

        Returns:
            Status dict with measurement_state, lp, leq, battery, etc.
        """
        return await self._request("GET", f"/{unit_id}/status")

    async def get_live_data(self, unit_id: str) -> Dict[str, Any]:
        """
        Request fresh data from device (DOD command).

        Args:
            unit_id: Unit identifier

        Returns:
            Live data snapshot
        """
        return await self._request("GET", f"/{unit_id}/live")

    # ========================================================================
    # Recording Control
    # ========================================================================

    async def start_recording(
        self,
        unit_id: str,
        config: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        """
        Start recording on a unit.

        Args:
            unit_id: Unit identifier
            config: Optional recording config (interval, settings, etc.)

        Returns:
            Response from SLMM with success status
        """
        return await self._request("POST", f"/{unit_id}/start", data=config or {})

    async def stop_recording(self, unit_id: str) -> Dict[str, Any]:
        """
        Stop recording on a unit.

        Args:
            unit_id: Unit identifier

        Returns:
            Response from SLMM
        """
        return await self._request("POST", f"/{unit_id}/stop")

    async def pause_recording(self, unit_id: str) -> Dict[str, Any]:
        """
        Pause recording on a unit.

        Args:
            unit_id: Unit identifier

        Returns:
            Response from SLMM
        """
        return await self._request("POST", f"/{unit_id}/pause")

    async def resume_recording(self, unit_id: str) -> Dict[str, Any]:
        """
        Resume paused recording on a unit.

        Args:
            unit_id: Unit identifier

        Returns:
            Response from SLMM
        """
        return await self._request("POST", f"/{unit_id}/resume")

    async def reset_data(self, unit_id: str) -> Dict[str, Any]:
        """
        Reset measurement data on a unit.

        Args:
            unit_id: Unit identifier

        Returns:
            Response from SLMM
        """
        return await self._request("POST", f"/{unit_id}/reset")

    # ========================================================================
    # Store/Index Management
    # ========================================================================

    async def get_index_number(self, unit_id: str) -> Dict[str, Any]:
        """
        Get current store/index number from device.

        Args:
            unit_id: Unit identifier

        Returns:
            Dict with current index_number (store name)
        """
        return await self._request("GET", f"/{unit_id}/index-number")

    async def set_index_number(
        self,
        unit_id: str,
        index_number: int,
    ) -> Dict[str, Any]:
        """
        Set store/index number on device.

        Args:
            unit_id: Unit identifier
            index_number: New index number to set

        Returns:
            Confirmation response
        """
        return await self._request(
            "PUT",
            f"/{unit_id}/index-number",
            data={"index_number": index_number},
        )

    async def check_overwrite_status(self, unit_id: str) -> Dict[str, Any]:
        """
        Check if data exists at the current store index.

        Args:
            unit_id: Unit identifier

        Returns:
            Dict with:
            - overwrite_status: "None" (safe) or "Exist" (would overwrite)
            - will_overwrite: bool
            - safe_to_store: bool
        """
        return await self._request("GET", f"/{unit_id}/overwrite-check")

    async def increment_index(self, unit_id: str, max_attempts: int = 100) -> Dict[str, Any]:
        """
        Find and set the next available (unused) store/index number.

        Checks the current index - if it would overwrite existing data,
        increments until finding an unused index number.

        Args:
            unit_id: Unit identifier
            max_attempts: Maximum number of indices to try before giving up

        Returns:
            Dict with old_index, new_index, and attempts_made
        """
        # Get current index
        current = await self.get_index_number(unit_id)
        old_index = current.get("index_number", 0)

        # Check if current index is safe
        overwrite_check = await self.check_overwrite_status(unit_id)
        if overwrite_check.get("safe_to_store", False):
            # Current index is safe, no need to increment
            return {
                "success": True,
                "old_index": old_index,
                "new_index": old_index,
                "unit_id": unit_id,
                "already_safe": True,
                "attempts_made": 0,
            }

        # Need to find an unused index
        attempts = 0
        test_index = old_index + 1

        while attempts < max_attempts:
            # Set the new index
            await self.set_index_number(unit_id, test_index)

            # Check if this index is safe
            overwrite_check = await self.check_overwrite_status(unit_id)
            attempts += 1

            if overwrite_check.get("safe_to_store", False):
                return {
                    "success": True,
                    "old_index": old_index,
                    "new_index": test_index,
                    "unit_id": unit_id,
                    "already_safe": False,
                    "attempts_made": attempts,
                }

            # Try next index (wrap around at 9999)
            test_index = (test_index + 1) % 10000

            # Avoid infinite loops if we've wrapped around
            if test_index == old_index:
                break

        # Could not find a safe index
        raise SLMMDeviceError(
            f"Could not find unused store index for {unit_id} after {attempts} attempts. "
            f"Consider downloading and clearing data from the device."
        )

    # ========================================================================
    # Device Settings
    # ========================================================================

    async def get_frequency_weighting(self, unit_id: str) -> Dict[str, Any]:
        """
        Get frequency weighting setting (A, C, or Z).

        Args:
            unit_id: Unit identifier

        Returns:
            Dict with current weighting
        """
        return await self._request("GET", f"/{unit_id}/frequency-weighting")

    async def set_frequency_weighting(
        self,
        unit_id: str,
        weighting: str,
    ) -> Dict[str, Any]:
        """
        Set frequency weighting (A, C, or Z).

        Args:
            unit_id: Unit identifier
            weighting: "A", "C", or "Z"

        Returns:
            Confirmation response
        """
        return await self._request(
            "PUT",
            f"/{unit_id}/frequency-weighting",
            data={"weighting": weighting},
        )

    async def get_time_weighting(self, unit_id: str) -> Dict[str, Any]:
        """
        Get time weighting setting (F, S, or I).

        Args:
            unit_id: Unit identifier

        Returns:
            Dict with current time weighting
        """
        return await self._request("GET", f"/{unit_id}/time-weighting")

    async def set_time_weighting(
        self,
        unit_id: str,
        weighting: str,
    ) -> Dict[str, Any]:
        """
        Set time weighting (F=Fast, S=Slow, I=Impulse).

        Args:
            unit_id: Unit identifier
            weighting: "F", "S", or "I"

        Returns:
            Confirmation response
        """
        return await self._request(
            "PUT",
            f"/{unit_id}/time-weighting",
            data={"weighting": weighting},
        )

    async def get_all_settings(self, unit_id: str) -> Dict[str, Any]:
        """
        Get all device settings.

        Args:
            unit_id: Unit identifier

        Returns:
            Dict with all settings
        """
        return await self._request("GET", f"/{unit_id}/settings")

    # ========================================================================
    # FTP Control
    # ========================================================================

    async def enable_ftp(self, unit_id: str) -> Dict[str, Any]:
        """
        Enable FTP server on device.

        Must be called before downloading files. FTP and TCP can work in tandem.

        Args:
            unit_id: Unit identifier

        Returns:
            Dict with status message
        """
        return await self._request("POST", f"/{unit_id}/ftp/enable")

    async def disable_ftp(self, unit_id: str) -> Dict[str, Any]:
        """
        Disable FTP server on device.

        Args:
            unit_id: Unit identifier

        Returns:
            Dict with status message
        """
        return await self._request("POST", f"/{unit_id}/ftp/disable")

    async def get_ftp_status(self, unit_id: str) -> Dict[str, Any]:
        """
        Get FTP server status on device.

        Args:
            unit_id: Unit identifier

        Returns:
            Dict with ftp_enabled status
        """
        return await self._request("GET", f"/{unit_id}/ftp/status")

    # ========================================================================
    # Data Download
    # ========================================================================

    async def download_file(
        self,
        unit_id: str,
        remote_path: str,
    ) -> Dict[str, Any]:
        """
        Download a single file from unit via FTP.

        Args:
            unit_id: Unit identifier
            remote_path: Path on device to download (e.g., "/NL43_DATA/measurement.wav")

        Returns:
            Dict with local_path, filename, size_bytes
        """
        return await self._download_request(
            f"/{unit_id}/ftp/download",
            {"remote_path": remote_path},
            unit_id,
        )

    async def download_folder(
        self,
        unit_id: str,
        remote_path: str,
    ) -> Dict[str, Any]:
        """
        Download an entire folder from unit via FTP as a ZIP archive.

        Useful for downloading complete measurement sessions (e.g., Auto_0000 folders).

        Args:
            unit_id: Unit identifier
            remote_path: Folder path on device to download (e.g., "/NL43_DATA/Auto_0000")

        Returns:
            Dict with local_path, folder_name, size_bytes
        """
        return await self._download_request(
            f"/{unit_id}/ftp/download-folder",
            {"remote_path": remote_path},
            unit_id,
        )

    async def download_current_measurement(
        self,
        unit_id: str,
    ) -> Dict[str, Any]:
        """
        Download the current measurement folder based on device's index number.

        This is the recommended method for scheduled downloads - it automatically
        determines which folder to download based on the device's current store index.

        Args:
            unit_id: Unit identifier

        Returns:
            Dict with local_path, folder_name, file_count, zip_size_bytes, index_number
        """
        # Get current index number from device
        index_info = await self.get_index_number(unit_id)
        index_number_raw = index_info.get("index_number", 0)

        # Convert to int - device returns a string like "0000" or "0001"
        try:
            index_number = int(index_number_raw)
        except (ValueError, TypeError):
            index_number = 0

        # Format as Auto_XXXX folder name
        folder_name = f"Auto_{index_number:04d}"
        remote_path = f"/NL-43/{folder_name}"

        # Download the folder
        result = await self.download_folder(unit_id, remote_path)
        result["index_number"] = index_number
        return result

    async def download_files(
        self,
        unit_id: str,
        destination_path: str,
        files: Optional[List[str]] = None,
    ) -> Dict[str, Any]:
        """
        Download measurement files from unit via FTP.

        This method automatically determines the current measurement folder and downloads it.
        The destination_path parameter is logged for reference, but the actual download
        location is managed by SLMM (data/downloads/{unit_id}/).

        Args:
            unit_id: Unit identifier
            destination_path: Reference path (for logging/metadata, not used by SLMM)
            files: Ignored - always downloads the current measurement folder

        Returns:
            Dict with download result including local_path, folder_name, etc.
        """
        # Use the method that automatically determines what to download
        result = await self.download_current_measurement(unit_id)
        result["requested_destination"] = destination_path
        return result

    # ========================================================================
    # Cycle Commands (for scheduled automation)
    # ========================================================================

    async def start_cycle(
        self,
        unit_id: str,
        sync_clock: bool = True,
    ) -> Dict[str, Any]:
        """
        Execute complete start cycle on device via SLMM.

        This handles the full pre-recording workflow:
        1. Sync device clock to server time
        2. Find next safe index (with overwrite protection)
        3. Start measurement

        Args:
            unit_id: Unit identifier
            sync_clock: Whether to sync device clock to server time

        Returns:
            Dict with clock_synced, old_index, new_index, started, etc.
        """
        return await self._request(
            "POST",
            f"/{unit_id}/start-cycle",
            data={"sync_clock": sync_clock},
        )

    async def stop_cycle(
        self,
        unit_id: str,
        download: bool = True,
        download_path: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Execute complete stop cycle on device via SLMM.

        This handles the full post-recording workflow:
        1. Stop measurement
        2. Enable FTP
        3. Download measurement folder (if download=True)
        4. Verify download

        Args:
            unit_id: Unit identifier
            download: Whether to download measurement data
            download_path: Custom path for downloaded ZIP (optional)

        Returns:
            Dict with stopped, ftp_enabled, download_success, local_path, etc.
        """
        data = {"download": download}
        if download_path:
            data["download_path"] = download_path
        return await self._request(
            "POST",
            f"/{unit_id}/stop-cycle",
            data=data,
        )

    # ========================================================================
    # Polling Status (for device monitoring/alerts)
    # ========================================================================

    async def get_polling_status(self) -> Dict[str, Any]:
        """
        Get global polling status from SLMM.

        Returns device reachability information for all polled devices.
        Used by DeviceStatusMonitor to detect offline/online transitions.

        Returns:
            Dict with devices list containing:
            - unit_id
            - is_reachable
            - consecutive_failures
            - last_poll_attempt
            - last_success
            - last_error
        """
        try:
            async with httpx.AsyncClient(timeout=self.timeout) as client:
                response = await client.get(f"{self.base_url}/api/nl43/_polling/status")
                response.raise_for_status()
                return response.json()
        except httpx.ConnectError:
            raise SLMMConnectionError("Cannot connect to SLMM for polling status")
        except Exception as e:
            raise SLMMClientError(f"Failed to get polling status: {str(e)}")

    async def get_device_polling_config(self, unit_id: str) -> Dict[str, Any]:
        """
        Get polling configuration for a specific device.

        Args:
            unit_id: Unit identifier

        Returns:
            Dict with poll_enabled and poll_interval_seconds
        """
        return await self._request("GET", f"/{unit_id}/polling/config")

    async def update_device_polling_config(
        self,
        unit_id: str,
        poll_enabled: Optional[bool] = None,
        poll_interval_seconds: Optional[int] = None,
    ) -> Dict[str, Any]:
        """
        Update polling configuration for a device.

        Args:
            unit_id: Unit identifier
            poll_enabled: Enable/disable polling
            poll_interval_seconds: Polling interval (10-3600)

        Returns:
            Updated config
        """
        config = {}
        if poll_enabled is not None:
            config["poll_enabled"] = poll_enabled
        if poll_interval_seconds is not None:
            config["poll_interval_seconds"] = poll_interval_seconds

        return await self._request("PUT", f"/{unit_id}/polling/config", data=config)

    # ========================================================================
    # Health Check
    # ========================================================================

    async def health_check(self) -> bool:
        """
        Check if SLMM backend is reachable.

        Returns:
            True if SLMM is responding, False otherwise
        """
        try:
            async with httpx.AsyncClient(timeout=5.0) as client:
                response = await client.get(f"{self.base_url}/health")
                return response.status_code == 200
except:
|
||||||
|
return False
|
||||||


# Singleton instance for convenience
_default_client: Optional[SLMMClient] = None


def get_slmm_client() -> SLMMClient:
    """
    Get the default SLMM client instance.

    Returns:
        SLMMClient instance
    """
    global _default_client
    if _default_client is None:
        _default_client = SLMMClient()
    return _default_client
231
backend/services/slmm_sync.py
Normal file
@@ -0,0 +1,231 @@
"""
SLMM Synchronization Service

This service ensures Terra-View roster is the single source of truth for SLM device configuration.
When SLM devices are added, edited, or deleted in Terra-View, changes are automatically synced to SLMM.
"""

import logging
import httpx
import os
from typing import Optional
from sqlalchemy.orm import Session

from backend.models import RosterUnit

logger = logging.getLogger(__name__)

SLMM_BASE_URL = os.getenv("SLMM_BASE_URL", "http://localhost:8100")


async def sync_slm_to_slmm(unit: RosterUnit) -> bool:
    """
    Sync a single SLM device from Terra-View roster to SLMM.

    Args:
        unit: RosterUnit with device_type="slm"

    Returns:
        True if sync successful, False otherwise
    """
    if unit.device_type != "slm":
        logger.warning(f"Attempted to sync non-SLM unit {unit.id} to SLMM")
        return False

    if not unit.slm_host:
        logger.warning(f"SLM {unit.id} has no host configured, skipping SLMM sync")
        return False

    # Disable polling if unit is benched (deployed=False) or retired.
    # Only actively deployed units should be polled.
    should_poll = unit.deployed and not unit.retired

    try:
        async with httpx.AsyncClient(timeout=5.0) as client:
            response = await client.put(
                f"{SLMM_BASE_URL}/api/nl43/{unit.id}/config",
                json={
                    "host": unit.slm_host,
                    "tcp_port": unit.slm_tcp_port or 2255,
                    "tcp_enabled": True,
                    "ftp_enabled": True,
                    "ftp_username": "USER",  # Default NL43 credentials
                    "ftp_password": "0000",
                    "poll_enabled": should_poll,  # Disable polling for benched or retired units
                    "poll_interval_seconds": 3600,  # Default to 1 hour polling
                }
            )

            if response.status_code in [200, 201]:
                logger.info(f"✓ Synced SLM {unit.id} to SLMM at {unit.slm_host}:{unit.slm_tcp_port or 2255}")
                return True
            else:
                logger.error(f"Failed to sync SLM {unit.id} to SLMM: {response.status_code} {response.text}")
                return False

    except httpx.TimeoutException:
        logger.error(f"Timeout syncing SLM {unit.id} to SLMM")
        return False
    except Exception as e:
        logger.error(f"Error syncing SLM {unit.id} to SLMM: {e}")
        return False


async def delete_slm_from_slmm(unit_id: str) -> bool:
    """
    Delete a device from SLMM database.

    Args:
        unit_id: The unit ID to delete

    Returns:
        True if deletion successful or device doesn't exist, False on error
    """
    try:
        async with httpx.AsyncClient(timeout=5.0) as client:
            response = await client.delete(
                f"{SLMM_BASE_URL}/api/nl43/{unit_id}/config"
            )

            if response.status_code == 200:
                logger.info(f"✓ Deleted SLM {unit_id} from SLMM")
                return True
            elif response.status_code == 404:
                logger.info(f"SLM {unit_id} not found in SLMM (already deleted)")
                return True
            else:
                logger.error(f"Failed to delete SLM {unit_id} from SLMM: {response.status_code} {response.text}")
                return False

    except httpx.TimeoutException:
        logger.error(f"Timeout deleting SLM {unit_id} from SLMM")
        return False
    except Exception as e:
        logger.error(f"Error deleting SLM {unit_id} from SLMM: {e}")
        return False


async def sync_all_slms_to_slmm(db: Session) -> dict:
    """
    Sync all SLM devices from Terra-View roster to SLMM.

    This ensures SLMM database matches Terra-View roster as the source of truth.
    Should be called on Terra-View startup and optionally via admin endpoint.

    Args:
        db: Database session

    Returns:
        Dictionary with sync results
    """
    logger.info("Starting full SLM sync to SLMM...")

    # Get all SLM units from roster
    slm_units = db.query(RosterUnit).filter_by(device_type="slm").all()

    results = {
        "total": len(slm_units),
        "synced": 0,
        "skipped": 0,
        "failed": 0
    }

    for unit in slm_units:
        # Skip units without host configured
        if not unit.slm_host:
            results["skipped"] += 1
            logger.debug(f"Skipped {unit.id} - no host configured")
            continue

        # Sync to SLMM
        success = await sync_slm_to_slmm(unit)
        if success:
            results["synced"] += 1
        else:
            results["failed"] += 1

    logger.info(
        f"SLM sync complete: {results['synced']} synced, "
        f"{results['skipped']} skipped, {results['failed']} failed"
    )

    return results


async def get_slmm_devices() -> Optional[list]:
    """
    Get list of all devices currently in SLMM database.

    Returns:
        List of device unit_ids, or None on error
    """
    try:
        async with httpx.AsyncClient(timeout=5.0) as client:
            response = await client.get(f"{SLMM_BASE_URL}/api/nl43/_polling/status")

            if response.status_code == 200:
                data = response.json()
                return [device["unit_id"] for device in data["data"]["devices"]]
            else:
                logger.error(f"Failed to get SLMM devices: {response.status_code}")
                return None

    except Exception as e:
        logger.error(f"Error getting SLMM devices: {e}")
        return None


async def cleanup_orphaned_slmm_devices(db: Session) -> dict:
    """
    Remove devices from SLMM that are not in Terra-View roster.

    This cleans up orphaned test devices or devices that were manually added to SLMM.

    Args:
        db: Database session

    Returns:
        Dictionary with cleanup results
    """
    logger.info("Checking for orphaned devices in SLMM...")

    # Get all device IDs from SLMM
    slmm_devices = await get_slmm_devices()
    if slmm_devices is None:
        return {"error": "Failed to get SLMM device list"}

    # Get all SLM unit IDs from Terra-View roster
    roster_units = db.query(RosterUnit.id).filter_by(device_type="slm").all()
    roster_unit_ids = {unit.id for unit in roster_units}

    # Find orphaned devices (in SLMM but not in roster)
    orphaned = [uid for uid in slmm_devices if uid not in roster_unit_ids]

    results = {
        "total_in_slmm": len(slmm_devices),
        "total_in_roster": len(roster_unit_ids),
        "orphaned": len(orphaned),
        "deleted": 0,
        "failed": 0,
        "orphaned_devices": orphaned
    }

    if not orphaned:
        logger.info("No orphaned devices found in SLMM")
        return results

    logger.info(f"Found {len(orphaned)} orphaned devices in SLMM: {orphaned}")

    # Delete orphaned devices
    for unit_id in orphaned:
        success = await delete_slm_from_slmm(unit_id)
        if success:
            results["deleted"] += 1
        else:
            results["failed"] += 1

    logger.info(
        f"Cleanup complete: {results['deleted']} deleted, {results['failed']} failed"
    )

    return results
@@ -24,13 +24,47 @@ def format_age(last_seen):
     return f"{int(hours)}h {int(mins)}m"
 
 
+def calculate_status(last_seen, status_ok_threshold=12, status_pending_threshold=24):
+    """
+    Calculate status based on how long ago the unit was last seen.
+
+    Args:
+        last_seen: datetime of last seen (UTC)
+        status_ok_threshold: hours before status becomes Pending (default 12)
+        status_pending_threshold: hours before status becomes Missing (default 24)
+
+    Returns:
+        "OK", "Pending", or "Missing"
+    """
+    if not last_seen:
+        return "Missing"
+
+    last_seen = ensure_utc(last_seen)
+    now = datetime.now(timezone.utc)
+    hours_ago = (now - last_seen).total_seconds() / 3600
+
+    if hours_ago > status_pending_threshold:
+        return "Missing"
+    elif hours_ago > status_ok_threshold:
+        return "Pending"
+    else:
+        return "OK"
+
+
 def emit_status_snapshot():
     """
     Merge roster (what we *intend*) with emitter data (what is *actually happening*).
+    Status is recalculated based on current time to ensure accuracy.
     """
 
     db = get_db_session()
     try:
+        # Get user preferences for status thresholds
+        from backend.models import UserPreferences
+        prefs = db.query(UserPreferences).filter_by(id=1).first()
+        status_ok_threshold = prefs.status_ok_threshold_hours if prefs else 12
+        status_pending_threshold = prefs.status_pending_threshold_hours if prefs else 24
+
         roster = {r.id: r for r in db.query(RosterUnit).all()}
         emitters = {e.id: e for e in db.query(Emitter).all()}
         ignored = {i.id for i in db.query(IgnoredUnit).all()}
@@ -40,17 +74,29 @@ def emit_status_snapshot():
         # --- Merge roster entries first ---
         for unit_id, r in roster.items():
             e = emitters.get(unit_id)
 
             if r.retired:
                 # Retired units get separated later
                 status = "Retired"
                 age = "N/A"
                 last_seen = None
                 fname = ""
+            elif r.out_for_calibration:
+                # Out for calibration units get separated later
+                status = "Out for Calibration"
+                age = "N/A"
+                last_seen = None
+                fname = ""
+            elif getattr(r, 'allocated', False) and not r.deployed:
+                # Allocated: staged for an upcoming job, not yet physically deployed
+                status = "Allocated"
+                age = "N/A"
+                last_seen = None
+                fname = ""
             else:
                 if e:
-                    status = e.status
                     last_seen = ensure_utc(e.last_seen)
+                    # RECALCULATE status based on current time, not stored value
+                    status = calculate_status(last_seen, status_ok_threshold, status_pending_threshold)
                     age = format_age(last_seen)
                     fname = e.last_file
                 else:
@@ -60,47 +106,57 @@ def emit_status_snapshot():
                     age = "N/A"
                     fname = ""
 
             units[unit_id] = {
                 "id": unit_id,
                 "status": status,
                 "age": age,
                 "last": last_seen.isoformat() if last_seen else None,
                 "fname": fname,
                 "deployed": r.deployed,
                 "note": r.note or "",
                 "retired": r.retired,
+                "out_for_calibration": r.out_for_calibration or False,
+                "allocated": getattr(r, 'allocated', False) or False,
+                "allocated_to_project_id": getattr(r, 'allocated_to_project_id', None) or "",
                 # Device type and type-specific fields
                 "device_type": r.device_type or "seismograph",
                 "last_calibrated": r.last_calibrated.isoformat() if r.last_calibrated else None,
                 "next_calibration_due": r.next_calibration_due.isoformat() if r.next_calibration_due else None,
                 "deployed_with_modem_id": r.deployed_with_modem_id,
+                "deployed_with_unit_id": r.deployed_with_unit_id,
                 "ip_address": r.ip_address,
                 "phone_number": r.phone_number,
                 "hardware_model": r.hardware_model,
                 # Location for mapping
                 "location": r.location or "",
                 "address": r.address or "",
                 "coordinates": r.coordinates or "",
             }
 
         # --- Add unexpected emitter-only units ---
         for unit_id, e in emitters.items():
             if unit_id not in roster:
                 last_seen = ensure_utc(e.last_seen)
+                # RECALCULATE status for unknown units too
+                status = calculate_status(last_seen, status_ok_threshold, status_pending_threshold)
                 units[unit_id] = {
                     "id": unit_id,
-                    "status": e.status,
+                    "status": status,
                     "age": format_age(last_seen),
                     "last": last_seen.isoformat(),
                     "fname": e.last_file,
                     "deployed": False,  # default
                     "note": "",
                     "retired": False,
+                    "out_for_calibration": False,
+                    "allocated": False,
+                    "allocated_to_project_id": "",
                     # Device type and type-specific fields (defaults for unknown units)
                     "device_type": "seismograph",  # default
                     "last_calibrated": None,
                     "next_calibration_due": None,
                     "deployed_with_modem_id": None,
+                    "deployed_with_unit_id": None,
                     "ip_address": None,
                     "phone_number": None,
                     "hardware_model": None,
@@ -110,15 +166,48 @@ def emit_status_snapshot():
                     "coordinates": "",
                 }
 
+        # --- Derive modem status from paired devices ---
+        # Modems don't have their own check-in system, so we inherit status
+        # from whatever device they're paired with (seismograph or SLM)
+        # Check both directions: modem.deployed_with_unit_id OR device.deployed_with_modem_id
+        for unit_id, unit_data in units.items():
+            if unit_data.get("device_type") == "modem" and not unit_data.get("retired"):
+                paired_unit_id = None
+                roster_unit = roster.get(unit_id)
+
+                # First, check if modem has deployed_with_unit_id set
+                if roster_unit and roster_unit.deployed_with_unit_id:
+                    paired_unit_id = roster_unit.deployed_with_unit_id
+                else:
+                    # Fallback: check if any device has this modem in deployed_with_modem_id
+                    for other_id, other_roster in roster.items():
+                        if other_roster.deployed_with_modem_id == unit_id:
+                            paired_unit_id = other_id
+                            break
+
+                if paired_unit_id:
+                    paired_unit = units.get(paired_unit_id)
+                    if paired_unit:
+                        # Inherit status from paired device
+                        unit_data["status"] = paired_unit.get("status", "Missing")
+                        unit_data["age"] = paired_unit.get("age", "N/A")
+                        unit_data["last"] = paired_unit.get("last")
+                        unit_data["derived_from"] = paired_unit_id
+
         # Separate buckets for UI
         active_units = {
             uid: u for uid, u in units.items()
-            if not u["retired"] and u["deployed"] and uid not in ignored
+            if not u["retired"] and not u["out_for_calibration"] and u["deployed"] and uid not in ignored
         }
 
         benched_units = {
             uid: u for uid, u in units.items()
-            if not u["retired"] and not u["deployed"] and uid not in ignored
+            if not u["retired"] and not u["out_for_calibration"] and not u["allocated"] and not u["deployed"] and uid not in ignored
+        }
+
+        allocated_units = {
+            uid: u for uid, u in units.items()
+            if not u["retired"] and not u["out_for_calibration"] and u["allocated"] and not u["deployed"] and uid not in ignored
         }
 
         retired_units = {
@@ -126,6 +215,11 @@ def emit_status_snapshot():
             if u["retired"]
         }
 
+        out_for_calibration_units = {
+            uid: u for uid, u in units.items()
+            if u["out_for_calibration"]
+        }
+
         # Unknown units - emitters that aren't in the roster and aren't ignored
         unknown_units = {
             uid: u for uid, u in units.items()
@@ -137,13 +231,17 @@ def emit_status_snapshot():
             "units": units,
             "active": active_units,
             "benched": benched_units,
+            "allocated": allocated_units,
             "retired": retired_units,
+            "out_for_calibration": out_for_calibration_units,
             "unknown": unknown_units,
             "summary": {
-                "total": len(active_units) + len(benched_units),
+                "total": len(active_units) + len(benched_units) + len(allocated_units),
                 "active": len(active_units),
                 "benched": len(benched_units),
+                "allocated": len(allocated_units),
                 "retired": len(retired_units),
+                "out_for_calibration": len(out_for_calibration_units),
                 "unknown": len(unknown_units),
                 # Status counts only for deployed units (active_units)
                 "ok": sum(1 for u in active_units.values() if u["status"] == "OK"),
BIN  backend/static/icons/favicon-16.png  (new file, 424 B)
BIN  backend/static/icons/favicon-32.png  (new file, 1.1 KiB)
BIN  (image updated: 1.9 KiB → 7.7 KiB)
BIN  (image updated: 2.2 KiB → 9.2 KiB)
BIN  (image updated: 2.2 KiB → 10 KiB)
BIN  (image updated: 2.9 KiB → 15 KiB)
BIN  (image updated: 5.8 KiB → 44 KiB)
BIN  (image updated: 7.8 KiB → 68 KiB)
BIN  (image updated: 1.1 KiB → 3.2 KiB)
BIN  (image updated: 1.4 KiB → 5.0 KiB)
@@ -347,6 +347,3 @@ document.addEventListener('DOMContentLoaded', async () => {
         console.error('Failed to initialize offline database:', error);
     }
 });
-
-// Export for use in other scripts
-export default OfflineDB;
BIN  backend/static/terra-view-logo-dark.png  (new file, 13 KiB)
BIN  backend/static/terra-view-logo-dark@2x.png  (new file, 57 KiB)
BIN  backend/static/terra-view-logo-light.png  (new file, 14 KiB)
BIN  backend/static/terra-view-logo-light@2x.png  (new file, 49 KiB)
90
backend/templates_config.py
Normal file
@@ -0,0 +1,90 @@
"""
Shared Jinja2 templates configuration.

All routers should import `templates` from this module to get consistent
filter and global function registration.
"""

import json as _json
from fastapi.templating import Jinja2Templates

# Import timezone utilities
from backend.utils.timezone import (
    format_local_datetime, format_local_time,
    get_user_timezone, get_timezone_abbreviation
)


def jinja_local_datetime(dt, fmt="%Y-%m-%d %H:%M"):
    """Jinja filter to convert UTC datetime to local timezone."""
    return format_local_datetime(dt, fmt)


def jinja_local_time(dt):
    """Jinja filter to format time in local timezone."""
    return format_local_time(dt)


def jinja_timezone_abbr():
    """Jinja global to get current timezone abbreviation."""
    return get_timezone_abbreviation()


# Create templates instance
templates = Jinja2Templates(directory="templates")


def jinja_local_date(dt, fmt="%m-%d-%y"):
    """Jinja filter: format a UTC datetime as a local date string (e.g. 02-19-26)."""
    return format_local_datetime(dt, fmt)


def jinja_fromjson(s):
    """Jinja filter: parse a JSON string into a dict (returns {} on failure)."""
    if not s:
        return {}
    try:
        return _json.loads(s)
    except Exception:
        return {}


def jinja_same_date(dt1, dt2) -> bool:
    """Jinja global: True if two datetimes fall on the same local date."""
    if not dt1 or not dt2:
        return False
    try:
        d1 = format_local_datetime(dt1, "%Y-%m-%d")
        d2 = format_local_datetime(dt2, "%Y-%m-%d")
        return d1 == d2
    except Exception:
        return False


def jinja_log_tail_display(s):
    """Jinja filter: decode a JSON-encoded log tail array into a plain-text string."""
    if not s:
        return ""
    try:
        lines = _json.loads(s)
        if isinstance(lines, list):
            return "\n".join(str(l) for l in lines)
        return str(s)
    except Exception:
        return str(s)


def jinja_local_datetime_input(dt):
    """Jinja filter: format UTC datetime as local YYYY-MM-DDTHH:MM for <input type='datetime-local'>."""
    return format_local_datetime(dt, "%Y-%m-%dT%H:%M")


# Register Jinja filters and globals
templates.env.filters["local_datetime"] = jinja_local_datetime
templates.env.filters["local_time"] = jinja_local_time
templates.env.filters["local_date"] = jinja_local_date
templates.env.filters["local_datetime_input"] = jinja_local_datetime_input
templates.env.filters["fromjson"] = jinja_fromjson
templates.env.globals["timezone_abbr"] = jinja_timezone_abbr
templates.env.globals["get_user_timezone"] = get_user_timezone
templates.env.globals["same_date"] = jinja_same_date
templates.env.filters["log_tail_display"] = jinja_log_tail_display
1
backend/utils/__init__.py
Normal file
@@ -0,0 +1 @@
# Utils package
173
backend/utils/timezone.py
Normal file
@@ -0,0 +1,173 @@
"""
Timezone utilities for Terra-View.

Provides consistent timezone handling throughout the application.
All database times are stored in UTC; this module converts for display.
"""

from datetime import datetime
from zoneinfo import ZoneInfo
from typing import Optional

from backend.database import SessionLocal
from backend.models import UserPreferences


# Default timezone if none set
DEFAULT_TIMEZONE = "America/New_York"


def get_user_timezone() -> str:
    """
    Get the user's configured timezone from preferences.

    Returns:
        Timezone string (e.g., "America/New_York")
    """
    db = SessionLocal()
    try:
        prefs = db.query(UserPreferences).filter_by(id=1).first()
        if prefs and prefs.timezone:
            return prefs.timezone
        return DEFAULT_TIMEZONE
    finally:
        db.close()


def get_timezone_info(tz_name: str = None) -> ZoneInfo:
    """
    Get ZoneInfo object for the specified or user's timezone.

    Args:
        tz_name: Timezone name, or None to use user preference

    Returns:
        ZoneInfo object
    """
    if tz_name is None:
        tz_name = get_user_timezone()
    try:
        return ZoneInfo(tz_name)
    except Exception:
        return ZoneInfo(DEFAULT_TIMEZONE)


def utc_to_local(dt: datetime, tz_name: str = None) -> datetime:
    """
    Convert a UTC datetime to local timezone.

    Args:
        dt: Datetime in UTC (naive or aware)
        tz_name: Target timezone, or None to use user preference

    Returns:
        Datetime in local timezone
    """
    if dt is None:
        return None

    tz = get_timezone_info(tz_name)

    # Assume naive datetime is UTC
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=ZoneInfo("UTC"))

    return dt.astimezone(tz)


def local_to_utc(dt: datetime, tz_name: str = None) -> datetime:
    """
    Convert a local datetime to UTC.

    Args:
        dt: Datetime in local timezone (naive or aware)
        tz_name: Source timezone, or None to use user preference

    Returns:
        Datetime in UTC (naive, for database storage)
    """
    if dt is None:
        return None

    tz = get_timezone_info(tz_name)

    # Assume naive datetime is in local timezone
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=tz)

    # Convert to UTC and strip tzinfo for database storage
    return dt.astimezone(ZoneInfo("UTC")).replace(tzinfo=None)


def format_local_datetime(dt: datetime, fmt: str = "%Y-%m-%d %H:%M", tz_name: str = None) -> str:
    """
    Format a UTC datetime as local time string.

    Args:
        dt: Datetime in UTC
        fmt: strftime format string
        tz_name: Target timezone, or None to use user preference

    Returns:
        Formatted datetime string in local time
    """
    if dt is None:
        return "N/A"

    local_dt = utc_to_local(dt, tz_name)
    return local_dt.strftime(fmt)


def format_local_time(dt: datetime, tz_name: str = None) -> str:
    """
    Format a UTC datetime as local time (HH:MM format).

    Args:
        dt: Datetime in UTC
        tz_name: Target timezone

    Returns:
        Time string in HH:MM format
    """
    return format_local_datetime(dt, "%H:%M", tz_name)


def format_local_date(dt: datetime, tz_name: str = None) -> str:
    """
    Format a UTC datetime as local date (YYYY-MM-DD format).

    Args:
        dt: Datetime in UTC
        tz_name: Target timezone

    Returns:
        Date string
    """
    return format_local_datetime(dt, "%Y-%m-%d", tz_name)


def get_timezone_abbreviation(tz_name: str = None) -> str:
    """
    Get the abbreviation for a timezone (e.g., EST, EDT, PST).

    Args:
        tz_name: Timezone name, or None to use user preference

    Returns:
        Timezone abbreviation
    """
    tz = get_timezone_info(tz_name)
    now = datetime.now(tz)
    return now.strftime("%Z")


# Common US timezone choices for settings dropdown
TIMEZONE_CHOICES = [
    ("America/New_York", "Eastern Time (ET)"),
    ("America/Chicago", "Central Time (CT)"),
    ("America/Denver", "Mountain Time (MT)"),
    ("America/Los_Angeles", "Pacific Time (PT)"),
    ("America/Anchorage", "Alaska Time (AKT)"),
    ("Pacific/Honolulu", "Hawaii Time (HT)"),
    ("UTC", "UTC"),
]
@@ -1,10 +0,0 @@
-{
-    "filename": "snapshot_20251216_201738.db",
-    "created_at": "20251216_201738",
-    "created_at_iso": "2025-12-16T20:17:38.638982",
-    "description": "Auto-backup before restore",
-    "size_bytes": 57344,
-    "size_mb": 0.05,
-    "original_db_size_bytes": 57344,
-    "type": "manual"
-}
@@ -1,9 +0,0 @@
-{
-    "filename": "snapshot_uploaded_20251216_201732.db",
-    "created_at": "20251216_201732",
-    "created_at_iso": "2025-12-16T20:17:32.574205",
-    "description": "Uploaded: snapshot_20251216_200259.db",
-    "size_bytes": 77824,
-    "size_mb": 0.07,
-    "type": "uploaded"
-}
@@ -1,9 +1,7 @@
 services:
 
-  # --- PRODUCTION ---
-  seismo-backend:
+  terra-view:
     build: .
-    container_name: seismo-fleet-manager
     ports:
       - "8001:8001"
     volumes:
@@ -11,7 +9,12 @@ services:
     environment:
       - PYTHONUNBUFFERED=1
       - ENVIRONMENT=production
+      - SLMM_BASE_URL=http://host.docker.internal:8100
     restart: unless-stopped
+    depends_on:
+      - slmm
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
     healthcheck:
       test: ["CMD", "curl", "-f", "http://localhost:8001/health"]
       interval: 30s
@@ -19,25 +22,27 @@ services:
       retries: 3
       start_period: 40s
 
-  # --- DEVELOPMENT ---
-  sfm-dev:
-    build: .
-    container_name: sfm-dev
-    ports:
-      - "1001:8001"
+  # --- SLMM (Sound Level Meter Manager) ---
+  slmm:
+    build:
+      context: ../slmm
+      dockerfile: Dockerfile
+    network_mode: host
     volumes:
-      - ./data-dev:/app/data
+      - ../slmm/data:/app/data
     environment:
       - PYTHONUNBUFFERED=1
-      - ENVIRONMENT=development
+      - PORT=8100
+      - CORS_ORIGINS=*
+      - TCP_IDLE_TTL=-1
+      - TCP_MAX_AGE=-1
     restart: unless-stopped
     healthcheck:
-      test: ["CMD", "curl", "-f", "http://localhost:8001/health"]
+      test: ["CMD", "curl", "-f", "http://localhost:8100/health"]
       interval: 30s
       timeout: 10s
       retries: 3
-      start_period: 40s
+      start_period: 10s
 
 volumes:
   data:
-  data-dev:
276
docs/DEVICE_TYPE_DASHBOARDS.md
Normal file
@@ -0,0 +1,276 @@
# Device Type Dashboards

This document describes the separate dashboard system for different device types in SFM.

## Overview

SFM now has dedicated dashboards for each device type:

- **Main Dashboard** (`/`) - Combined overview of all devices
- **Seismographs Dashboard** (`/seismographs`) - Seismograph-specific view
- **Sound Level Meters Dashboard** (`/sound-level-meters`) - SLM-specific view
- **Fleet Roster** (`/roster`) - All devices with filtering and management

## Architecture

### 1. Main Dashboard

**Route**: `/`
**Template**: [templates/dashboard.html](../templates/dashboard.html)
**Purpose**: Combined overview showing all device types

**Features**:
- Fleet summary card now includes device type breakdown
- Shows count of seismographs and SLMs separately
- Links to dedicated dashboards for each device type
- Shared components: map, alerts, recent photos, fleet status

**Device Type Counts**:
The dashboard calculates device type counts in JavaScript from the snapshot data:

```javascript
// Device type counts
let seismoCount = 0;
let slmCount = 0;
Object.values(data.units || {}).forEach(unit => {
    if (unit.retired) return; // Don't count retired units
    const deviceType = unit.device_type || 'seismograph';
    if (deviceType === 'seismograph') {
        seismoCount++;
    } else if (deviceType === 'sound_level_meter') {
        slmCount++;
    }
});
```

### 2. Seismographs Dashboard

**Route**: `/seismographs`
**Template**: [templates/seismographs.html](../templates/seismographs.html)
**Router**: [backend/routers/seismo_dashboard.py](../backend/routers/seismo_dashboard.py)

**API Endpoints**:
- `GET /api/seismo-dashboard/stats` - Statistics summary (HTML partial)
- `GET /api/seismo-dashboard/units?search=<query>` - Unit list with search (HTML partial)

**Features**:
- Statistics cards (total, deployed, benched, with/without modem)
- Searchable unit list with real-time filtering
- Shows modem assignments
- Links to individual unit detail pages

**Stats Calculation** ([backend/routers/seismo_dashboard.py:18](../backend/routers/seismo_dashboard.py#L18)):

```python
seismos = db.query(RosterUnit).filter_by(
    device_type="seismograph",
    retired=False
).all()

total = len(seismos)
deployed = sum(1 for s in seismos if s.deployed)
benched = sum(1 for s in seismos if not s.deployed)
with_modem = sum(1 for s in seismos if s.deployed and s.deployed_with_modem_id)
```

### 3. Sound Level Meters Dashboard

**Route**: `/sound-level-meters`
**Template**: [templates/sound_level_meters.html](../templates/sound_level_meters.html)
**Router**: [backend/routers/slm_dashboard.py](../backend/routers/slm_dashboard.py)

**API Endpoints**:
- `GET /api/slm-dashboard/stats` - Statistics summary (HTML partial)
- `GET /api/slm-dashboard/units?search=<query>` - Unit list with search (HTML partial)
- `GET /api/slm-dashboard/live-view/{unit_id}` - Live view panel (HTML partial)

**Features**:
- Statistics cards (total, deployed, benched, measuring)
- Searchable unit list
- Live view panel with real-time measurement data
- WebSocket integration for DRD streaming
- Shows modem assignments and IP resolution

See [SOUND_LEVEL_METERS_DASHBOARD.md](SOUND_LEVEL_METERS_DASHBOARD.md) for detailed SLM dashboard documentation.
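
For orientation, here is a minimal sketch of the stats endpoint behind `GET /api/slm-dashboard/stats`, mirroring the seismograph pattern above. The internals shown here are illustrative assumptions (in particular the `measuring` placeholder); the real implementation lives in [backend/routers/slm_dashboard.py](../backend/routers/slm_dashboard.py).

```python
# Illustrative sketch only - mirrors the seismograph stats pattern above.
# The "measuring" placeholder and context keys are assumptions.
from fastapi import APIRouter, Depends, Request
from fastapi.responses import HTMLResponse
from sqlalchemy.orm import Session

from backend.database import get_db
from backend.models import RosterUnit
from backend.templates_config import templates

router = APIRouter(prefix="/api/slm-dashboard")


@router.get("/stats", response_class=HTMLResponse)
async def get_slm_stats(request: Request, db: Session = Depends(get_db)):
    slms = db.query(RosterUnit).filter_by(device_type="slm", retired=False).all()
    return templates.TemplateResponse("partials/slm_stats.html", {
        "request": request,
        "total": len(slms),
        "deployed": sum(1 for s in slms if s.deployed),
        "benched": sum(1 for s in slms if not s.deployed),
        "measuring": 0,  # the real router derives this from live SLMM status
    })
```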

## Navigation

The sidebar navigation ([templates/base.html:116-128](../templates/base.html#L116-L128)) includes links to both dashboards:

```html
<a href="/seismographs">
    <svg>...</svg>
    Seismographs
</a>

<a href="/sound-level-meters">
    <svg>...</svg>
    Sound Level Meters
</a>
```

Active page highlighting is automatic based on `request.url.path`.

## Database Queries

All dashboards filter by device type using SQLAlchemy:

### Seismographs Query
```python
seismos = db.query(RosterUnit).filter_by(
    device_type="seismograph",
    retired=False
).all()
```

### Sound Level Meters Query
```python
slms = db.query(RosterUnit).filter_by(
    device_type="slm",
    retired=False
).all()
```

### With Search Filter
```python
query = db.query(RosterUnit).filter_by(
    device_type="seismograph",
    retired=False
)

if search:
    query = query.filter(
        (RosterUnit.id.ilike(f"%{search}%")) |
        (RosterUnit.note.ilike(f"%{search}%")) |
        (RosterUnit.address.ilike(f"%{search}%"))
    )
```

## UI Components

### Stats Cards (Partials)

**Seismograph Stats**: [templates/partials/seismo_stats.html](../templates/partials/seismo_stats.html)
- Total Seismographs
- Deployed
- Benched
- With Modem (showing X / Y deployed)

**SLM Stats**: [templates/partials/slm_stats.html](../templates/partials/slm_stats.html)
- Total SLMs
- Deployed
- Benched
- Currently Measuring (from live status)

### Unit Lists (Partials)

**Seismograph List**: [templates/partials/seismo_unit_list.html](../templates/partials/seismo_unit_list.html)

Table columns:
- Unit ID (link to detail page)
- Status (Deployed/Benched badge)
- Modem (link to modem unit)
- Location (address or coordinates)
- Notes
- Actions (View Details link)

**SLM List**: [templates/partials/slm_unit_list.html](../templates/partials/slm_unit_list.html)

Table columns:
- Unit ID (link to detail page)
- Model (NL-43, NL-53)
- Status (Deployed/Benched badge)
- Modem (link to modem unit)
- Location
- Actions (View Details, Live View)

## HTMX Integration

Both dashboards use HTMX for dynamic updates:

### Auto-refresh Stats
```html
<div hx-get="/api/seismo-dashboard/stats"
     hx-trigger="load, every 30s"
     hx-swap="innerHTML">
```

### Search with Debouncing
```html
<input type="text"
       hx-get="/api/seismo-dashboard/units"
       hx-trigger="keyup changed delay:300ms"
       hx-target="#seismo-units-list"
       name="search" />
```

## Adding New Device Types

To add support for a new device type (e.g., "modem"):

1. **Create Router** (`backend/routers/modem_dashboard.py`):
```python
@router.get("/stats", response_class=HTMLResponse)
async def get_modem_stats(request: Request, db: Session = Depends(get_db)):
    modems = db.query(RosterUnit).filter_by(
        device_type="modem",
        retired=False
    ).all()
    # Calculate stats and return partial
```

2. **Create Templates**:
   - `templates/modems.html` - Main dashboard page
   - `templates/partials/modem_stats.html` - Stats cards
   - `templates/partials/modem_unit_list.html` - Unit list table

3. **Register in main.py**:
```python
from backend.routers import modem_dashboard
app.include_router(modem_dashboard.router)

@app.get("/modems", response_class=HTMLResponse)
async def modems_page(request: Request):
    return templates.TemplateResponse("modems.html", {"request": request})
```

4. **Add to Navigation** (`templates/base.html`):
```html
<a href="/modems">
    <svg>...</svg>
    Modems
</a>
```

5. **Update Main Dashboard** (`templates/dashboard.html`):
   Add modem count to the device type breakdown:
```html
<div class="flex justify-between items-center">
    <a href="/modems">Modems</a>
    <span id="modem-count">--</span>
</div>
```

   Update JavaScript to count modems:
```javascript
let modemCount = 0;
Object.values(data.units || {}).forEach(unit => {
    if (unit.device_type === 'modem' && !unit.retired) {
        modemCount++;
    }
});
document.getElementById('modem-count').textContent = modemCount;
```

## Benefits

1. **Separation of Concerns**: Each device type has its own dedicated interface
2. **Scalability**: Easy to add new device types following the established pattern
3. **Performance**: Queries are filtered by device type, reducing data transfer
4. **User Experience**: Users can focus on specific device types without clutter
5. **Maintainability**: Each dashboard is self-contained and easy to modify

## Related Documentation

- [SOUND_LEVEL_METERS_DASHBOARD.md](SOUND_LEVEL_METERS_DASHBOARD.md) - SLM dashboard details
- [DEVICE_TYPE_SLM_SUPPORT.md](DEVICE_TYPE_SLM_SUPPORT.md) - Adding SLM device type support
- [MODEM_INTEGRATION.md](MODEM_INTEGRATION.md) - Modem assignment architecture
288
docs/DEVICE_TYPE_SCHEMA.md
Normal file
@@ -0,0 +1,288 @@
# Device Type Schema - Terra-View

## Overview

Terra-View uses a single roster table to manage three different device types. The `device_type` field is the primary discriminator that determines which fields are relevant for each unit.

## Official device_type Values

As of **Terra-View v0.4.3**, the following device_type values are standardized:

### 1. `"seismograph"` (Default)
**Purpose**: Seismic monitoring devices

**Applicable Fields**:
- Common: id, unit_type, deployed, retired, note, project_id, location, address, coordinates
- Specific: last_calibrated, next_calibration_due, deployed_with_modem_id

**Examples**:
- `BE1234` - Series 3 seismograph
- `UM12345` - Series 4 Micromate unit
- `SEISMO-001` - Custom seismograph

**Unit Type Values**:
- `series3` - Series 3 devices (default)
- `series4` - Series 4 devices
- `micromate` - Micromate devices

---

### 2. `"modem"`
**Purpose**: Field modems and network equipment

**Applicable Fields**:
- Common: id, unit_type, deployed, retired, note, project_id, location, address, coordinates
- Specific: ip_address, phone_number, hardware_model

**Examples**:
- `MDM001` - Field modem
- `MODEM-2025-01` - Network modem
- `RAVEN-XTV-01` - Specific modem model

**Unit Type Values**:
- `modem` - Generic modem
- `raven-xtv` - Raven XTV model
- Custom values for specific hardware

---

### 3. `"slm"` ⭐
**Purpose**: Sound level meters (Rion NL-43/NL-53)

**Applicable Fields**:
- Common: id, unit_type, deployed, retired, note, project_id, location, address, coordinates
- Specific: slm_host, slm_tcp_port, slm_ftp_port, slm_model, slm_serial_number, slm_frequency_weighting, slm_time_weighting, slm_measurement_range, slm_last_check, deployed_with_modem_id

**Examples**:
- `SLM-43-01` - NL-43 sound level meter
- `NL43-001` - NL-43 unit
- `NL53-002` - NL-53 unit

**Unit Type Values**:
- `nl43` - Rion NL-43 model
- `nl53` - Rion NL-53 model

---

## Migration from Legacy Values

### Deprecated Values

The following device_type values have been **deprecated** and should be migrated:

- ❌ `"sound_level_meter"` → ✅ `"slm"`

### How to Migrate

Run the standardization migration script to update existing databases:

```bash
cd /home/serversdown/tmi/terra-view
python3 backend/migrate_standardize_device_types.py
```

This script:
- Converts all `"sound_level_meter"` values to `"slm"`
- Is idempotent (safe to run multiple times)
- Shows before/after distribution of device types
- No data loss
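
The core of such a standardization pass is a single idempotent UPDATE. The sketch below illustrates the operation under the assumption that the roster lives in the `roster` table of the SQLite database; it is not the actual contents of `backend/migrate_standardize_device_types.py`:

```python
# Minimal sketch of the standardization migration (illustrative only).
import sqlite3


def migrate(db_path: str = "data/seismo_fleet.db") -> None:
    conn = sqlite3.connect(db_path)
    try:
        # Show the distribution before migrating
        for device_type, count in conn.execute(
            "SELECT device_type, COUNT(*) FROM roster GROUP BY device_type"
        ):
            print(f"before: {device_type} = {count}")

        # Idempotent: rows already set to 'slm' are untouched
        conn.execute(
            "UPDATE roster SET device_type = 'slm' "
            "WHERE device_type = 'sound_level_meter'"
        )
        conn.commit()

        # Show the distribution after migrating
        for device_type, count in conn.execute(
            "SELECT device_type, COUNT(*) FROM roster GROUP BY device_type"
        ):
            print(f"after: {device_type} = {count}")
    finally:
        conn.close()


if __name__ == "__main__":
    migrate()
```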
---

## Database Schema

### RosterUnit Model (`backend/models.py`)

```python
class RosterUnit(Base):
    """
    Supports multiple device types:
    - "seismograph" - Seismic monitoring devices (default)
    - "modem" - Field modems and network equipment
    - "slm" - Sound level meters (NL-43/NL-53)
    """
    __tablename__ = "roster"

    # Core fields (all device types)
    id = Column(String, primary_key=True)
    unit_type = Column(String, default="series3")
    device_type = Column(String, default="seismograph")  # "seismograph" | "modem" | "slm"
    deployed = Column(Boolean, default=True)
    retired = Column(Boolean, default=False)
    # ... other common fields

    # Seismograph-specific
    last_calibrated = Column(Date, nullable=True)
    next_calibration_due = Column(Date, nullable=True)

    # Modem-specific
    ip_address = Column(String, nullable=True)
    phone_number = Column(String, nullable=True)
    hardware_model = Column(String, nullable=True)

    # SLM-specific
    slm_host = Column(String, nullable=True)
    slm_tcp_port = Column(Integer, nullable=True)
    slm_ftp_port = Column(Integer, nullable=True)
    slm_model = Column(String, nullable=True)
    slm_serial_number = Column(String, nullable=True)
    slm_frequency_weighting = Column(String, nullable=True)
    slm_time_weighting = Column(String, nullable=True)
    slm_measurement_range = Column(String, nullable=True)
    slm_last_check = Column(DateTime, nullable=True)

    # Shared fields (seismograph + SLM)
    deployed_with_modem_id = Column(String, nullable=True)  # FK to modem
```

---

## API Usage

### Adding a New Unit

**Seismograph**:
```bash
curl -X POST http://localhost:8001/api/roster/add \
  -F "id=BE1234" \
  -F "device_type=seismograph" \
  -F "unit_type=series3" \
  -F "deployed=true"
```

**Modem**:
```bash
curl -X POST http://localhost:8001/api/roster/add \
  -F "id=MDM001" \
  -F "device_type=modem" \
  -F "ip_address=192.0.2.10" \
  -F "phone_number=+1-555-0100"
```

**Sound Level Meter**:
```bash
curl -X POST http://localhost:8001/api/roster/add \
  -F "id=SLM-43-01" \
  -F "device_type=slm" \
  -F "slm_host=63.45.161.30" \
  -F "slm_tcp_port=2255" \
  -F "slm_model=NL-43"
```

### CSV Import Format

```csv
unit_id,unit_type,device_type,deployed,slm_host,slm_tcp_port,slm_model
SLM-43-01,nl43,slm,true,63.45.161.30,2255,NL-43
SLM-43-02,nl43,slm,true,63.45.161.31,2255,NL-43
BE1234,series3,seismograph,true,,,
MDM001,modem,modem,true,,,
```
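
For illustration, here is a hedged sketch of how rows in this format could be loaded into `RosterUnit` records. The actual importer may differ; the boolean parsing and the `db.merge` upsert here are assumptions:

```python
# Hypothetical CSV loader for the format above; a sketch, not the actual
# importer used by Terra-View.
import csv

from backend.database import SessionLocal
from backend.models import RosterUnit


def import_roster_csv(path: str) -> int:
    db = SessionLocal()
    imported = 0
    try:
        with open(path, newline="") as f:
            for row in csv.DictReader(f):
                unit = RosterUnit(
                    id=row["unit_id"],
                    unit_type=row["unit_type"],
                    device_type=row["device_type"],
                    deployed=row["deployed"].strip().lower() == "true",
                    # SLM columns are blank for non-SLM rows
                    slm_host=row["slm_host"] or None,
                    slm_tcp_port=int(row["slm_tcp_port"]) if row["slm_tcp_port"] else None,
                    slm_model=row["slm_model"] or None,
                )
                db.merge(unit)  # upsert by primary key
                imported += 1
        db.commit()
        return imported
    finally:
        db.close()
```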
---

## Frontend Behavior

### Device Type Selection

**Templates**: `unit_detail.html`, `roster.html`

```html
<select name="device_type">
    <option value="seismograph">Seismograph</option>
    <option value="modem">Modem</option>
    <option value="slm">Sound Level Meter</option>
</select>
```

### Conditional Field Display

JavaScript functions check `device_type` to show/hide relevant fields:

```javascript
function toggleDetailFields() {
    const deviceType = document.getElementById('device_type').value;

    if (deviceType === 'seismograph') {
        // Show calibration fields
    } else if (deviceType === 'modem') {
        // Show network fields
    } else if (deviceType === 'slm') {
        // Show SLM configuration fields
    }
}
```

---

## Code Conventions

### Always Use Lowercase

✅ **Correct**:
```python
if unit.device_type == "slm":
    # Handle sound level meter
```

❌ **Incorrect**:
```python
if unit.device_type == "SLM":  # Wrong - case sensitive
if unit.device_type == "sound_level_meter":  # Deprecated
```

### Query Patterns

**Filter by device type**:
```python
# Get all SLMs
slms = db.query(RosterUnit).filter_by(device_type="slm").all()

# Get deployed seismographs
seismos = db.query(RosterUnit).filter_by(
    device_type="seismograph",
    deployed=True
).all()

# Get all modems
modems = db.query(RosterUnit).filter_by(device_type="modem").all()
```

---

## Testing

### Verify Device Type Distribution

```bash
# Quick check
sqlite3 data/seismo_fleet.db "SELECT device_type, COUNT(*) FROM roster GROUP BY device_type;"

# Detailed view
sqlite3 data/seismo_fleet.db "SELECT id, device_type, unit_type, deployed FROM roster ORDER BY device_type, id;"
```

### Check for Legacy Values

```bash
# Should return 0 rows after migration
sqlite3 data/seismo_fleet.db "SELECT id FROM roster WHERE device_type = 'sound_level_meter';"
```

---

## Version History

- **v0.4.3** (2026-01-16) - Standardized device_type values, deprecated `"sound_level_meter"` → `"slm"`
- **v0.4.0** (2026-01-05) - Added SLM support with `"sound_level_meter"` value
- **v0.2.0** (2025-12-03) - Added modem device type
- **v0.1.0** (2024-11-20) - Initial release with seismograph-only support

---

## Related Documentation

- [README.md](../README.md) - Main project documentation with data model
- [DEVICE_TYPE_SLM_SUPPORT.md](DEVICE_TYPE_SLM_SUPPORT.md) - Legacy SLM implementation notes
- [SOUND_LEVEL_METERS_DASHBOARD.md](SOUND_LEVEL_METERS_DASHBOARD.md) - SLM dashboard features
- [SLM_CONFIGURATION.md](SLM_CONFIGURATION.md) - SLM device configuration guide
161
docs/DEVICE_TYPE_SLM_SUPPORT.md
Normal file
@@ -0,0 +1,161 @@
# Sound Level Meter Device Type Support

**⚠️ IMPORTANT**: This documentation uses the legacy `sound_level_meter` device type value. As of v0.4.3, the standardized value is `"slm"`. Run `backend/migrate_standardize_device_types.py` to update your database.

## Overview

Added full support for "Sound Level Meter" as a device type in the roster management system. Users can now create, edit, and manage SLM units through the Fleet Roster interface.

## Changes Made

### 1. Frontend - Unit Detail Page

**File**: `templates/unit_detail.html`

#### Added Device Type Option
- Added "Sound Level Meter" option to the device type dropdown (line 243)
- Value: `sound_level_meter`

#### Added SLM-Specific Fields Section (lines 320-370)
New form fields for Sound Level Meter configuration:

- **Host (IP Address)**: Device network address
  - Field name: `slm_host`
  - Example: `192.168.1.100`

- **TCP Port**: Control port (default 2255)
  - Field name: `slm_tcp_port`
  - Type: number

- **Model**: Device model designation
  - Field name: `slm_model`
  - Example: `NL-43`, `NL-53`

- **Serial Number**: Manufacturer serial number
  - Field name: `slm_serial_number`

- **Frequency Weighting**: Sound measurement weighting curve
  - Field name: `slm_frequency_weighting`
  - Options: A-weighting, C-weighting, Z-weighting (Flat)

- **Time Weighting**: Temporal averaging method
  - Field name: `slm_time_weighting`
  - Options: Fast (125ms), Slow (1s), Impulse (35ms)

- **Measurement Range**: Device measurement capability
  - Field name: `slm_measurement_range`
  - Example: `30-130 dB`

#### Updated JavaScript Functions

**toggleDetailFields()** (lines 552-571)
- Now handles three device types: seismograph, modem, sound_level_meter
- Hides all device-specific sections, then shows only the relevant one
- Shows the `slmFields` section when the device type is `sound_level_meter`

**populateEditForm()** (lines 527-558)
- Populates all 7 SLM-specific fields from unit data
- Sets an empty string as the default if a field is null

### 2. Backend - API Endpoints

**File**: `backend/routers/roster_edit.py`

#### Updated Add Unit Endpoint
`POST /api/roster/add`

**New Parameters** (lines 61-67):
- `slm_host`: str (optional)
- `slm_tcp_port`: int (optional)
- `slm_model`: str (optional)
- `slm_serial_number`: str (optional)
- `slm_frequency_weighting`: str (optional)
- `slm_time_weighting`: str (optional)
- `slm_measurement_range`: str (optional)

**Unit Creation** (lines 108-115):
All SLM fields are set when creating a new unit.
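
For reference, the optional parameters plausibly appear on the endpoint signature like this (a sketch: the SLM parameter names match the list above, everything else is illustrative):

```python
# Illustrative sketch only: the real signature in backend/routers/roster_edit.py
# carries many more fields.
from typing import Optional

from fastapi import APIRouter, Form

router = APIRouter()

@router.post("/api/roster/add")
async def add_unit(
    id: str = Form(...),
    device_type: str = Form("seismograph"),
    slm_host: Optional[str] = Form(None),
    slm_tcp_port: Optional[int] = Form(None),
    slm_model: Optional[str] = Form(None),
    slm_serial_number: Optional[str] = Form(None),
    slm_frequency_weighting: Optional[str] = Form(None),
    slm_time_weighting: Optional[str] = Form(None),
    slm_measurement_range: Optional[str] = Form(None),
):
    # ...create the RosterUnit with all fields set, commit, return status...
    ...
```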

#### Updated Get Unit Endpoint
`GET /api/roster/{unit_id}`

**New Response Fields** (lines 146-152):
Returns all 7 SLM fields in the response, with an empty string as the default if null.

#### Updated Edit Unit Endpoint
`POST /api/roster/edit/{unit_id}`

**New Parameters** (lines 177-183):
Same 7 SLM-specific parameters as the add endpoint.

**Unit Update** (lines 232-239):
All SLM fields are updated when editing an existing unit.

### 3. Database Schema

**File**: `backend/models.py`

The database schema already included SLM fields (no changes needed):
- All fields are nullable to support multiple device types
- Fields are only relevant when `device_type = "slm"`

## Usage

### Creating a New SLM Unit

1. Go to the Fleet Roster page
2. Click "Add Unit" or edit an existing unit
3. Select "Sound Level Meter" from the Device Type dropdown
4. Fill in the SLM-specific fields (Host, Port, Model, etc.)
5. Save

### Converting an Existing Unit to SLM

1. Open the unit detail page
2. Click "Edit Unit"
3. Change Device Type to "Sound Level Meter"
4. The SLM fields section will appear
5. Fill in the required SLM configuration
6. Save changes

### Field Visibility

The form automatically shows/hides relevant fields based on device type:
- **Seismograph**: Shows calibration dates, modem deployment info
- **Modem**: Shows IP, phone number, hardware model
- **Sound Level Meter**: Shows host, port, model, serial, weightings, range

## Integration with SLMM Dashboard

Units with `device_type = "slm"` will:
- Appear in the Sound Level Meters dashboard (`/sound-level-meters`)
- Be available for live monitoring and control
- Use the configured `slm_host` and `slm_tcp_port` for device communication

## Testing

Test SLM units have been added via `add_test_slms.py`:
- `nl43-001` - Deployed at construction site A
- `nl43-002` - Deployed at construction site B
- `nl53-001` - Deployed at a residential area
- `nl43-003` - Benched for calibration

You can edit any of these units to verify the form works correctly.

## Files Modified

1. `templates/unit_detail.html` - Added dropdown option, SLM fields section, updated JavaScript
2. `backend/routers/roster_edit.py` - Added SLM parameters to add/edit/get endpoints
3. `backend/models.py` - No changes (schema already supported SLM)

## Backward Compatibility

- Existing seismograph and modem units are unaffected
- All SLM fields are optional/nullable
- Forms gracefully handle units with a missing device_type (defaults to seismograph)

---

**Version**: 1.0.0
**Date**: January 5, 2026
**Related**: SOUND_LEVEL_METERS_DASHBOARD.md
132
docs/DEV_DATABASE_SETUP.md
Normal file
@@ -0,0 +1,132 @@
# DEV Database Setup Instructions

## Current Situation

The test SLM and modem data was accidentally added to the **PRODUCTION** database (`data/seismo_fleet.db`).

**Good news**: I've already removed it! The production database is clean.

**Issue**: The DEV database (`data-dev/seismo_fleet.db`) is:
1. Owned by root (read-only for your user)
2. Missing the SLM-specific columns in its schema

## What You Need to Do

### Step 1: Fix DEV Database Permissions

Run these commands to make the DEV database writable:

```bash
cd /home/serversdown/sfm/seismo-fleet-manager
sudo chown serversdown:serversdown data-dev/seismo_fleet.db
sudo chmod 664 data-dev/seismo_fleet.db
```

### Step 2: Migrate the DEV Database Schema

Add the SLM columns to the DEV database:

```bash
python3 scripts/migrate_dev_db.py
```

This will add these columns to the `roster` table:
- `slm_host`
- `slm_tcp_port`
- `slm_model`
- `slm_serial_number`
- `slm_frequency_weighting`
- `slm_time_weighting`
- `slm_measurement_range`
- `slm_last_check`
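
Under the hood this is likely a series of `ALTER TABLE ... ADD COLUMN` statements, which SQLite supports. A minimal sketch of that approach (the column types are assumptions; the real script may differ):

```python
# Sketch of the approach scripts/migrate_dev_db.py likely takes.
import sqlite3

SLM_COLUMNS = {
    "slm_host": "TEXT",
    "slm_tcp_port": "INTEGER",
    "slm_model": "TEXT",
    "slm_serial_number": "TEXT",
    "slm_frequency_weighting": "TEXT",
    "slm_time_weighting": "TEXT",
    "slm_measurement_range": "TEXT",
    "slm_last_check": "TIMESTAMP",
}

conn = sqlite3.connect("data-dev/seismo_fleet.db")
existing = {row[1] for row in conn.execute("PRAGMA table_info(roster)")}
for name, sqltype in SLM_COLUMNS.items():
    if name not in existing:  # ADD COLUMN fails if the column already exists
        conn.execute(f"ALTER TABLE roster ADD COLUMN {name} {sqltype}")
        print(f"✓ Added column: {name}")
conn.commit()
conn.close()
```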

### Step 3: Add Test Data to DEV

Now you can safely add test data to the DEV database:

```bash
# Add test SLMs
python3 scripts/add_test_slms.py

# Add test modems and assign to SLMs
python3 scripts/add_test_modems.py
```

This will create:
- 4 test SLM units (nl43-001, nl43-002, nl53-001, nl43-003)
- 4 test modem units (modem-001, modem-002, modem-003, modem-004)
- Assignments linking the modems to the SLMs

## Production Database Status

✅ **Production database is CLEAN** - all test data has been removed.

The production database (`data/seismo_fleet.db`) is ready for real production use.

## Test Scripts

All test scripts have been updated to use the DEV database:

### Scripts Updated:
1. `scripts/add_test_slms.py` - Now uses `data-dev/seismo_fleet.db`
2. `scripts/add_test_modems.py` - Now uses `data-dev/seismo_fleet.db`

### Scripts Created:
1. `scripts/remove_test_data_from_prod.py` - Removes test data from production (already run)
2. `scripts/update_dev_db_schema.py` - Schema update attempt (does not work due to SQLite's limited `ALTER TABLE` support)
3. `scripts/migrate_dev_db.py` - Adds SLM columns to the DEV database

All helper scripts are located in the `scripts/` directory. See [scripts/README.md](../scripts/README.md) for detailed usage instructions.

## Verification

After running the steps above, verify everything worked:

```bash
# Check that the DEV database has test data
sqlite3 data-dev/seismo_fleet.db "SELECT id, device_type FROM roster WHERE device_type IN ('sound_level_meter', 'modem');"
```

You should see:
```
nl43-001|sound_level_meter
nl43-002|sound_level_meter
nl53-001|sound_level_meter
nl43-003|sound_level_meter
modem-001|modem
modem-002|modem
modem-003|modem
modem-004|modem
```

## Development vs Production

### When to Use the DEV Database

To use the DEV database, set the environment variable:

```bash
# Not implemented yet, but you could add this to database.py:
export DATABASE_ENV=dev
```

Or modify `backend/database.py` to check for an environment variable, as sketched below.
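
One possible shape for that change (a sketch only: the `DATABASE_ENV` name mirrors the snippet above, and the SQLAlchemy-style URL is an assumption):

```python
# Hypothetical change to backend/database.py -- not implemented yet.
import os

DB_PATHS = {
    "prod": "data/seismo_fleet.db",
    "dev": "data-dev/seismo_fleet.db",
}

env = os.environ.get("DATABASE_ENV", "prod")
DATABASE_URL = f"sqlite:///{DB_PATHS.get(env, DB_PATHS['prod'])}"
```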

### Current Setup

Right now, the application always uses `data/seismo_fleet.db` (production).

For development/testing, you could:
1. Point SFM to use the DEV database temporarily
2. Keep test data in production (not recommended)
3. Implement environment-based database selection

## Summary

- ✅ Production DB cleaned (no test data)
- ⚠️ DEV DB needs a permission fix (run the sudo commands above)
- ⚠️ DEV DB needs a schema migration (run `scripts/migrate_dev_db.py`)
- ✅ Test scripts updated to use the DEV DB
- ✅ All test data ready to be added to the DEV DB

Run the commands above and you'll be all set!
62
docs/FIX_DEV_PERMISSIONS.md
Normal file
@@ -0,0 +1,62 @@
# Fix DEV Database Permissions

## The Problem

SQLite needs write access to both the database file AND the directory it's in (to create temporary files like journals and WAL files).

Currently:
- ✅ Database file: `data-dev/seismo_fleet.db` - permissions fixed
- ❌ Directory: `data-dev/` - still owned by root

## The Fix

Run this command to fix the directory ownership:

```bash
sudo chown -R serversdown:serversdown /home/serversdown/sfm/seismo-fleet-manager/data-dev/
```

Then run the migration again:

```bash
python3 scripts/migrate_dev_db.py
```

## Full Setup Commands

Here's the complete sequence:

```bash
cd /home/serversdown/sfm/seismo-fleet-manager

# Fix directory ownership (includes all files inside)
sudo chown -R serversdown:serversdown data-dev/

# Migrate schema
python3 scripts/migrate_dev_db.py

# Add test data
python3 scripts/add_test_slms.py
python3 scripts/add_test_modems.py
```

## Verify It Worked

After running the migration, you should see:

```
Migrating DEV database to add SLM columns...
============================================================
✓ Added column: slm_host
✓ Added column: slm_tcp_port
✓ Added column: slm_model
✓ Added column: slm_serial_number
✓ Added column: slm_frequency_weighting
✓ Added column: slm_time_weighting
✓ Added column: slm_measurement_range
✓ Added column: slm_last_check
============================================================
DEV database migration completed!
```

Then the test data scripts should work without errors!
375
docs/MODEM_INTEGRATION.md
Normal file
@@ -0,0 +1,375 @@
# Modem Integration System

## Overview

The modem integration system allows Sound Level Meters (SLMs) and Seismographs to be deployed with network connectivity modems. Instead of storing IP addresses directly on each device, units are assigned to modems, which provide the network connection. This enables:

- Centralized modem management and tracking
- IP address updates in one place
- Future modem API integration for diagnostics
- Proper asset tracking for network equipment

## Architecture

### Database Design

**Modem Units**: Stored as `RosterUnit` with `device_type = "modem"`

**Modem-Specific Fields**:
- `ip_address`: Network IP address or hostname
- `phone_number`: Cellular phone number (if applicable)
- `hardware_model`: Modem hardware (e.g., "Raven XTV", "Sierra Wireless AirLink")

**Device Assignment**:
- Both SLMs and Seismographs use `deployed_with_modem_id` to reference their modem
- This is a foreign key to another `RosterUnit.id` where `device_type = "modem"`

### How It Works

1. **Create Modem Units**
   - Add modems as regular roster units with `device_type = "modem"`
   - Set IP address, phone number, and hardware model
   - Deploy or bench modems like any other asset

2. **Assign Devices to Modems**
   - When editing an SLM or Seismograph, select a modem from the dropdown
   - The `deployed_with_modem_id` field stores the modem ID
   - The IP address is fetched from the assigned modem at runtime

3. **Runtime Resolution**
   - When the SLM dashboard needs to connect to a device:
     1. Load SLM unit data
     2. Check `deployed_with_modem_id`
     3. Fetch the modem unit
     4. Use the modem's `ip_address` for the connection
     5. Fall back to the legacy `slm_host` if no modem is assigned

## Implementation Details

### Frontend Changes

#### Unit Detail Page (`templates/unit_detail.html`)

**SLM Fields** (lines 320-375):
- Removed the direct "Host (IP Address)" field
- Added a "Deployed With Modem" dropdown selector
- Dropdown populated with all active modems via JavaScript
- Shows modem ID, IP address, and hardware model in the dropdown

**JavaScript** (lines 456-485):
```javascript
async function loadModemsList() {
    // Fetches all modems from /api/roster/modems
    // Populates both seismograph and SLM modem dropdowns
    // Shows format: "modem-001 (192.168.1.100) - Raven XTV"
}
```

#### SLM Dashboard (`backend/routers/slm_dashboard.py`)

**Live View Endpoint** (lines 84-148):
```python
# Get modem information if assigned
modem = None
modem_ip = None
if unit.deployed_with_modem_id:
    modem = db.query(RosterUnit).filter_by(
        id=unit.deployed_with_modem_id,
        device_type="modem"
    ).first()
    if modem:
        modem_ip = modem.ip_address

# Fallback to direct slm_host (backward compatibility)
if not modem_ip and unit.slm_host:
    modem_ip = unit.slm_host
```

**Live View Template** (`templates/partials/slm_live_view.html`):
- Displays modem information in the header
- Shows "via Modem: modem-001 (192.168.1.100)"
- Shows a warning if no modem is assigned

### Backend Changes

#### New API Endpoint (`backend/routers/roster_edit.py`)

```
GET /api/roster/modems
```

Returns a list of all non-retired modem units:
```json
[
  {
    "id": "modem-001",
    "ip_address": "192.168.1.100",
    "phone_number": "+1-555-0100",
    "hardware_model": "Raven XTV",
    "deployed": true
  }
]
```

Used by the frontend to populate modem selection dropdowns.
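
A plausible sketch of the endpoint itself (field names match the JSON above; the `retired` column name and the module paths are assumptions):

```python
# Illustrative sketch -- the real handler in backend/routers/roster_edit.py
# may differ.
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session

from backend.database import get_db    # assumed session dependency
from backend.models import RosterUnit

router = APIRouter()

@router.get("/api/roster/modems")
def list_modems(db: Session = Depends(get_db)):
    modems = (
        db.query(RosterUnit)
        .filter(RosterUnit.device_type == "modem")
        .filter(RosterUnit.retired == False)  # noqa: E712 -- assumed column name
        .all()
    )
    return [
        {
            "id": m.id,
            "ip_address": m.ip_address,
            "phone_number": m.phone_number,
            "hardware_model": m.hardware_model,
            "deployed": m.deployed,
        }
        for m in modems
    ]
```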

#### Database Schema (`backend/models.py`)

**Modem Assignment Field** (line 44):
```python
# Shared by seismographs and SLMs
deployed_with_modem_id = Column(String, nullable=True)
```

**Modem Fields** (lines 46-48):
```python
ip_address = Column(String, nullable=True)
phone_number = Column(String, nullable=True)
hardware_model = Column(String, nullable=True)
```

**Legacy SLM Fields** (kept for backward compatibility):
```python
slm_host = Column(String, nullable=True)  # Deprecated - use modem instead
slm_tcp_port = Column(Integer, nullable=True)  # Still used
```

## Usage

### Creating Modem Units

1. Go to Fleet Roster
2. Click "Add Unit"
3. Set Device Type to "Modem"
4. Fill in:
   - Unit ID (e.g., "modem-001")
   - IP Address (e.g., "192.168.1.100")
   - Phone Number (if cellular)
   - Hardware Model (e.g., "Raven XTV")
   - Address/Coordinates (physical location)
5. Set Deployed status
6. Save

### Assigning a Modem to an SLM

1. Open the SLM unit detail page
2. Click "Edit Unit"
3. Ensure Device Type is "Sound Level Meter"
4. In the "Deployed With Modem" dropdown, select a modem
5. Verify the TCP Port (default 2255)
6. Save

### Assigning a Modem to a Seismograph

Same process - both device types use the same modem selection field.

## Test Data

Use the included script to create test modems:

```bash
python3 add_test_modems.py
```

This creates:
- **modem-001**: 192.168.1.100, Raven XTV → assigned to nl43-001
- **modem-002**: 192.168.1.101, Raven XTV → assigned to nl43-002
- **modem-003**: 192.168.1.102, Sierra Wireless → assigned to nl53-001
- **modem-004**: Spare modem (not deployed)

## Benefits

### For Operations

1. **Centralized IP Management**
   - Update a modem's IP once; the change affects all assigned devices
   - Easy to track which modem serves which devices
   - Inventory management for network equipment

2. **Asset Tracking**
   - Modems are first-class assets in the roster
   - Track deployment status, location, notes
   - Can bench/retire modems independently

3. **Future Capabilities**
   - Modem API integration (signal strength, data usage)
   - Automatic IP updates from DHCP/cellular network
   - Modem health monitoring
   - Remote modem diagnostics

### For Maintenance

1. **Easier Troubleshooting**
   - See which modem serves a device
   - Check modem status separately
   - Swap modems without reconfiguring devices

2. **Configuration Changes**
   - Change IP addresses system-wide
   - Move devices between modems
   - Test with backup modems

## Migration from Legacy System

### For Existing SLMs with Direct IP

Legacy SLMs with `slm_host` set still work:
- The system checks `deployed_with_modem_id` first
- Falls back to `slm_host` if no modem is assigned
- Logs fallback usage for visibility

### Migration Steps

1. Create modem units for each IP address
2. Assign SLMs to their modems
3. The system will use the modem IP automatically
4. The legacy `slm_host` can be cleared (optional)

The script `add_test_modems.py` demonstrates this:
```python
# Clear legacy field after modem assignment
slm.slm_host = None
slm.deployed_with_modem_id = "modem-001"
```

## Future Enhancements

### Near-term

1. **Modem Status Dashboard**
   - List all modems with connection status
   - Show which devices use each modem
   - Signal strength, data usage indicators

2. **Automatic IP Discovery**
   - Query the cellular provider API for modem IPs
   - Auto-update IP addresses in the database
   - Alert on IP changes

3. **Modem Health Monitoring**
   - Ping modems periodically
   - Check cellular signal quality
   - Data usage tracking

### Long-term

1. **Modem API Integration**
   - Direct modem management (Raven, Sierra APIs)
   - Remote reboot capability
   - Configuration backup/restore
   - Firmware updates

2. **Network Topology View**
   - Graphical view of modem-device relationships
   - Network health visualization
   - Troubleshooting tools

3. **Multi-Modem Support**
   - Failover between modems
   - Load balancing
   - Automatic fallback on modem failure

## API Reference

### Get Modems List

**Endpoint**: `GET /api/roster/modems`

**Response**:
```json
[
  {
    "id": "modem-001",
    "ip_address": "192.168.1.100",
    "phone_number": "+1-555-0100",
    "hardware_model": "Raven XTV",
    "deployed": true
  }
]
```

**Used By**:
- Unit detail page (modem dropdown)
- Future modem management dashboard

### Get Unit with Modem Info

**Endpoint**: `GET /api/roster/{unit_id}`

**Response** (for an SLM):
```json
{
  "id": "nl43-001",
  "device_type": "slm",
  "deployed_with_modem_id": "modem-001",
  "slm_tcp_port": 2255,
  "slm_model": "NL-43",
  ...
}
```

Then fetch the modem separately, or use the dashboard endpoint, which resolves it automatically.

### SLM Live View (with modem resolution)

**Endpoint**: `GET /api/slm-dashboard/live-view/{unit_id}`

**Process**:
1. Loads the SLM unit
2. Resolves `deployed_with_modem_id` to a modem unit
3. Extracts `modem.ip_address`
4. Uses the IP to connect to the SLMM backend
5. Returns live view HTML with modem info

## Files Modified/Created

### Modified
1. `backend/models.py` - Clarified modem assignment field
2. `templates/unit_detail.html` - Added modem selector for SLMs
3. `backend/routers/roster_edit.py` - Added modems list endpoint
4. `backend/routers/slm_dashboard.py` - Modem resolution logic
5. `templates/partials/slm_live_view.html` - Display modem info

### Created
1. `add_test_modems.py` - Script to create test modems and assignments
2. `docs/MODEM_INTEGRATION.md` - This documentation

## Troubleshooting

### SLM shows "No modem assigned"

**Cause**: The unit has no `deployed_with_modem_id` set

**Solution**:
1. Edit the SLM unit
2. Select a modem from the dropdown
3. Save

### Modem dropdown is empty

**Cause**: No modem units in the database

**Solution**:
1. Create modem units first
2. Set `device_type = "modem"`
3. Ensure they're not retired

### Can't connect to SLM

**Possible Causes**:
1. Modem IP is incorrect
2. Modem is offline
3. SLM TCP port is wrong
4. Network routing issue

**Debug Steps**:
1. Check the modem unit's IP address
2. Ping the modem IP
3. Check the SLM's `slm_tcp_port` (default 2255)
4. Review logs for connection errors

---

**Version**: 1.0.0
**Date**: January 5, 2026
**Related**: SOUND_LEVEL_METERS_DASHBOARD.md, DEVICE_TYPE_SLM_SUPPORT.md
275
docs/SLM_CONFIGURATION.md
Normal file
@@ -0,0 +1,275 @@
# SLM Configuration Interface

This document describes the SLM configuration interface added to the Sound Level Meters dashboard.

## Overview

Sound Level Meters can now be configured directly from the dashboard without needing to navigate to the unit detail page. A configuration button appears on each SLM unit card on hover, opening a modal with all configurable parameters.

## Features

### 1. Quick Access Configuration Button

- **Location**: Appears on each SLM unit card in the unit list
- **Behavior**: Shows on hover (desktop) or always visible (mobile)
- **Icon**: Gear/settings icon in the top-right corner of the unit card

### 2. Configuration Modal

The configuration modal provides a comprehensive interface for all SLM parameters:

#### Device Information
- **Model**: Dropdown selection (NL-43, NL-53)
- **Serial Number**: Text input for the device serial number

#### Measurement Parameters
- **Frequency Weighting**: A, C, or Z (Linear)
- **Time Weighting**: Fast (125ms), Slow (1s), or Impulse
- **Measurement Range**: 30-130 dB, 40-140 dB, or 50-140 dB

#### Network Configuration
- **Assigned Modem**: Dropdown list of available modems
  - Shows modem ID and IP address
  - Option for "No modem (direct connection)"
- **Direct IP Address**: Only shown when no modem is assigned
- **TCP Port**: Only shown when no modem is assigned (default: 502)

### 3. Actions

The modal provides three action buttons:

- **Test Connection**: Tests network connectivity to the SLM
  - Uses current form values (not saved values)
  - Shows a toast notification with results
    - Green: Connection successful
    - Yellow: Connection failed or device offline
    - Red: Test error

- **Cancel**: Closes the modal without saving changes

- **Save Configuration**: Saves all changes to the database
  - Shows a success/error toast
  - Refreshes the unit list on success
  - Auto-closes the modal after 2 seconds

## Implementation

### Frontend Components

#### Unit List Partial
**File**: [templates/partials/slm_unit_list.html](../templates/partials/slm_unit_list.html)

```html
<!-- Configure button (appears on hover) -->
<button onclick="event.stopPropagation(); openConfigModal('{{ unit.id }}');"
        class="absolute top-2 right-2 opacity-0 group-hover:opacity-100...">
    <svg>...</svg>
</button>
```

#### Configuration Modal
**File**: [templates/sound_level_meters.html](../templates/sound_level_meters.html#L73)

```html
<!-- Configuration Modal -->
<div id="config-modal" class="hidden fixed inset-0 bg-black bg-opacity-50...">
    <div id="config-modal-content">
        <!-- Content loaded via HTMX -->
    </div>
</div>
```

#### Configuration Form
**File**: [templates/partials/slm_config_form.html](../templates/partials/slm_config_form.html)

Form fields mapped to database columns:
- `slm_model` → `unit.slm_model`
- `slm_serial_number` → `unit.slm_serial_number`
- `slm_frequency_weighting` → `unit.slm_frequency_weighting`
- `slm_time_weighting` → `unit.slm_time_weighting`
- `slm_measurement_range` → `unit.slm_measurement_range`
- `deployed_with_modem_id` → `unit.deployed_with_modem_id`
- `slm_host` → `unit.slm_host` (legacy, only if no modem)
- `slm_tcp_port` → `unit.slm_tcp_port` (legacy, only if no modem)

### Backend Endpoints

#### GET /api/slm-dashboard/config/{unit_id}
**File**: [backend/routers/slm_dashboard.py:184](../backend/routers/slm_dashboard.py#L184)

Returns the configuration form HTML partial with current unit values pre-populated.

**Response**: HTML partial (slm_config_form.html)

#### POST /api/slm-dashboard/config/{unit_id}
**File**: [backend/routers/slm_dashboard.py:203](../backend/routers/slm_dashboard.py#L203)

Saves configuration changes to the database.

**Request**: Form data with configuration parameters

**Response**: JSON
```json
{
  "status": "success",
  "unit_id": "nl43-001"
}
```

**Behavior** (sketched below):
- Updates all SLM-specific fields from the form data
- If a modem is assigned: clears the legacy `slm_host` and `slm_tcp_port`
- If no modem: uses the direct IP fields from the form
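
A sketch of that save behavior (the real handler may differ):

```python
def apply_network_config(unit, form: dict) -> None:
    """Illustrative sketch only: a modem assignment supersedes the
    legacy direct-connection fields."""
    if form.get("deployed_with_modem_id"):
        unit.deployed_with_modem_id = form["deployed_with_modem_id"]
        unit.slm_host = None       # modem assignment clears legacy direct IP
        unit.slm_tcp_port = None   # ...and the legacy port
    else:
        unit.deployed_with_modem_id = None
        unit.slm_host = form.get("slm_host")          # direct IP from the form
        unit.slm_tcp_port = form.get("slm_tcp_port")  # direct port from the form
```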

### JavaScript Functions

#### openConfigModal(unitId)
**File**: [templates/sound_level_meters.html:127](../templates/sound_level_meters.html#L127)

Opens the configuration modal and loads the form via HTMX.

```javascript
function openConfigModal(unitId) {
    const modal = document.getElementById('config-modal');
    modal.classList.remove('hidden');

    htmx.ajax('GET', `/api/slm-dashboard/config/${unitId}`, {
        target: '#config-modal-content',
        swap: 'innerHTML'
    });
}
```

#### closeConfigModal()
**File**: [templates/sound_level_meters.html:136](../templates/sound_level_meters.html#L136)

Closes the configuration modal.

#### handleConfigSave(event)
**File**: [templates/partials/slm_config_form.html:109](../templates/partials/slm_config_form.html#L109)

Handles the HTMX response after form submission:
- Shows a success/error toast
- Refreshes the unit list
- Auto-closes the modal after 2 seconds

#### testConnection(unitId)
**File**: [templates/partials/slm_config_form.html:129](../templates/partials/slm_config_form.html#L129)

Tests the connection to an SLM unit:
```javascript
async function testConnection(unitId) {
    const response = await fetch(`/api/slmm/${unitId}/status`);
    const data = await response.json();

    if (response.ok && data.status === 'online') {
        // Show success toast
    } else {
        // Show warning toast
    }
}
```

#### loadModemsForConfig()
**File**: [templates/partials/slm_config_form.html:87](../templates/partials/slm_config_form.html#L87)

Loads available modems from `/api/roster/modems` and populates the dropdown.

## User Workflow

### Configuring an SLM

1. Navigate to the Sound Level Meters dashboard ([/sound-level-meters](../sound-level-meters))
2. Hover over the desired SLM unit card in the list
3. Click the gear icon that appears in the top-right corner
4. The configuration modal opens with current values pre-filled
5. Modify the desired parameters:
   - Update model/serial if needed
   - Set measurement parameters (frequency/time weighting, range)
   - Choose the network configuration:
     - **Option A**: Select a modem from the dropdown (recommended)
     - **Option B**: Enter a direct IP address and port
6. (Optional) Click "Test Connection" to verify network settings
7. Click "Save Configuration"
8. The modal shows a success message and auto-closes
9. The unit list refreshes to show the updated information

### Network Configuration Options

**Modem Assignment (Recommended)**:
- Select a modem from the dropdown
- The IP address is automatically resolved from the modem's `ip_address` field
- Direct IP/port fields are hidden
- Enables modem tracking and management

**Direct Connection (Legacy)**:
- Select "No modem (direct connection)"
- Enter the IP address and TCP port manually
- Direct IP/port fields become visible
- Useful for temporary setups or non-modem connections

## Database Schema

The configuration interface updates these `roster` table columns:

```sql
-- SLM-specific fields
slm_model VARCHAR                  -- Device model (NL-43, NL-53)
slm_serial_number VARCHAR          -- Serial number
slm_frequency_weighting VARCHAR    -- A, C, or Z weighting
slm_time_weighting VARCHAR         -- Fast, Slow, or Impulse
slm_measurement_range VARCHAR      -- Measurement range (30-130, 40-140, 50-140)

-- Network configuration
deployed_with_modem_id VARCHAR     -- FK to modem unit (preferred method)
slm_host VARCHAR                   -- Legacy direct IP (only if no modem)
slm_tcp_port INTEGER               -- Legacy TCP port (only if no modem)
```

## UI/UX Design

### Modal Behavior
- **Opens**: Via the configure button on the unit card
- **Closes**:
  - Cancel button
  - X button in the header
  - Escape key
  - Clicking outside the modal (on the backdrop)
- **Auto-close**: After a successful save (2-second delay)

### Responsive Design
- **Desktop**: Configuration button appears on hover
- **Mobile**: Configuration button always visible
- **Modal**: Responsive width, scrollable on small screens
- **Form**: Two-column layout on desktop, single column on mobile

### Visual Feedback
- **Loading**: Skeleton loader while the form loads
- **Saving**: HTMX handles form submission
- **Success**: Green toast notification
- **Error**: Red toast notification
- **Testing**: Blue toast while testing, then green/yellow/red based on the result

### Accessibility
- **Keyboard**: Modal can be closed with the Escape key
- **Focus**: Modal traps focus when open
- **Labels**: All form fields have proper labels
- **Colors**: Sufficient contrast in dark/light modes

## Future Enhancements

Potential improvements for future versions:

1. **Bulk Configuration**: Configure multiple SLMs at once
2. **Configuration Templates**: Save and apply configuration presets
3. **Configuration History**: Track configuration changes over time
4. **Remote Configuration**: Push configuration directly to the device via SLMM
5. **Validation**: Real-time validation of IP addresses and ports
6. **Advanced Settings**: Additional NL-43/NL-53 specific parameters
7. **Configuration Import/Export**: JSON/CSV configuration files

## Related Documentation

- [SOUND_LEVEL_METERS_DASHBOARD.md](SOUND_LEVEL_METERS_DASHBOARD.md) - Main SLM dashboard
- [MODEM_INTEGRATION.md](MODEM_INTEGRATION.md) - Modem assignment architecture
- [DEVICE_TYPE_SLM_SUPPORT.md](DEVICE_TYPE_SLM_SUPPORT.md) - SLM device type implementation
333
docs/SOUND_LEVEL_METERS_DASHBOARD.md
Normal file
@@ -0,0 +1,333 @@
# Sound Level Meters Dashboard

## Overview

The Sound Level Meters dashboard is a new feature in SFM (soon to be rebranded as Terra-view) that provides real-time monitoring and control of Rion NL-43/NL-53 sound level meters through the SLMM backend integration.

## Features

### 1. Dashboard Summary Statistics
- **Total Units**: Count of all SLM devices in the system
- **Deployed Units**: Active devices currently in the field
- **Active Now**: Units that have checked in within the last hour
- **Benched Units**: Devices not currently deployed

### 2. Unit List (Sidebar)
- Searchable list of all deployed SLM units
- Real-time status indicators:
  - 🟢 Green: Active (recently checked in)
  - ⚪ Gray: No check-in data
- Quick unit information:
  - Device model (NL-43, NL-53, etc.)
  - Location/address
  - Network address (IP:port)
- Click any unit to view its live data

### 3. Live View Panel

When a unit is selected, the live view panel displays:

#### Control Buttons
- **Start**: Begin measurement
- **Pause**: Pause the current measurement
- **Stop**: Stop measurement
- **Reset**: Reset measurement data
- **Start Live Stream**: Open a WebSocket connection for real-time DRD data

#### Real-time Metrics
- **Lp (Current)**: Instantaneous sound level in dB
- **Leq (Average)**: Equivalent continuous sound level
- **Lmax (Peak)**: Maximum sound level recorded
- **Lmin**: Minimum sound level recorded

#### Live Chart
- Real-time line chart showing Lp and Leq over time
- 60-second rolling window (adjustable)
- Chart.js-powered visualization with dark mode support
- No animation, for smooth real-time updates

#### Device Information
- Battery level and power source
- Frequency weighting (A, C, Z)
- Time weighting (F, S, I)
- SD card remaining space

## Architecture

### Frontend Components

#### Main Template
**File**: `templates/sound_level_meters.html`

The main dashboard page that includes:
- Page header and navigation integration
- Stats summary section (auto-refreshes every 10s)
- Two-column layout: unit list (left) + live view (right)
- JavaScript functions for unit selection and WebSocket streaming

#### Partial Templates

1. **slm_stats.html** - Summary statistics cards
   - Auto-loads on page load
   - Refreshes every 10 seconds via HTMX

2. **slm_unit_list.html** - Searchable unit list
   - Auto-loads on page load
   - Refreshes every 10 seconds via HTMX
   - Supports search filtering

3. **slm_live_view.html** - Live data panel for the selected unit
   - Loaded on demand when a unit is selected
   - Includes Chart.js for visualization
   - WebSocket connection for streaming data

4. **slm_live_view_error.html** - Error state display

### Backend Components

#### Router: `backend/routers/slm_dashboard.py`

**Endpoints:**

```python
GET /api/slm-dashboard/stats
```
Returns an HTML partial with summary statistics.

```python
GET /api/slm-dashboard/units?search={term}
```
Returns an HTML partial with the filtered unit list.

```python
GET /api/slm-dashboard/live-view/{unit_id}
```
Returns an HTML partial with the live view panel for a specific unit.
- Fetches unit details from the database
- Queries the SLMM API for the current measurement state
- Queries the SLMM API for live status (DOD data)

```python
POST /api/slm-dashboard/control/{unit_id}/{action}
```
Sends control commands to the SLMM backend.
- Valid actions: start, stop, pause, resume, reset
- Proxies to `http://localhost:8100/api/nl43/{unit_id}/{action}` (sketched below)
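
A minimal sketch of such a proxy handler (the actual implementation in `slm_dashboard.py` may differ; the `SLMM_BASE` constant and the use of `httpx` are assumptions):

```python
# Illustrative proxy sketch only.
import httpx
from fastapi import HTTPException

SLMM_BASE = "http://localhost:8100"
VALID_ACTIONS = {"start", "stop", "pause", "resume", "reset"}

async def control_unit(unit_id: str, action: str) -> dict:
    if action not in VALID_ACTIONS:
        raise HTTPException(status_code=400, detail=f"Invalid action: {action}")
    async with httpx.AsyncClient() as client:
        # Forward the command to the SLMM backend and relay its response
        resp = await client.post(f"{SLMM_BASE}/api/nl43/{unit_id}/{action}")
        resp.raise_for_status()
        return resp.json()
```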

### Integration with SLMM

The dashboard communicates with the SLMM backend service running on port 8100:

**REST API Calls:**
- `GET /api/nl43/{unit_id}/measurement-state` - Check if measuring
- `GET /api/nl43/{unit_id}/live` - Get current DOD data
- `POST /api/nl43/{unit_id}/start|stop|pause|resume|reset` - Control commands

**WebSocket Streaming:**
- `WS /api/nl43/{unit_id}/live` - Real-time DRD data stream
- Proxied through SFM at `/api/slmm/{unit_id}/live`
- Streams continuous measurement data for live charting

### Database Schema

**Table**: `roster`

SLM-specific fields in the RosterUnit model:

```python
device_type = "slm"               # Distinguishes SLMs from seismographs
slm_host = String                 # Device IP or hostname
slm_tcp_port = Integer            # TCP control port (default 2255)
slm_model = String                # NL-43, NL-53, etc.
slm_serial_number = String        # Device serial number
slm_frequency_weighting = String  # A, C, or Z weighting
slm_time_weighting = String       # F (Fast), S (Slow), I (Impulse)
slm_measurement_range = String    # e.g., "30-130 dB"
slm_last_check = DateTime         # Last communication timestamp
```

## Navigation

The Sound Level Meters page is accessible from:
- **URL**: `/sound-level-meters`
- **Sidebar**: "Sound Level Meters" menu item (between Fleet Roster and Projects)
- **Icon**: Speaker/sound wave SVG icon

## Real-time Updates

The dashboard uses three mechanisms for real-time updates:

1. **HTMX Polling** (10-second intervals)
   - Summary statistics
   - Unit list
   - Ensures data freshness even without user interaction

2. **On-Demand Loading** (HTMX)
   - Live view panel loads when a unit is selected
   - Control button responses

3. **WebSocket Streaming** (continuous)
   - Real-time DRD data for live charting
   - User-initiated via the "Start Live Stream" button
   - Automatically closed on page unload or unit change

## Measurement Duration Tracking

**Important**: The NL-43/NL-53 devices do not expose measurement duration via their API. Elapsed time and interval counts are only visible on the device's on-screen display (OSD).

**Solution**: Track the measurement start time in your application when calling the `/start` endpoint:

```javascript
// When starting measurement
const startTime = new Date();
localStorage.setItem(`slm_${unitId}_start`, startTime.toISOString());

// Later: calculate elapsed time (read back under a new name to avoid
// redeclaring startTime in the same scope)
const startedAt = new Date(localStorage.getItem(`slm_${unitId}_start`));
const elapsed = (new Date() - startedAt) / 1000; // seconds
```

**Future Enhancement**: The SLMM backend could store measurement start times in a database table to track duration across sessions.
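
One possible shape for such a table, following the SQLAlchemy column style used elsewhere in the project (hypothetical: neither the table nor the `Base` import path exists in `models.py` today):

```python
# Hypothetical sketch only.
from sqlalchemy import Column, DateTime, Integer, String

from backend.database import Base  # assumed declarative base location

class MeasurementSession(Base):
    __tablename__ = "slm_measurement_sessions"

    id = Column(Integer, primary_key=True, autoincrement=True)
    unit_id = Column(String, nullable=False, index=True)  # roster unit ID
    started_at = Column(DateTime, nullable=False)         # set on /start
    stopped_at = Column(DateTime, nullable=True)          # set on /stop
```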

## Testing

### Add Test Data

Use the included script to add test SLM units:

```bash
python3 add_test_slms.py
```

This creates:
- 3 deployed test units (nl43-001, nl43-002, nl53-001)
- 1 benched unit (nl43-003)

### Running the Dashboard

1. Start the SLMM backend (port 8100):
   ```bash
   cd /home/serversdown/slmm
   uvicorn main:app --host 0.0.0.0 --port 8100
   ```

2. Start SFM (port 8000):
   ```bash
   cd /home/serversdown/sfm/seismo-fleet-manager
   uvicorn backend.main:app --host 0.0.0.0 --port 8000 --reload
   ```

3. Access the dashboard:
   ```
   http://localhost:8000/sound-level-meters
   ```

### Testing Without Physical Devices

The dashboard will work without physical NL-43 devices connected:
- The unit list will display based on database records
- The live view will show connection errors (gracefully handled)
- Mock data can be added to SLMM for testing

## Future Enhancements

### Near-term
1. **Measurement Duration Tracking**
   - Add a database table to track measurement sessions
   - Display elapsed time in the live view
   - Store start/stop timestamps

2. **Historical Data View**
   - Chart historical Leq intervals
   - Export measurement data
   - Comparison between units

3. **Alerts & Thresholds**
   - Configurable sound level alerts
   - Email/SMS notifications when thresholds are exceeded
   - Visual indicators on the dashboard

### Long-term
1. **Map View**
   - Display all SLMs on a map (like seismographs)
   - Click map markers to view live data
   - Color-coded by current sound level

2. **Batch Operations**
   - Start/stop multiple units simultaneously
   - Synchronized measurements
   - Group configurations

3. **Advanced Analytics**
   - Noise compliance reports
   - Statistical summaries
   - Trend analysis

## Integration with Terra-view

When SFM is rebranded to Terra-view:

1. **Multi-Module Dashboard**
   - Sound Level Meters module (this dashboard)
   - Seismograph Fleet Manager module (existing)
   - Future monitoring modules

2. **Project Management**
   - Link SLMs to projects
   - Combined project view with seismographs + SLMs
   - Project-level reporting

3. **Unified Navigation**
   - Top-level module switcher
   - Consistent UI/UX across modules
   - Shared authentication and settings

## Technical Notes

### HTMX Integration
The dashboard extensively uses HTMX for dynamic updates without full page reloads:
- `hx-get`: Fetch and swap content
- `hx-trigger`: Auto-refresh intervals
- `hx-swap`: Content replacement strategy
- `hx-target`: Specify the update target

### Dark Mode Support
All components support dark mode:
- Chart.js colors adapt to the theme
- Tailwind dark: classes throughout
- Automatic theme detection

### Performance Considerations
- WebSocket connections are per-unit (only one active at a time)
- Chart data is limited to 60 points (1 minute) to prevent memory bloat
- Polling intervals balanced for responsiveness vs. server load
- Lazy loading of the live view panel (only when a unit is selected)

## Files Modified/Created

### New Files
- `templates/sound_level_meters.html`
- `templates/partials/slm_stats.html`
- `templates/partials/slm_unit_list.html`
- `templates/partials/slm_live_view.html`
- `templates/partials/slm_live_view_error.html`
- `backend/routers/slm_dashboard.py`
- `add_test_slms.py`
- `docs/SOUND_LEVEL_METERS_DASHBOARD.md`

### Modified Files
- `backend/main.py` - Added route and router import
- `templates/base.html` - Added navigation menu item

## Support

For issues or questions:
- Check the SLMM API documentation: `/home/serversdown/slmm/docs/API.md`
- Review the SFM changelog: `CHANGELOG.md`
- Submit issues to the project repository

---

**Version**: 1.0.0
**Created**: January 2026
**Last Updated**: January 5, 2026
546
docs/archive/PROJECTS_SYSTEM_IMPLEMENTATION.md
Normal file
@@ -0,0 +1,546 @@
# Projects System Implementation - Terra-View

## Overview

The Projects system has been successfully scaffolded in Terra-View. This document provides a complete overview of what has been built, how it works, and what needs to be completed.

## ✅ Completed Components

### 1. Database Schema

**Location**: `/backend/models.py`

Seven new tables have been added:

- **ProjectType**: Template definitions for project types (Sound, Vibration, Combined)
- **Project**: Top-level project organization with type reference
- **MonitoringLocation**: Generic locations (NRLs for sound, monitoring points for vibration)
- **UnitAssignment**: Links devices to locations
- **ScheduledAction**: Automated recording control schedules
- **RecordingSession**: Tracks actual recording/monitoring sessions
- **DataFile**: File references for downloaded data

**Key Features**:
- Type-aware design (project_type_id determines features)
- Flexible metadata fields (JSON columns for type-specific data)
- Denormalized fields for efficient queries
- Proper indexing on foreign keys

### 2. Service Layer

#### SLMM Client (`/backend/services/slmm_client.py`)
- Clean wrapper for all SLMM API operations
- Methods for: start/stop/pause/resume recording, get status, configure devices
- Error handling with custom exceptions
- Singleton pattern for easy access (see the sketch below)
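
A minimal sketch of the client's shape (illustrative: `SLMMError` and the method body are assumptions; the base URL and `/start` route come from the SLMM integration described in SOUND_LEVEL_METERS_DASHBOARD.md):

```python
# Illustrative sketch -- slmm_client.py itself may differ.
import httpx

class SLMMError(Exception):
    """Raised when an SLMM API call fails."""

class SLMMClient:
    def __init__(self, base_url: str = "http://localhost:8100"):
        self.base_url = base_url

    def start_recording(self, unit_id: str) -> dict:
        try:
            resp = httpx.post(f"{self.base_url}/api/nl43/{unit_id}/start")
            resp.raise_for_status()
            return resp.json()
        except httpx.HTTPError as exc:
            raise SLMMError(f"start failed for {unit_id}: {exc}") from exc

# Singleton-style module-level instance, as described above
slmm_client = SLMMClient()
```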

#### Device Controller (`/backend/services/device_controller.py`)
- Routes commands to the appropriate backend (SLMM for SLMs, SFM for seismographs)
- Unified interface across device types
- Ready for future SFM implementation

#### Scheduler Service (`/backend/services/scheduler.py`)
- Background task that checks for pending scheduled actions every 60 seconds (see the sketch below)
- Executes actions by calling the device controller
- Creates/updates recording sessions
- Tracks execution status and errors
- Manual execution support for testing
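
A minimal sketch of that polling loop (illustrative: the real service queries the database and routes through the device controller; `get_pending_actions` and `execute_action` are stand-ins for those pieces):

```python
# Illustrative sketch -- scheduler.py itself may differ.
import asyncio
from datetime import datetime, timezone

async def scheduler_loop(get_pending_actions, execute_action, interval: int = 60):
    """Wake every `interval` seconds and run any actions whose time has come."""
    while True:
        now = datetime.now(timezone.utc)
        for action in get_pending_actions(now):
            try:
                execute_action(action)   # routes via the device controller
            except Exception as exc:     # track errors, keep the loop alive
                print(f"Scheduled action {action} failed: {exc}")
        await asyncio.sleep(interval)
```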
|
||||||
|
|
||||||
|
### 3. API Routers
|
||||||
|
|
||||||
|
#### Projects Router (`/backend/routers/projects.py`)
|
||||||
|
Endpoints:
|
||||||
|
- `GET /api/projects/list` - Project list with stats
|
||||||
|
- `GET /api/projects/stats` - Overview statistics
|
||||||
|
- `POST /api/projects/create` - Create new project
|
||||||
|
- `GET /api/projects/{id}` - Get project details
|
||||||
|
- `PUT /api/projects/{id}` - Update project
|
||||||
|
- `DELETE /api/projects/{id}` - Archive project
|
||||||
|
- `GET /api/projects/{id}/dashboard` - Project dashboard data
|
||||||
|
- `GET /api/projects/types/list` - Get project type templates
|
||||||
|
|
||||||
|
#### Project Locations Router (`/backend/routers/project_locations.py`)
|
||||||
|
Endpoints:
|
||||||
|
- `GET /api/projects/{id}/locations` - List locations
|
||||||
|
- `POST /api/projects/{id}/locations/create` - Create location
|
||||||
|
- `PUT /api/projects/{id}/locations/{location_id}` - Update location
|
||||||
|
- `DELETE /api/projects/{id}/locations/{location_id}` - Delete location
|
||||||
|
- `GET /api/projects/{id}/assignments` - List unit assignments
|
||||||
|
- `POST /api/projects/{id}/locations/{location_id}/assign` - Assign unit
|
||||||
|
- `POST /api/projects/{id}/assignments/{assignment_id}/unassign` - Unassign unit
|
||||||
|
- `GET /api/projects/{id}/available-units` - Get units available for assignment
|
||||||
|
|
||||||
|
#### Scheduler Router (`/backend/routers/scheduler.py`)
|
||||||
|
Endpoints:
|
||||||
|
- `GET /api/projects/{id}/scheduler/actions` - List scheduled actions
|
||||||
|
- `POST /api/projects/{id}/scheduler/actions/create` - Create action
|
||||||
|
- `POST /api/projects/{id}/scheduler/schedule-session` - Schedule recording session
|
||||||
|
- `PUT /api/projects/{id}/scheduler/actions/{action_id}` - Update action
|
||||||
|
- `POST /api/projects/{id}/scheduler/actions/{action_id}/cancel` - Cancel action
|
||||||
|
- `DELETE /api/projects/{id}/scheduler/actions/{action_id}` - Delete action
|
||||||
|
- `POST /api/projects/{id}/scheduler/actions/{action_id}/execute` - Manual execution
|
||||||
|
- `GET /api/projects/{id}/scheduler/status` - Scheduler status
|
||||||
|
- `POST /api/projects/{id}/scheduler/execute-pending` - Trigger pending executions
|
||||||
|
|
||||||
|
### 4. Frontend

#### Main Page

**Location**: `/templates/projects/overview.html`

Features:

- Summary statistics cards (projects, locations, assignments, sessions)
- Tabbed interface (All, Active, Completed, Archived)
- Project cards grid layout
- Create project modal with a two-step flow:
  1. Select project type (Sound/Vibration/Combined)
  2. Fill in project details
- HTMX-powered dynamic updates
#### Navigation

**Location**: `/templates/base.html` (updated)

- "Projects" link added to sidebar
- Active state highlighting
### 5. Application Integration

**Location**: `/backend/main.py`

- Routers registered
- Page route added (`/projects`)
- Scheduler service starts on application startup
- Scheduler service stops on application shutdown (wiring sketched below)
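A sketch of that wiring using FastAPI's lifespan hook (the actual code may use `on_event` handlers instead; module names follow the file layout listed below):

```python
# Sketch of the startup/shutdown wiring described above.
from contextlib import asynccontextmanager

from fastapi import FastAPI

from routers import project_locations, projects
from routers import scheduler as scheduler_router
from services.scheduler import SchedulerService

scheduler_service = SchedulerService()


@asynccontextmanager
async def lifespan(app: FastAPI):
    scheduler_service.start()   # begin polling for pending scheduled actions
    yield
    scheduler_service.stop()    # cancel the background task on shutdown


app = FastAPI(lifespan=lifespan)
app.include_router(projects.router)
app.include_router(project_locations.router)
app.include_router(scheduler_router.router)
```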
### 6. Database Initialization

**Script**: `/backend/init_projects_db.py`

- Creates all project tables
- Populates ProjectType with default templates (see the sketch below)
- ✅ Successfully executed - database is ready
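The script's shape is presumably along these lines. The `database` module names are assumptions, and the three template ids mirror the Sound/Vibration/Combined choices in the create-project modal; only `sound_monitoring` is confirmed elsewhere in this document:

```python
# Presumed shape of init_projects_db.py; module names are assumptions.
from database import SessionLocal, engine
from models import Base, ProjectType

# Create all project tables defined on the declarative Base
Base.metadata.create_all(bind=engine)

# Seed the default project type templates (skipped if already present)
with SessionLocal() as db:
    if db.query(ProjectType).count() == 0:
        db.add_all([
            ProjectType(id="sound_monitoring", name="Sound Monitoring",
                        supports_sound=True, supports_vibration=False),
            ProjectType(id="vibration_monitoring", name="Vibration Monitoring",
                        supports_sound=False, supports_vibration=True),
            ProjectType(id="combined_monitoring", name="Combined Monitoring",
                        supports_sound=True, supports_vibration=True),
        ])
        db.commit()
```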
---

## 📁 File Organization

```
terra-view/
├── backend/
│   ├── models.py                    [✅ Updated]
│   ├── init_projects_db.py          [✅ Created]
│   ├── main.py                      [✅ Updated]
│   ├── routers/
│   │   ├── projects.py              [✅ Created]
│   │   ├── project_locations.py     [✅ Created]
│   │   └── scheduler.py             [✅ Created]
│   └── services/
│       ├── slmm_client.py           [✅ Created]
│       ├── device_controller.py     [✅ Created]
│       └── scheduler.py             [✅ Created]
├── templates/
│   ├── base.html                    [✅ Updated]
│   ├── projects/
│   │   └── overview.html            [✅ Created]
│   └── partials/
│       └── projects/                [📁 Created, empty]
└── data/
    └── seismo_fleet.db              [✅ Tables created]
```
---

## 🔨 What Still Needs to be Built

### 1. Frontend Templates (Partials)

**Directory**: `/templates/partials/projects/`

**Required Files**:
#### `project_stats.html`
Stats cards for the overview page:
- Total/Active/Completed projects
- Total locations
- Assigned units
- Active sessions

#### `project_list.html`
Project cards grid:
- Project name, type, status
- Location count, unit count
- Active session indicator
- Link to project dashboard

#### `project_dashboard.html`
Main project dashboard panel with tabs:
- Summary stats
- Active locations and assignments
- Upcoming scheduled actions
- Recent sessions

#### `location_list.html`
Location cards/table:
- Location name, type, coordinates
- Assigned unit (if any)
- Session count
- Assign/unassign button

#### `assignment_list.html`
Unit assignment table:
- Unit ID, device type
- Location name
- Assignment dates
- Status
- Unassign button

#### `scheduler_agenda.html`
Calendar/agenda view:
- Scheduled actions sorted by time
- Action type (start/stop/download)
- Location and unit
- Status indicator
- Cancel/execute buttons
### 2. Project Dashboard Page

**Location**: `/templates/projects/project_dashboard.html`

Full project detail page with:
- Header with project name, type, status
- Tab navigation (Dashboard, Scheduler, Locations, Units, Data, Settings)
- Tab content areas
- Modals for adding locations and scheduling sessions
### 3. Additional UI Components

- Project type selection cards (with icons)
- Location creation modal
- Unit assignment modal
- Schedule session modal (with date/time picker)
- Data file browser
### 4. SLMM Enhancements

**Location**: `/slmm/app/routers.py` (SLMM repo)

New endpoint needed:

```python
POST /api/nl43/{unit_id}/ftp/download
```

This should:
- Accept a `destination_path` and a `files` list
- Connect to the SLM via FTP
- Download the specified files
- Save them to Terra-View's `data/Projects/` directory
- Return a file list with metadata (a possible implementation is sketched below)
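One possible shape for that endpoint, sketched with Python's standard `ftplib`. The host lookup, credentials, and response schema here are assumptions; the request fields follow the bullets above:

```python
# Hypothetical sketch of the SLMM FTP download endpoint.
from ftplib import FTP
from pathlib import Path

from fastapi import APIRouter, HTTPException
from pydantic import BaseModel

router = APIRouter()

# Hypothetical lookup; in SLMM this would come from the device registry.
UNIT_FTP_HOSTS = {"nl43-001": ("192.168.10.101", 21)}


class FtpDownloadRequest(BaseModel):
    destination_path: str
    files: list[str]


@router.post("/api/nl43/{unit_id}/ftp/download")
def ftp_download(unit_id: str, req: FtpDownloadRequest):
    if unit_id not in UNIT_FTP_HOSTS:
        raise HTTPException(status_code=404, detail=f"Unknown unit: {unit_id}")
    host, port = UNIT_FTP_HOSTS[unit_id]
    dest = Path(req.destination_path)
    dest.mkdir(parents=True, exist_ok=True)

    downloaded = []
    with FTP() as ftp:                 # ftplib is blocking, so this handler is sync
        ftp.connect(host=host, port=port)
        ftp.login()                    # anonymous login; real credentials are an assumption
        for name in req.files:
            target = dest / name
            with open(target, "wb") as fh:
                ftp.retrbinary(f"RETR {name}", fh.write)
            downloaded.append({"file": name, "size_bytes": target.stat().st_size})
    return {"unit_id": unit_id, "files": downloaded}
```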
### 5. SFM Client (Future)

**Location**: `/backend/services/sfm_client.py` (to be created)

Similar to the SLMM client, but for seismographs:
- Get seismograph status
- Start/stop recording
- Download data files
- Integrate with the device controller
---

## 🚀 Testing the System

### 1. Start Terra-View

```bash
cd /home/serversdown/tmi/terra-view
# Start Terra-View (however you normally start it)
```

Verify in the logs:

```
Starting scheduler service...
Scheduler service started
```
### 2. Navigate to Projects

Open a browser: `http://localhost:8001/projects`

You should see:
- Summary stats cards (all zeros initially)
- Tabs (All Projects, Active, Completed, Archived)
- A "New Project" button
### 3. Create a Project

1. Click "New Project"
2. Select a project type (e.g., "Sound Monitoring")
3. Fill in details:
   - Name: "Test Sound Project"
   - Client: "Test Client"
   - Start Date: Today
4. Submit
### 4. Test API Endpoints

```bash
# Get project types
curl http://localhost:8001/api/projects/types/list

# Get the projects list
curl http://localhost:8001/api/projects/list

# Get project stats
curl http://localhost:8001/api/projects/stats
```

### 5. Test Scheduler Status

```bash
curl http://localhost:8001/api/projects/{project_id}/scheduler/status
```
---

## 📋 Dataflow Examples

### Creating and Scheduling a Recording Session

1. **User creates project** → Project record in DB
2. **User adds NRL** → MonitoringLocation record
3. **User assigns SLM to NRL** → UnitAssignment record
4. **User schedules recording** → 2 ScheduledAction records (start + stop; see the request sketch after this list)
5. **Scheduler runs every minute** → Checks for pending actions
6. **Start action time arrives** → Scheduler calls SLMM via the device controller
7. **SLMM sends TCP command to SLM** → Recording starts
8. **RecordingSession created** → Tracks the session
9. **Stop action time arrives** → Scheduler stops recording
10. **Session updated** → `stopped_at`, `duration_seconds` filled
11. **User triggers download** → Files copied to `data/Projects/{project_id}/sound/{nrl_name}/`
12. **DataFile records created** → Track file references
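For step 4, the schedule-session call might look like the following. The field names in the JSON body are assumptions; only the endpoint path is documented above:

```python
# Illustrative request against the schedule-session endpoint.
import httpx

resp = httpx.post(
    "http://localhost:8001/api/projects/PROJECT_ID/scheduler/schedule-session",
    json={
        "location_id": "LOCATION_UUID",
        "unit_id": "nl43-001",
        "start_time": "2024-01-16T08:00:00",
        "stop_time": "2024-01-16T18:00:00",
    },
)
print(resp.json())  # expected to reference the two ScheduledAction records (start + stop)
```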
---

## 🎨 UI Design Patterns

### Established Patterns (from the SLM dashboard):

1. **Stats Cards**: 4-column grid, auto-refresh every 30s
2. **Sidebar Lists**: Searchable, filterable, auto-refresh
3. **Main Panel**: Large central area for details
4. **Modals**: Centered, overlay background
5. **HTMX**: All dynamic updates, minimal JavaScript
6. **Tailwind**: Consistent styling with dark mode support

### Color Scheme:

- Primary: `seismo-orange` (#f48b1c)
- Secondary: `seismo-navy` (#142a66)
- Accent: `seismo-burgundy` (#7d234d)
---

## 🔧 Configuration

### Environment Variables

- `SLMM_BASE_URL`: SLMM backend URL (default: `http://localhost:8100`)
- `ENVIRONMENT`: `"development"` or `"production"`

### Scheduler Settings

Located in `/backend/services/scheduler.py`:
- `check_interval`: 60 seconds (adjust as needed)
---

## 📚 Next Steps

### Immediate (Get Basic UI Working):
1. Create partial templates (stats, lists)
2. Test creating projects via the UI
3. Implement the project dashboard page

### Short-term (Core Features):
4. Add location management UI
5. Add unit assignment UI
6. Add scheduler UI (agenda view)

### Medium-term (Data Flow):
7. Implement the SLMM download endpoint
8. Test the full recording workflow
9. Add a file browser for downloaded data

### Long-term (Complete System):
10. Implement the SFM client for seismographs
11. Add data visualization
12. Add project reporting
13. Add user authentication
---

## 🐛 Known Issues / TODOs

1. **Partial templates missing**: Need to create HTML templates for all partials
2. **SLMM download endpoint**: Needs implementation in the SLMM backend
3. **Project dashboard page**: Not yet created
4. **SFM integration**: Placeholder only, needs a real implementation
5. **File download tracking**: DataFile records not yet created after downloads
6. **Error handling**: Need better user-facing error messages
7. **Validation**: Form validation could be improved
8. **Testing**: No automated tests yet
---

## 📖 API Documentation

### Project Type Object

```json
{
  "id": "sound_monitoring",
  "name": "Sound Monitoring",
  "description": "...",
  "icon": "volume-2",
  "supports_sound": true,
  "supports_vibration": false
}
```

### Project Object

```json
{
  "id": "uuid",
  "name": "Project Name",
  "description": "...",
  "project_type_id": "sound_monitoring",
  "status": "active",
  "client_name": "Client Inc",
  "site_address": "123 Main St",
  "site_coordinates": "40.7128,-74.0060",
  "start_date": "2024-01-15",
  "end_date": null,
  "created_at": "2024-01-15T10:00:00",
  "updated_at": "2024-01-15T10:00:00"
}
```

### MonitoringLocation Object

```json
{
  "id": "uuid",
  "project_id": "uuid",
  "location_type": "sound",
  "name": "NRL-001",
  "description": "...",
  "coordinates": "40.7128,-74.0060",
  "address": "123 Main St",
  "location_metadata": "{...}",
  "created_at": "2024-01-15T10:00:00"
}
```

### UnitAssignment Object

```json
{
  "id": "uuid",
  "unit_id": "nl43-001",
  "location_id": "uuid",
  "project_id": "uuid",
  "device_type": "sound_level_meter",
  "assigned_at": "2024-01-15T10:00:00",
  "assigned_until": null,
  "status": "active",
  "notes": "..."
}
```

### ScheduledAction Object

```json
{
  "id": "uuid",
  "project_id": "uuid",
  "location_id": "uuid",
  "unit_id": "nl43-001",
  "action_type": "start",
  "device_type": "sound_level_meter",
  "scheduled_time": "2024-01-16T08:00:00",
  "executed_at": null,
  "execution_status": "pending",
  "module_response": null,
  "error_message": null
}
```
---

## 🎓 Architecture Decisions

### Why Project Types?
Allows the system to scale to different monitoring scenarios (air quality, multi-hazard, etc.) without code changes. Just add a new ProjectType record and the UI adapts.

### Why Generic MonitoringLocation?
Instead of separate NRL and MonitoringPoint tables, one table with a `location_type` discriminator keeps the schema clean and allows for combined projects.

### Why Denormalized Fields?
Fields like `project_id` on UnitAssignment (already reachable through the location) enable faster queries without joins.

### Why Scheduler in Terra-View?
Terra-View is the orchestration layer; SLMM only handles device communication. Keeping scheduling logic in Terra-View allows for complex workflows across multiple device types.

### Why JSON Metadata Columns?
Type-specific fields (like `ambient_conditions` for sound projects) don't apply to all location types. JSON columns provide flexibility without cluttering the schema. A model sketch illustrating this follows.
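How such a column can appear in the models, based on the MonitoringLocation object documented above. Whether the backend uses a native JSON column type or a Text column holding a JSON string is an assumption:

```python
# Sketch only; column names follow the MonitoringLocation object above.
from sqlalchemy import Column, String, Text
from sqlalchemy.orm import declarative_base

Base = declarative_base()


class MonitoringLocation(Base):
    __tablename__ = "monitoring_locations"  # assumed table name

    id = Column(String, primary_key=True)
    project_id = Column(String, index=True)   # FK to projects; indexed for fast lookups
    location_type = Column(String)            # "sound" | "vibration" discriminator
    name = Column(String)
    coordinates = Column(String)
    location_metadata = Column(Text)          # type-specific fields stored as a JSON string
```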
---

## 💡 Tips for Continuing Development

1. **Follow Existing Patterns**: Look at the SLM dashboard code for reference
2. **Use HTMX Aggressively**: Minimize JavaScript; let HTMX handle updates
3. **Keep Routers Thin**: Move business logic to the service layer
4. **Return HTML Partials**: Most endpoints should return HTML, not JSON
5. **Test Incrementally**: Build one partial at a time and test it in the browser
6. **Check Logs**: The scheduler logs its execution attempts
7. **Use Browser DevTools**: The Network tab shows HTMX requests
---

## 📞 Support

For questions or issues:
1. Check this document first
2. Review the existing dashboards (SLM, Seismographs) for patterns
3. Check the logs for scheduler execution details
4. Test API endpoints with curl to isolate issues
---

## ✅ Checklist for Completion

- [x] Database schema designed
- [x] Models created
- [x] Migration script run successfully
- [x] Service layer complete (SLMM client, device controller, scheduler)
- [x] API routers created (projects, locations, scheduler)
- [x] Navigation updated
- [x] Main overview page created
- [x] Routes registered in main.py
- [x] Scheduler service integrated
- [ ] Partial templates created
- [ ] Project dashboard page created
- [ ] Location management UI
- [ ] Unit assignment UI
- [ ] Scheduler UI (agenda view)
- [ ] SLMM download endpoint implemented
- [ ] Full workflow tested end-to-end
- [ ] SFM client implemented (future)
---

**Last Updated**: 2026-01-12

**Database Status**: ✅ Initialized

**Backend Status**: ✅ Complete

**Frontend Status**: 🟡 Partial (overview page only)

**Ready for Testing**: ✅ Yes (basic functionality)
17  docs/archive/README.md  Normal file
@@ -0,0 +1,17 @@
# Terra-View Documentation Archive

This directory contains old documentation files that are no longer actively maintained but preserved for historical reference.

## Archived Documents

### PROJECTS_SYSTEM_IMPLEMENTATION.md
Early implementation notes for the projects system. Superseded by current documentation in the main docs directory.

### .aider.chat.history.md
AI assistant chat history from development sessions. Contains context and the decision-making process.

## Note

These documents may contain outdated information. For current documentation, see:
- [Main README](../../README.md)
- [Active Documentation](../)
37  migrate_watcher_agents.py  Normal file
@@ -0,0 +1,37 @@
"""
|
||||||
|
Migration: add watcher_agents table.
|
||||||
|
|
||||||
|
Safe to run multiple times (idempotent).
|
||||||
|
"""
|
||||||
|
|
||||||
|
import sqlite3
|
||||||
|
import os
|
||||||
|
|
||||||
|
DB_PATH = os.path.join(os.path.dirname(__file__), "data", "seismo.db")
|
||||||
|
|
||||||
|
|
||||||
|
def migrate():
|
||||||
|
con = sqlite3.connect(DB_PATH)
|
||||||
|
cur = con.cursor()
|
||||||
|
|
||||||
|
cur.execute("""
|
||||||
|
CREATE TABLE IF NOT EXISTS watcher_agents (
|
||||||
|
id TEXT PRIMARY KEY,
|
||||||
|
source_type TEXT NOT NULL,
|
||||||
|
version TEXT,
|
||||||
|
last_seen DATETIME,
|
||||||
|
status TEXT NOT NULL DEFAULT 'unknown',
|
||||||
|
ip_address TEXT,
|
||||||
|
log_tail TEXT,
|
||||||
|
update_pending INTEGER NOT NULL DEFAULT 0,
|
||||||
|
update_version TEXT
|
||||||
|
)
|
||||||
|
""")
|
||||||
|
|
||||||
|
con.commit()
|
||||||
|
con.close()
|
||||||
|
print("Migration complete: watcher_agents table ready.")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
migrate()
|
||||||
19  rebuild-dev.sh  Executable file
@@ -0,0 +1,19 @@
#!/bin/bash
# Dev rebuild script — increments build number, rebuilds and restarts terra-view
set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
BUILD_FILE="$SCRIPT_DIR/build_number.txt"

# Read and increment build number
BUILD_NUMBER=$(cat "$BUILD_FILE" 2>/dev/null || echo "0")
BUILD_NUMBER=$((BUILD_NUMBER + 1))
echo "$BUILD_NUMBER" > "$BUILD_FILE"

echo "Building terra-view dev (build #$BUILD_NUMBER)..."

cd "$SCRIPT_DIR"
docker compose build --build-arg BUILD_NUMBER="$BUILD_NUMBER" terra-view
docker compose up -d terra-view

echo "Done — terra-view v0.6.1-$BUILD_NUMBER is running on :1001"
12  rebuild-prod.sh  Executable file
@@ -0,0 +1,12 @@
#!/bin/bash
# Production rebuild script — rebuilds and restarts terra-view on :8001
set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

echo "Building terra-view production..."
docker compose -f docker-compose.yml build terra-view
docker compose -f docker-compose.yml up -d terra-view

echo "Done — terra-view production is running on :8001"
@@ -6,3 +6,5 @@ python-multipart==0.0.6
 jinja2==3.1.2
 aiofiles==23.2.1
 Pillow==10.1.0
+httpx==0.25.2
+openpyxl==3.1.2
@@ -1,6 +1,23 @@
-unit_id,unit_type,deployed,retired,note,project_id,location
-BE1234,series3,true,false,Primary unit at main site,PROJ-001,San Francisco CA
-BE5678,series3,true,false,Backup sensor,PROJ-001,Los Angeles CA
-BE9012,series3,false,false,In maintenance,PROJ-002,Workshop
-BE3456,series3,true,false,,PROJ-003,New York NY
-BE7890,series3,false,true,Decommissioned 2024,,Storage
+unit_id,device_type,unit_type,deployed,retired,note,project_id,location,address,coordinates,last_calibrated,next_calibration_due,deployed_with_modem_id,ip_address,phone_number,hardware_model,slm_host,slm_tcp_port,slm_ftp_port,slm_model,slm_serial_number,slm_frequency_weighting,slm_time_weighting,slm_measurement_range
+# ============================================
+# SEISMOGRAPHS (device_type=seismograph)
+# ============================================
+BE1234,seismograph,series3,true,false,Primary unit at main site,PROJ-001,San Francisco CA,123 Market St,37.7749;-122.4194,2025-06-15,2026-06-15,MDM001,,,,,,,,,,,
+BE5678,seismograph,series3,true,false,Backup sensor,PROJ-001,Los Angeles CA,456 Sunset Blvd,34.0522;-118.2437,2025-03-01,2026-03-01,MDM002,,,,,,,,,,,
+BE9012,seismograph,series4,false,false,In maintenance - needs calibration,PROJ-002,Workshop,789 Industrial Way,,,,,,,,,,,,,,
+BE3456,seismograph,series3,true,false,,PROJ-003,New York NY,101 Broadway,40.7128;-74.0060,2025-01-10,2026-01-10,,,,,,,,,,,
+BE7890,seismograph,series3,false,true,Decommissioned 2024,,Storage,Warehouse B,,,,,,,,,,,,,,,
+# ============================================
+# MODEMS (device_type=modem)
+# ============================================
+MDM001,modem,,true,false,Cradlepoint at SF site,PROJ-001,San Francisco CA,123 Market St,37.7749;-122.4194,,,,,192.168.1.100,+1-555-0101,IBR900,,,,,,,
+MDM002,modem,,true,false,Sierra Wireless at LA site,PROJ-001,Los Angeles CA,456 Sunset Blvd,34.0522;-118.2437,,,,,10.0.0.50,+1-555-0102,RV55,,,,,,,
+MDM003,modem,,false,false,Spare modem in storage,,,Storage,Warehouse A,,,,,,+1-555-0103,IBR600,,,,,,,
+MDM004,modem,,true,false,NYC backup modem,PROJ-003,New York NY,101 Broadway,40.7128;-74.0060,,,,,172.16.0.25,+1-555-0104,IBR1700,,,,,,,
+# ============================================
+# SOUND LEVEL METERS (device_type=slm)
+# ============================================
+SLM001,slm,,true,false,NL-43 at construction site A,PROJ-004,Downtown Site,500 Main St,40.7589;-73.9851,,,,,,,,192.168.10.101,2255,21,NL-43,12345678,A,F,30-130 dB
+SLM002,slm,,true,false,NL-43 at construction site B,PROJ-004,Midtown Site,600 Park Ave,40.7614;-73.9776,,,MDM004,,,,,192.168.10.102,2255,21,NL-43,12345679,A,S,30-130 dB
+SLM003,slm,,false,false,NL-53 spare unit,,,Storage,Warehouse A,,,,,,,,,,,NL-53,98765432,C,F,25-138 dB
+SLM004,slm,,true,false,NL-43 nighttime monitoring,PROJ-005,Residential Area,200 Quiet Lane,40.7484;-73.9857,,,,,,,,10.0.5.50,2255,21,NL-43,11112222,A,S,30-130 dB
20  scripts/README.md  Normal file
@@ -0,0 +1,20 @@
# Terra-View Utility Scripts

This directory contains utility scripts for database operations, testing, and maintenance.

## Scripts

### create_test_db.py
Generate a realistic test database with sample data.

Usage: python scripts/create_test_db.py

### rename_unit.py
Rename a unit ID across all tables.

Usage: python scripts/rename_unit.py <old_id> <new_id>

### sync_slms_to_slmm.py
Manually sync all SLM devices from Terra-View to SLMM.

Usage: python scripts/sync_slms_to_slmm.py
39  scripts/add_slm_ftp_port.py  Normal file
@@ -0,0 +1,39 @@
#!/usr/bin/env python3
"""
Add slm_ftp_port column to roster table for FTP data retrieval port
"""

from sqlalchemy import create_engine, text
import os

# Determine database based on environment
ENVIRONMENT = os.getenv("ENVIRONMENT", "production")
if ENVIRONMENT == "development":
    DB_URL = "sqlite:///./data-dev/seismo_fleet.db"
else:
    DB_URL = "sqlite:///./data/seismo_fleet.db"


def add_ftp_port_column():
    print(f"Adding slm_ftp_port column to {DB_URL}...")
    print("=" * 60)

    engine = create_engine(DB_URL, connect_args={"check_same_thread": False})

    with engine.connect() as conn:
        try:
            # Try to add the column
            conn.execute(text("ALTER TABLE roster ADD COLUMN slm_ftp_port INTEGER"))
            conn.commit()
            print("✓ Added column: slm_ftp_port (INTEGER)")
        except Exception as e:
            if "duplicate column name" in str(e).lower():
                print("  Column slm_ftp_port already exists, skipping")
            else:
                print(f"✗ Error adding slm_ftp_port: {e}")
                raise

    print("=" * 60)
    print("Migration completed!")


if __name__ == "__main__":
    add_ftp_port_column()