diff --git a/.dockerignore b/.dockerignore index f26b729..6fae4b7 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,3 +1,5 @@ +docker-compose.override.yml + # Python cache / compiled __pycache__ *.pyc @@ -28,6 +30,7 @@ ENV/ # Runtime data (mounted volumes) data/ +data-dev/ # Editors / OS junk .vscode/ diff --git a/.gitignore b/.gitignore index 4f189d8..466d45f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,15 @@ +# Terra-View Specifics +# Dev build counter (local only, never commit) +build_number.txt + +# SQLite database files +*.db +*.db-journal +data/ +data-dev/ +.aider* +docker-compose.override.yml + # Byte-compiled / optimized / DLL files __pycache__/ *.py[codz] @@ -206,10 +219,3 @@ marimo/_static/ marimo/_lsp/ __marimo__/ -# Seismo Fleet Manager -# SQLite database files -*.db -*.db-journal -data/ -.aider* -.aider* diff --git a/CHANGELOG.md b/CHANGELOG.md index 7307c17..756074f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,60 @@ All notable changes to Terra-View will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## [0.7.0] - 2026-03-07 + +### Added +- **Project Status Management**: Projects can now be placed `on_hold` or `archived`, with automatic cancellation of pending scheduled actions +- **Hard Delete Projects**: Support for permanently deleting projects, in addition to soft-delete with auto-pruning +- **Vibration Location Detail**: New dedicated template for vibration project location detail views +- **Vibration Project Isolation**: Vibration projects no longer show SLM-specific project tabs +- **Manual SD Card Data Upload**: Upload offline NRL data directly from SD card via ZIP or multi-file select + - Accepts `.rnd`/`.rnh` files; parses `.rnh` metadata for session start/stop times, serial number, and store name + - Creates `MonitoringSession` and `DataFile` records automatically; no unit assignment required + - Upload panel on NRL detail Data Files tab with inline feedback and auto-refresh via HTMX +- **Standalone SLM Type**: New SLM device mode that operates without a modem (direct IP connection) +- **NL32 Data Support**: Report generator and web viewer now support NL32 measurement data format +- **Combined Report Wizard**: Multi-session combined Excel report generation tool + - Wizard UI grouped by location with period type badges (day/night) + - Each selected session produces one `.xlsx` in a ZIP archive + - Period type filtering: day sessions keep last calendar date (7AM–6:59PM); night sessions span both days (7PM–6:59AM) +- **Combined Report Preview**: Interactive spreadsheet-style preview before generating combined reports +- **Chart Preview**: Live chart preview in the report generator matching final report styling +- **SLM Model Schemas**: Per-model configuration schemas for NL32, NL43, NL53 devices +- **Data Collection Mode**: Projects now store a data collection mode field with UI controls and migration + +### Changed +- **MonitoringSession rename**: `RecordingSession` renamed to `MonitoringSession` throughout codebase; DB table renamed from 
`recording_sessions` to `monitoring_sessions` + - Migration: `backend/migrate_rename_recording_to_monitoring_sessions.py` +- **Combined Report Split Logic**: Separate days now generate separate `.xlsx` files; NRLs remain one per sheet +- **Mass Upload Parsing**: Smarter file filtering — no longer imports unneeded Lp files or `.xlsx` files +- **SLM Start Time Grace Period**: 15-minute grace window added so data starting at session start time is included +- **NL32 Date Parsing**: Date now read from `start_time` field instead of file metadata +- **Project Data Labels**: Improved Jinja filters and UI label clarity for project data views + +### Fixed +- **Dev/Prod Separation**: Dev server now uses Docker Compose override; production deployment no longer affected by dev config +- **SLM Modal**: Bench/deploy toggle now correctly shown in SLM unit modal +- **Auto-Downloaded Files**: Files downloaded by scheduler now appear in project file listings +- **Duplicate Download**: Removed duplicate file download that occurred following a scheduled stop +- **SLMM Environment Variables**: `TCP_IDLE_TTL` and `TCP_MAX_AGE` now correctly passed to SLMM service via docker-compose + +### Technical Details +- `session_label` and `period_type` stored on `monitoring_sessions` table (migration: `migrate_add_session_period_type.py`) +- `device_model` stored on `monitoring_sessions` table (migration: `migrate_add_session_device_model.py`) +- Upload endpoint: `POST /api/projects/{project_id}/nrl/{location_id}/upload-data` +- ZIP filename format: `{session_label}_{project_name}_report.xlsx` (label first) + +### Migration Notes +Run the following migration scripts once per database before deploying: +```bash +python backend/migrate_rename_recording_to_monitoring_sessions.py +python backend/migrate_add_session_period_type.py +python backend/migrate_add_session_device_model.py +``` + +--- + ## [0.6.1] - 2026-02-16 ### Added @@ -445,6 +499,7 @@ No database migration required for v0.4.0. 
All new features use existing databas - Photo management per unit - Automated status categorization (OK/Pending/Missing) +[0.7.0]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.6.1...v0.7.0 [0.6.0]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.5.1...v0.6.0 [0.5.1]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.5.0...v0.5.1 [0.5.0]: https://github.com/serversdwn/seismo-fleet-manager/compare/v0.4.4...v0.5.0 diff --git a/Dockerfile b/Dockerfile index 3486caa..1f4f806 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,9 @@ FROM python:3.11-slim +# Build number for dev builds (injected via --build-arg) +ARG BUILD_NUMBER=0 +ENV BUILD_NUMBER=${BUILD_NUMBER} + # Set working directory WORKDIR /app diff --git a/README.md b/README.md index c32e9dd..3ea2995 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Terra-View v0.6.1 +# Terra-View v0.7.0 Backend API and HTMX-powered web interface for managing a mixed fleet of seismographs and field modems. Track deployments, monitor health in real time, merge roster intent with incoming telemetry, and control your fleet through a unified database and dashboard. 
## Features @@ -496,6 +496,16 @@ docker compose down -v ## Release Highlights +### v0.7.0 — 2026-03-07 +- **Project Status Management**: On-hold and archived project states with automatic cancellation of pending actions +- **Manual SD Card Upload**: Upload offline NRL/SLM data directly from SD card (ZIP or multi-file); auto-creates monitoring sessions from `.rnh` metadata +- **Combined Report Wizard**: Multi-session Excel report generation with location grouping, period type filtering, and ZIP download +- **NL32 Support**: Report generator and web viewer now handle NL32 measurement data +- **Chart Preview**: Live chart preview in the report generator matching final output styling +- **Standalone SLM Mode**: SLMs can now be configured without a paired modem (direct IP) +- **Vibration Project Isolation**: Vibration project views no longer show SLM-specific tabs +- **MonitoringSession Rename**: `RecordingSession` renamed to `MonitoringSession` throughout; run migration before deploying + ### v0.6.1 — 2026-02-16 - **One-Off Recording Schedules**: Schedule single recordings with specific start/end datetimes - **Bidirectional Pairing Sync**: Device-modem pairing now updates both sides automatically @@ -584,11 +594,13 @@ MIT ## Version -**Current: 0.6.1** — One-off recording schedules, bidirectional pairing sync, scheduler timezone fix (2026-02-16) +**Current: 0.7.0** — Project status management, manual SD card upload, combined report wizard, NL32 support, MonitoringSession rename (2026-03-07) -Previous: 0.6.0 — Calendar & reservation mode, device pairing interface, calibration UX overhaul, modem dashboard enhancements (2026-02-06) +Previous: 0.6.1 — One-off recording schedules, bidirectional pairing sync, scheduler timezone fix (2026-02-16) -Previous: 0.5.1 — Dashboard schedule view with today's actions panel, new Terra-View branding and logo rework (2026-01-27) +0.6.0 — Calendar & reservation mode, device pairing interface, calibration UX overhaul, modem dashboard 
enhancements (2026-02-06) + +0.5.1 — Dashboard schedule view with today's actions panel, new Terra-View branding and logo rework (2026-01-27) 0.4.4 — Recurring schedules, alerting UI, report templates + RND viewer, and SLM workflow polish (2026-01-23) diff --git a/backend/init_projects_db.py b/backend/init_projects_db.py index 68802c7..4b239c6 100644 --- a/backend/init_projects_db.py +++ b/backend/init_projects_db.py @@ -18,7 +18,7 @@ from backend.models import ( MonitoringLocation, UnitAssignment, ScheduledAction, - RecordingSession, + MonitoringSession, DataFile, ) from datetime import datetime diff --git a/backend/main.py b/backend/main.py index 07f0fc9..fe95f59 100644 --- a/backend/main.py +++ b/backend/main.py @@ -30,7 +30,11 @@ Base.metadata.create_all(bind=engine) ENVIRONMENT = os.getenv("ENVIRONMENT", "production") # Initialize FastAPI app -VERSION = "0.6.1" +VERSION = "0.7.0" +if ENVIRONMENT == "development": + _build = os.getenv("BUILD_NUMBER", "0") + if _build and _build != "0": + VERSION = f"{VERSION}-{_build}" app = FastAPI( title="Seismo Fleet Manager", description="Backend API for managing seismograph fleet status", @@ -312,7 +316,7 @@ async def nrl_detail_page( db: Session = Depends(get_db) ): """NRL (Noise Recording Location) detail page with tabs""" - from backend.models import Project, MonitoringLocation, UnitAssignment, RosterUnit, RecordingSession, DataFile + from backend.models import Project, MonitoringLocation, UnitAssignment, RosterUnit, MonitoringSession, DataFile from sqlalchemy import and_ # Get project @@ -348,23 +352,33 @@ async def nrl_detail_page( assigned_unit = db.query(RosterUnit).filter_by(id=assignment.unit_id).first() # Get session count - session_count = db.query(RecordingSession).filter_by(location_id=location_id).count() + session_count = db.query(MonitoringSession).filter_by(location_id=location_id).count() # Get file count (DataFile links to session, not directly to location) file_count = db.query(DataFile).join( - 
RecordingSession, - DataFile.session_id == RecordingSession.id - ).filter(RecordingSession.location_id == location_id).count() + MonitoringSession, + DataFile.session_id == MonitoringSession.id + ).filter(MonitoringSession.location_id == location_id).count() # Check for active session - active_session = db.query(RecordingSession).filter( + active_session = db.query(MonitoringSession).filter( and_( - RecordingSession.location_id == location_id, - RecordingSession.status == "recording" + MonitoringSession.location_id == location_id, + MonitoringSession.status == "recording" ) ).first() - return templates.TemplateResponse("nrl_detail.html", { + # Parse connection_mode from location_metadata JSON + import json as _json + connection_mode = "connected" + try: + meta = _json.loads(location.location_metadata or "{}") + connection_mode = meta.get("connection_mode", "connected") + except Exception: + pass + + template = "vibration_location_detail.html" if location.location_type == "vibration" else "nrl_detail.html" + return templates.TemplateResponse(template, { "request": request, "project_id": project_id, "location_id": location_id, @@ -375,6 +389,7 @@ async def nrl_detail_page( "session_count": session_count, "file_count": file_count, "active_session": active_session, + "connection_mode": connection_mode, }) diff --git a/backend/migrate_add_project_data_collection_mode.py b/backend/migrate_add_project_data_collection_mode.py new file mode 100644 index 0000000..dbac4d6 --- /dev/null +++ b/backend/migrate_add_project_data_collection_mode.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 +""" +Migration: Add data_collection_mode column to projects table. + +Values: + "remote" — units have modems; data pulled via FTP/scheduler automatically + "manual" — no modem; SD cards retrieved daily and uploaded by hand + +All existing projects are backfilled to "manual" (safe conservative default). 
+ +Run once inside the Docker container: + docker exec terra-view python3 backend/migrate_add_project_data_collection_mode.py +""" +from pathlib import Path + +DB_PATH = Path("data/seismo_fleet.db") + + +def migrate(): + import sqlite3 + + if not DB_PATH.exists(): + print(f"Database not found at {DB_PATH}. Are you running from /home/serversdown/terra-view?") + return + + conn = sqlite3.connect(DB_PATH) + conn.row_factory = sqlite3.Row + cur = conn.cursor() + + # ── 1. Add column (idempotent) ─────────────────────────────────────────── + cur.execute("PRAGMA table_info(projects)") + existing_cols = {row["name"] for row in cur.fetchall()} + + if "data_collection_mode" not in existing_cols: + cur.execute("ALTER TABLE projects ADD COLUMN data_collection_mode TEXT DEFAULT 'manual'") + conn.commit() + print("✓ Added column data_collection_mode to projects") + else: + print("○ Column data_collection_mode already exists — skipping ALTER TABLE") + + # ── 2. Backfill NULLs to 'manual' ──────────────────────────────────────── + cur.execute("UPDATE projects SET data_collection_mode = 'manual' WHERE data_collection_mode IS NULL") + updated = cur.rowcount + conn.commit() + conn.close() + + if updated: + print(f"✓ Backfilled {updated} project(s) to data_collection_mode='manual'.") + print("Migration complete.") + + +if __name__ == "__main__": + migrate() diff --git a/backend/migrate_add_project_deleted_at.py b/backend/migrate_add_project_deleted_at.py new file mode 100644 index 0000000..d15ed34 --- /dev/null +++ b/backend/migrate_add_project_deleted_at.py @@ -0,0 +1,56 @@ +""" +Migration: Add deleted_at column to projects table + +Adds columns: +- projects.deleted_at: Timestamp set when status='deleted'; data hard-deleted after 60 days +""" + +import sqlite3 +import sys +from pathlib import Path + + +def migrate(db_path: str): + """Run the migration.""" + print(f"Migrating database: {db_path}") + + conn = sqlite3.connect(db_path) + cursor = conn.cursor() + + try: + 
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='projects'") + if not cursor.fetchone(): + print("projects table does not exist. Skipping migration.") + return + + cursor.execute("PRAGMA table_info(projects)") + existing_cols = {row[1] for row in cursor.fetchall()} + + if 'deleted_at' not in existing_cols: + print("Adding deleted_at column to projects...") + cursor.execute("ALTER TABLE projects ADD COLUMN deleted_at DATETIME") + else: + print("deleted_at column already exists. Skipping.") + + conn.commit() + print("Migration completed successfully!") + + except Exception as e: + print(f"Migration failed: {e}") + conn.rollback() + raise + finally: + conn.close() + + +if __name__ == "__main__": + db_path = "./data/terra-view.db" + + if len(sys.argv) > 1: + db_path = sys.argv[1] + + if not Path(db_path).exists(): + print(f"Database not found: {db_path}") + sys.exit(1) + + migrate(db_path) diff --git a/backend/migrate_add_session_device_model.py b/backend/migrate_add_session_device_model.py new file mode 100644 index 0000000..b18cc4e --- /dev/null +++ b/backend/migrate_add_session_device_model.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python3 +""" +Migration: Add device_model column to monitoring_sessions table. + +Records which physical SLM model produced each session's data (e.g. "NL-43", +"NL-53", "NL-32"). Used by report generation to apply the correct parsing +logic without re-opening files to detect format. + +Run once inside the Docker container: + docker exec terra-view python3 backend/migrate_add_session_device_model.py + +Backfill strategy for existing rows: + 1. If session.unit_id is set, use roster.slm_model for that unit. + 2. Else, peek at the first .rnd file in the session: presence of the 'LAeq' + column header identifies AU2 / NL-32 format. + Sessions where neither hint is available remain NULL — the file-content + fallback in report code handles them transparently. 
+""" +import csv +import io +from pathlib import Path + +DB_PATH = Path("data/seismo_fleet.db") + + +def _peek_first_row(abs_path: Path) -> dict: + """Read only the header + first data row of an RND file. Very cheap.""" + try: + with open(abs_path, "r", encoding="utf-8", errors="replace") as f: + reader = csv.DictReader(f) + return next(reader, None) or {} + except Exception: + return {} + + +def _detect_model_from_rnd(abs_path: Path) -> str | None: + """Return 'NL-32' if file uses AU2 column format, else None.""" + row = _peek_first_row(abs_path) + if "LAeq" in row: + return "NL-32" + return None + + +def migrate(): + import sqlite3 + + if not DB_PATH.exists(): + print(f"Database not found at {DB_PATH}. Are you running from /home/serversdown/terra-view?") + return + + conn = sqlite3.connect(DB_PATH) + conn.row_factory = sqlite3.Row + cur = conn.cursor() + + # ── 1. Add column (idempotent) ─────────────────────────────────────────── + cur.execute("PRAGMA table_info(monitoring_sessions)") + existing_cols = {row["name"] for row in cur.fetchall()} + + if "device_model" not in existing_cols: + cur.execute("ALTER TABLE monitoring_sessions ADD COLUMN device_model TEXT") + conn.commit() + print("✓ Added column device_model to monitoring_sessions") + else: + print("○ Column device_model already exists — skipping ALTER TABLE") + + # ── 2. 
Backfill existing NULL rows ─────────────────────────────────────── + cur.execute( + "SELECT id, unit_id FROM monitoring_sessions WHERE device_model IS NULL" + ) + sessions = cur.fetchall() + print(f"Backfilling {len(sessions)} session(s) with device_model=NULL...") + + updated = skipped = 0 + for row in sessions: + session_id = row["id"] + unit_id = row["unit_id"] + device_model = None + + # Strategy A: look up unit's slm_model from the roster + if unit_id: + cur.execute( + "SELECT slm_model FROM roster WHERE id = ?", (unit_id,) + ) + unit_row = cur.fetchone() + if unit_row and unit_row["slm_model"]: + device_model = unit_row["slm_model"] + + # Strategy B: detect from first .rnd file in the session + if device_model is None: + cur.execute( + """SELECT file_path FROM data_files + WHERE session_id = ? + AND lower(file_path) LIKE '%.rnd' + LIMIT 1""", + (session_id,), + ) + file_row = cur.fetchone() + if file_row: + abs_path = Path("data") / file_row["file_path"] + device_model = _detect_model_from_rnd(abs_path) + # None here means NL-43/NL-53 format (or unreadable file) — + # leave as NULL so the existing fallback applies. + + if device_model: + cur.execute( + "UPDATE monitoring_sessions SET device_model = ? WHERE id = ?", + (device_model, session_id), + ) + updated += 1 + else: + skipped += 1 + + conn.commit() + conn.close() + + print(f"✓ Backfilled {updated} session(s) with a device_model.") + if skipped: + print( + f" {skipped} session(s) left as NULL " + "(no unit link and no AU2 file hint — NL-43/NL-53 or unknown; " + "file-content detection applies at report time)." 
+ ) + print("Migration complete.") + + +if __name__ == "__main__": + migrate() diff --git a/backend/migrate_add_session_period_type.py b/backend/migrate_add_session_period_type.py new file mode 100644 index 0000000..386325b --- /dev/null +++ b/backend/migrate_add_session_period_type.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python3 +""" +Migration: Add session_label and period_type columns to monitoring_sessions. + +session_label - user-editable display name, e.g. "NRL-1 Sun 2/23 Night" +period_type - one of: weekday_day | weekday_night | weekend_day | weekend_night + Auto-derived from started_at when NULL. + +Period definitions (used in report stats table): + weekday_day Mon-Fri 07:00-22:00 -> Daytime (7AM-10PM) + weekday_night Mon-Fri 22:00-07:00 -> Nighttime (10PM-7AM) + weekend_day Sat-Sun 07:00-22:00 -> Daytime (7AM-10PM) + weekend_night Sat-Sun 22:00-07:00 -> Nighttime (10PM-7AM) + +Run once inside the Docker container: + docker exec terra-view python3 backend/migrate_add_session_period_type.py +""" +from pathlib import Path +from datetime import datetime + +DB_PATH = Path("data/seismo_fleet.db") + + +def _derive_period_type(started_at_str: str) -> str | None: + """Derive period_type from a started_at ISO datetime string.""" + if not started_at_str: + return None + try: + dt = datetime.fromisoformat(started_at_str) + except ValueError: + return None + is_weekend = dt.weekday() >= 5 # 5=Sat, 6=Sun + is_night = dt.hour >= 22 or dt.hour < 7 + if is_weekend: + return "weekend_night" if is_night else "weekend_day" + else: + return "weekday_night" if is_night else "weekday_day" + + +def _build_label(started_at_str: str, location_name: str | None, period_type: str | None) -> str | None: + """Build a human-readable session label.""" + if not started_at_str: + return None + try: + dt = datetime.fromisoformat(started_at_str) + except ValueError: + return None + + day_abbr = dt.strftime("%a") # Mon, Tue, Sun, etc. 
+ date_str = dt.strftime("%-m/%-d") # 2/23 + + period_labels = { + "weekday_day": "Day", + "weekday_night": "Night", + "weekend_day": "Day", + "weekend_night": "Night", + } + period_str = period_labels.get(period_type or "", "") + + parts = [] + if location_name: + parts.append(location_name) + parts.append(f"{day_abbr} {date_str}") + if period_str: + parts.append(period_str) + return " — ".join(parts) + + +def migrate(): + import sqlite3 + + if not DB_PATH.exists(): + print(f"Database not found at {DB_PATH}. Are you running from /home/serversdown/terra-view?") + return + + conn = sqlite3.connect(DB_PATH) + conn.row_factory = sqlite3.Row + cur = conn.cursor() + + # 1. Add columns (idempotent) + cur.execute("PRAGMA table_info(monitoring_sessions)") + existing_cols = {row["name"] for row in cur.fetchall()} + + for col, typedef in [("session_label", "TEXT"), ("period_type", "TEXT")]: + if col not in existing_cols: + cur.execute(f"ALTER TABLE monitoring_sessions ADD COLUMN {col} {typedef}") + conn.commit() + print(f"✓ Added column {col} to monitoring_sessions") + else: + print(f"○ Column {col} already exists — skipping ALTER TABLE") + + # 2. Backfill existing rows + cur.execute( + """SELECT ms.id, ms.started_at, ms.location_id + FROM monitoring_sessions ms + WHERE ms.period_type IS NULL OR ms.session_label IS NULL""" + ) + sessions = cur.fetchall() + print(f"Backfilling {len(sessions)} session(s)...") + + updated = 0 + for row in sessions: + session_id = row["id"] + started_at = row["started_at"] + location_id = row["location_id"] + + # Look up location name + location_name = None + if location_id: + cur.execute("SELECT name FROM monitoring_locations WHERE id = ?", (location_id,)) + loc_row = cur.fetchone() + if loc_row: + location_name = loc_row["name"] + + period_type = _derive_period_type(started_at) + label = _build_label(started_at, location_name, period_type) + + cur.execute( + "UPDATE monitoring_sessions SET period_type = ?, session_label = ? 
WHERE id = ?", + (period_type, label, session_id), + ) + updated += 1 + + conn.commit() + conn.close() + print(f"✓ Backfilled {updated} session(s).") + print("Migration complete.") + + +if __name__ == "__main__": + migrate() diff --git a/backend/migrate_rename_recording_to_monitoring_sessions.py b/backend/migrate_rename_recording_to_monitoring_sessions.py new file mode 100644 index 0000000..475ed67 --- /dev/null +++ b/backend/migrate_rename_recording_to_monitoring_sessions.py @@ -0,0 +1,54 @@ +""" +Migration: Rename recording_sessions table to monitoring_sessions + +Renames the table and updates the model name from RecordingSession to MonitoringSession. +Run once per database: python backend/migrate_rename_recording_to_monitoring_sessions.py +""" + +import sqlite3 +import sys +from pathlib import Path + + +def migrate(db_path: str): + """Run the migration.""" + print(f"Migrating database: {db_path}") + + conn = sqlite3.connect(db_path) + cursor = conn.cursor() + + try: + cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='recording_sessions'") + if not cursor.fetchone(): + cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='monitoring_sessions'") + if cursor.fetchone(): + print("monitoring_sessions table already exists. Skipping migration.") + else: + print("recording_sessions table does not exist. 
Skipping migration.") + return + + print("Renaming recording_sessions -> monitoring_sessions...") + cursor.execute("ALTER TABLE recording_sessions RENAME TO monitoring_sessions") + + conn.commit() + print("Migration completed successfully!") + + except Exception as e: + print(f"Migration failed: {e}") + conn.rollback() + raise + finally: + conn.close() + + +if __name__ == "__main__": + db_path = "./data/terra-view.db" + + if len(sys.argv) > 1: + db_path = sys.argv[1] + + if not Path(db_path).exists(): + print(f"Database not found: {db_path}") + sys.exit(1) + + migrate(db_path) diff --git a/backend/models.py b/backend/models.py index 49ec9af..1c0a39d 100644 --- a/backend/models.py +++ b/backend/models.py @@ -155,7 +155,12 @@ class Project(Base): name = Column(String, nullable=False, unique=True) # Project/site name (e.g., "RKM Hall") description = Column(Text, nullable=True) project_type_id = Column(String, nullable=False) # FK to ProjectType.id - status = Column(String, default="active") # active, completed, archived + status = Column(String, default="active") # active, on_hold, completed, archived, deleted + + # Data collection mode: how field data reaches Terra-View. 
+ # "remote" — units have modems; data pulled via FTP/scheduler automatically + # "manual" — no modem; SD cards retrieved daily and uploaded by hand + data_collection_mode = Column(String, default="manual") # remote | manual # Project metadata client_name = Column(String, nullable=True, index=True) # Client name (e.g., "PJ Dick") @@ -166,6 +171,7 @@ class Project(Base): created_at = Column(DateTime, default=datetime.utcnow) updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) + deleted_at = Column(DateTime, nullable=True) # Set when status='deleted'; hard delete scheduled after 60 days class MonitoringLocation(Base): @@ -244,17 +250,21 @@ class ScheduledAction(Base): created_at = Column(DateTime, default=datetime.utcnow) -class RecordingSession(Base): +class MonitoringSession(Base): """ - Recording sessions: tracks actual monitoring sessions. - Created when recording starts, updated when it stops. + Monitoring sessions: tracks actual monitoring sessions. + Created when monitoring starts, updated when it stops. """ - __tablename__ = "recording_sessions" + __tablename__ = "monitoring_sessions" id = Column(String, primary_key=True, index=True) # UUID project_id = Column(String, nullable=False, index=True) # FK to Project.id location_id = Column(String, nullable=False, index=True) # FK to MonitoringLocation.id - unit_id = Column(String, nullable=False, index=True) # FK to RosterUnit.id + unit_id = Column(String, nullable=True, index=True) # FK to RosterUnit.id (nullable for offline uploads) + + # Physical device model that produced this session's data (e.g. "NL-43", "NL-53", "NL-32"). + # Null for older records; report code falls back to file-content detection when null. 
+ device_model = Column(String, nullable=True) session_type = Column(String, nullable=False) # sound | vibration started_at = Column(DateTime, nullable=False) @@ -262,6 +272,14 @@ class RecordingSession(Base): duration_seconds = Column(Integer, nullable=True) status = Column(String, default="recording") # recording, completed, failed + # Human-readable label auto-derived from date/location, editable by user. + # e.g. "NRL-1 — Sun 2/23 — Night" + session_label = Column(String, nullable=True) + + # Period classification for report stats columns. + # weekday_day | weekday_night | weekend_day | weekend_night + period_type = Column(String, nullable=True) + # Snapshot of device configuration at recording time session_metadata = Column(Text, nullable=True) # JSON @@ -277,7 +295,7 @@ class DataFile(Base): __tablename__ = "data_files" id = Column(String, primary_key=True, index=True) # UUID - session_id = Column(String, nullable=False, index=True) # FK to RecordingSession.id + session_id = Column(String, nullable=False, index=True) # FK to MonitoringSession.id file_path = Column(String, nullable=False) # Relative to data/Projects/ file_type = Column(String, nullable=False) # wav, csv, mseed, json diff --git a/backend/routers/dashboard.py b/backend/routers/dashboard.py index c9e61bb..4d36f52 100644 --- a/backend/routers/dashboard.py +++ b/backend/routers/dashboard.py @@ -1,5 +1,6 @@ from fastapi import APIRouter, Request, Depends from sqlalchemy.orm import Session +from sqlalchemy import and_ from datetime import datetime, timedelta from backend.database import get_db @@ -48,10 +49,18 @@ def dashboard_todays_actions(request: Request, db: Session = Depends(get_db)): today_start_utc = today_start_local.astimezone(ZoneInfo("UTC")).replace(tzinfo=None) today_end_utc = today_end_local.astimezone(ZoneInfo("UTC")).replace(tzinfo=None) + # Exclude actions from paused/removed projects + paused_project_ids = [ + p.id for p in db.query(Project.id).filter( + 
Project.status.in_(["on_hold", "archived", "deleted"]) + ).all() + ] + # Query today's actions actions = db.query(ScheduledAction).filter( ScheduledAction.scheduled_time >= today_start_utc, ScheduledAction.scheduled_time < today_end_utc, + ScheduledAction.project_id.notin_(paused_project_ids), ).order_by(ScheduledAction.scheduled_time.asc()).all() # Enrich with location/project info and parse results diff --git a/backend/routers/project_locations.py b/backend/routers/project_locations.py index 54d36b1..44fcdd5 100644 --- a/backend/routers/project_locations.py +++ b/backend/routers/project_locations.py @@ -14,6 +14,12 @@ from typing import Optional import uuid import json +from fastapi import UploadFile, File +import zipfile +import hashlib +import io +from pathlib import Path + from backend.database import get_db from backend.models import ( Project, @@ -21,13 +27,47 @@ from backend.models import ( MonitoringLocation, UnitAssignment, RosterUnit, - RecordingSession, + MonitoringSession, + DataFile, ) from backend.templates_config import templates router = APIRouter(prefix="/api/projects/{project_id}", tags=["project-locations"]) +# ============================================================================ +# Session period helpers +# ============================================================================ + +def _derive_period_type(dt: datetime) -> str: + """ + Classify a session start time into one of four period types. + Night = 22:00–07:00, Day = 07:00–22:00. + Weekend = Saturday (5) or Sunday (6). + """ + is_weekend = dt.weekday() >= 5 + is_night = dt.hour >= 22 or dt.hour < 7 + if is_weekend: + return "weekend_night" if is_night else "weekend_day" + return "weekday_night" if is_night else "weekday_day" + + +def _build_session_label(dt: datetime, location_name: str, period_type: str) -> str: + """Build a human-readable session label, e.g. 'NRL-1 — Sun 2/23 — Night'. + Uses started_at date as-is; user can correct period_type in the wizard. 
+ """ + day_abbr = dt.strftime("%a") + date_str = f"{dt.month}/{dt.day}" + period_str = { + "weekday_day": "Day", + "weekday_night": "Night", + "weekend_day": "Day", + "weekend_night": "Night", + }.get(period_type, "") + parts = [p for p in [location_name, f"{day_abbr} {date_str}", period_str] if p] + return " — ".join(parts) + + # ============================================================================ # Monitoring Locations CRUD # ============================================================================ @@ -70,8 +110,8 @@ async def get_project_locations( if assignment: assigned_unit = db.query(RosterUnit).filter_by(id=assignment.unit_id).first() - # Count recording sessions - session_count = db.query(RecordingSession).filter_by( + # Count monitoring sessions + session_count = db.query(MonitoringSession).filter_by( location_id=location.id ).count() @@ -370,19 +410,19 @@ async def unassign_unit( if not assignment: raise HTTPException(status_code=404, detail="Assignment not found") - # Check if there are active recording sessions - active_sessions = db.query(RecordingSession).filter( + # Check if there are active monitoring sessions + active_sessions = db.query(MonitoringSession).filter( and_( - RecordingSession.location_id == assignment.location_id, - RecordingSession.unit_id == assignment.unit_id, - RecordingSession.status == "recording", + MonitoringSession.location_id == assignment.location_id, + MonitoringSession.unit_id == assignment.unit_id, + MonitoringSession.status == "recording", ) ).count() if active_sessions > 0: raise HTTPException( status_code=400, - detail="Cannot unassign unit with active recording sessions. Stop recording first.", + detail="Cannot unassign unit with active monitoring sessions. Stop monitoring first.", ) assignment.status = "completed" @@ -451,14 +491,12 @@ async def get_nrl_sessions( db: Session = Depends(get_db), ): """ - Get recording sessions for a specific NRL. + Get monitoring sessions for a specific NRL. 
Returns HTML partial with session list. """ - from backend.models import RecordingSession, RosterUnit - - sessions = db.query(RecordingSession).filter_by( + sessions = db.query(MonitoringSession).filter_by( location_id=location_id - ).order_by(RecordingSession.started_at.desc()).all() + ).order_by(MonitoringSession.started_at.desc()).all() # Enrich with unit details sessions_data = [] @@ -491,14 +529,12 @@ async def get_nrl_files( Get data files for a specific NRL. Returns HTML partial with file list. """ - from backend.models import DataFile, RecordingSession - - # Join DataFile with RecordingSession to filter by location_id + # Join DataFile with MonitoringSession to filter by location_id files = db.query(DataFile).join( - RecordingSession, - DataFile.session_id == RecordingSession.id + MonitoringSession, + DataFile.session_id == MonitoringSession.id ).filter( - RecordingSession.location_id == location_id + MonitoringSession.location_id == location_id ).order_by(DataFile.created_at.desc()).all() # Enrich with session details @@ -506,7 +542,7 @@ async def get_nrl_files( for file in files: session = None if file.session_id: - session = db.query(RecordingSession).filter_by(id=file.session_id).first() + session = db.query(MonitoringSession).filter_by(id=file.session_id).first() files_data.append({ "file": file, @@ -519,3 +555,310 @@ async def get_nrl_files( "location_id": location_id, "files": files_data, }) + + +# ============================================================================ +# Manual SD Card Data Upload +# ============================================================================ + +def _parse_rnh(content: bytes) -> dict: + """ + Parse a Rion .rnh metadata file (INI-style with [Section] headers). + Returns a dict of key metadata fields. 
+ """ + result = {} + try: + text = content.decode("utf-8", errors="replace") + for line in text.splitlines(): + line = line.strip() + if not line or line.startswith("["): + continue + if "," in line: + key, _, value = line.partition(",") + key = key.strip() + value = value.strip() + if key == "Serial Number": + result["serial_number"] = value + elif key == "Store Name": + result["store_name"] = value + elif key == "Index Number": + result["index_number"] = value + elif key == "Measurement Start Time": + result["start_time_str"] = value + elif key == "Measurement Stop Time": + result["stop_time_str"] = value + elif key == "Total Measurement Time": + result["total_time_str"] = value + except Exception: + pass + return result + + +def _parse_rnh_datetime(s: str): + """Parse RNH datetime string: '2026/02/17 19:00:19' -> datetime""" + from datetime import datetime + if not s: + return None + try: + return datetime.strptime(s.strip(), "%Y/%m/%d %H:%M:%S") + except Exception: + return None + + +def _classify_file(filename: str) -> str: + """Classify a file by name into a DataFile file_type.""" + name = filename.lower() + if name.endswith(".rnh"): + return "log" + if name.endswith(".rnd"): + return "measurement" + if name.endswith(".zip"): + return "archive" + return "data" + + +@router.post("/nrl/{location_id}/upload-data") +async def upload_nrl_data( + project_id: str, + location_id: str, + db: Session = Depends(get_db), + files: list[UploadFile] = File(...), +): + """ + Manually upload SD card data for an offline NRL. + + Accepts either: + - A single .zip file (the Auto_#### folder zipped) — auto-extracted + - Multiple .rnd / .rnh files selected directly from the SD card folder + + Creates a MonitoringSession from .rnh metadata and DataFile records + for each measurement file. No unit assignment required. 
+ """ + from datetime import datetime + + # Verify project and location exist + location = db.query(MonitoringLocation).filter_by( + id=location_id, project_id=project_id + ).first() + if not location: + raise HTTPException(status_code=404, detail="Location not found") + + # --- Step 1: Normalize to (filename, bytes) list --- + file_entries: list[tuple[str, bytes]] = [] + + if len(files) == 1 and files[0].filename.lower().endswith(".zip"): + raw = await files[0].read() + try: + with zipfile.ZipFile(io.BytesIO(raw)) as zf: + for info in zf.infolist(): + if info.is_dir(): + continue + name = Path(info.filename).name # strip folder path + if not name: + continue + file_entries.append((name, zf.read(info))) + except zipfile.BadZipFile: + raise HTTPException(status_code=400, detail="Uploaded file is not a valid ZIP archive.") + else: + for uf in files: + data = await uf.read() + file_entries.append((uf.filename, data)) + + if not file_entries: + raise HTTPException(status_code=400, detail="No usable files found in upload.") + + # --- Step 1b: Filter to only relevant files --- + # Keep: .rnh (metadata) and measurement .rnd files + # NL-43 generates two .rnd types: _Leq_ (15-min averages, wanted) and _Lp_ (1-sec granular, skip) + # AU2 (NL-23/older Rion) generates a single Au2_####.rnd per session — always keep those + # Drop: _Lp_ .rnd, .xlsx, .mp3, and anything else + def _is_wanted(fname: str) -> bool: + n = fname.lower() + if n.endswith(".rnh"): + return True + if n.endswith(".rnd"): + if "_leq_" in n: # NL-43 Leq file + return True + if n.startswith("au2_"): # AU2 format (NL-23) — always Leq equivalent + return True + if "_lp" not in n and "_leq_" not in n: + # Unknown .rnd format — include it so we don't silently drop data + return True + return False + + file_entries = [(fname, fbytes) for fname, fbytes in file_entries if _is_wanted(fname)] + + if not file_entries: + raise HTTPException(status_code=400, detail="No usable .rnd or .rnh files found. 
Expected NL-43 _Leq_ files or AU2 format .rnd files.") + + # --- Step 2: Find and parse .rnh metadata --- + rnh_meta = {} + for fname, fbytes in file_entries: + if fname.lower().endswith(".rnh"): + rnh_meta = _parse_rnh(fbytes) + break + + started_at = _parse_rnh_datetime(rnh_meta.get("start_time_str")) or datetime.utcnow() + stopped_at = _parse_rnh_datetime(rnh_meta.get("stop_time_str")) + duration_seconds = None + if started_at and stopped_at: + duration_seconds = int((stopped_at - started_at).total_seconds()) + + store_name = rnh_meta.get("store_name", "") + serial_number = rnh_meta.get("serial_number", "") + index_number = rnh_meta.get("index_number", "") + + # --- Step 3: Create MonitoringSession --- + period_type = _derive_period_type(started_at) if started_at else None + session_label = _build_session_label(started_at, location.name, period_type) if started_at else None + + session_id = str(uuid.uuid4()) + monitoring_session = MonitoringSession( + id=session_id, + project_id=project_id, + location_id=location_id, + unit_id=None, + session_type="sound", + started_at=started_at, + stopped_at=stopped_at, + duration_seconds=duration_seconds, + status="completed", + session_label=session_label, + period_type=period_type, + session_metadata=json.dumps({ + "source": "manual_upload", + "store_name": store_name, + "serial_number": serial_number, + "index_number": index_number, + }), + ) + db.add(monitoring_session) + db.commit() + db.refresh(monitoring_session) + + # --- Step 4: Write files to disk and create DataFile records --- + output_dir = Path("data/Projects") / project_id / session_id + output_dir.mkdir(parents=True, exist_ok=True) + + leq_count = 0 + lp_count = 0 + metadata_count = 0 + files_imported = 0 + + for fname, fbytes in file_entries: + file_type = _classify_file(fname) + fname_lower = fname.lower() + + # Track counts for summary + if fname_lower.endswith(".rnd"): + if "_leq_" in fname_lower: + leq_count += 1 + elif "_lp" in fname_lower: + lp_count += 
1 + elif fname_lower.endswith(".rnh"): + metadata_count += 1 + + # Write to disk + dest = output_dir / fname + dest.write_bytes(fbytes) + + # Compute checksum + checksum = hashlib.sha256(fbytes).hexdigest() + + # Store relative path from data/ dir + rel_path = str(dest.relative_to("data")) + + data_file = DataFile( + id=str(uuid.uuid4()), + session_id=session_id, + file_path=rel_path, + file_type=file_type, + file_size_bytes=len(fbytes), + downloaded_at=datetime.utcnow(), + checksum=checksum, + file_metadata=json.dumps({ + "source": "manual_upload", + "original_filename": fname, + "store_name": store_name, + }), + ) + db.add(data_file) + files_imported += 1 + + db.commit() + + return { + "success": True, + "session_id": session_id, + "files_imported": files_imported, + "leq_files": leq_count, + "lp_files": lp_count, + "metadata_files": metadata_count, + "store_name": store_name, + "started_at": started_at.isoformat() if started_at else None, + "stopped_at": stopped_at.isoformat() if stopped_at else None, + } + + +# ============================================================================ +# NRL Live Status (connected NRLs only) +# ============================================================================ + +@router.get("/nrl/{location_id}/live-status", response_class=HTMLResponse) +async def get_nrl_live_status( + project_id: str, + location_id: str, + request: Request, + db: Session = Depends(get_db), +): + """ + Fetch cached status from SLMM for the unit assigned to this NRL and + return a compact HTML status card. Used in the NRL overview tab for + connected NRLs. Gracefully shows an offline message if SLMM is unreachable. 
+ """ + import os + import httpx + + # Find the assigned unit + assignment = db.query(UnitAssignment).filter( + and_( + UnitAssignment.location_id == location_id, + UnitAssignment.status == "active", + ) + ).first() + + if not assignment: + return templates.TemplateResponse("partials/projects/nrl_live_status.html", { + "request": request, + "status": None, + "error": "No unit assigned", + }) + + unit = db.query(RosterUnit).filter_by(id=assignment.unit_id).first() + if not unit: + return templates.TemplateResponse("partials/projects/nrl_live_status.html", { + "request": request, + "status": None, + "error": "Assigned unit not found", + }) + + slmm_base = os.getenv("SLMM_BASE_URL", "http://localhost:8100") + status_data = None + error_msg = None + + try: + async with httpx.AsyncClient(timeout=5.0) as client: + resp = await client.get(f"{slmm_base}/api/nl43/{unit.id}/status") + if resp.status_code == 200: + status_data = resp.json() + else: + error_msg = f"SLMM returned {resp.status_code}" + except Exception as e: + error_msg = "SLMM unreachable" + + return templates.TemplateResponse("partials/projects/nrl_live_status.html", { + "request": request, + "unit": unit, + "status": status_data, + "error": error_msg, + }) diff --git a/backend/routers/projects.py b/backend/routers/projects.py index 2fcf0f0..29e71cd 100644 --- a/backend/routers/projects.py +++ b/backend/routers/projects.py @@ -23,12 +23,18 @@ import io from backend.utils.timezone import utc_to_local, format_local_datetime from backend.database import get_db +from fastapi import UploadFile, File +import zipfile +import hashlib +import pathlib as _pathlib + from backend.models import ( Project, ProjectType, MonitoringLocation, UnitAssignment, - RecordingSession, + MonitoringSession, + DataFile, ScheduledAction, RecurringSchedule, RosterUnit, @@ -39,6 +45,316 @@ router = APIRouter(prefix="/api/projects", tags=["projects"]) logger = logging.getLogger(__name__) +# 
============================================================================ +# RND file normalization — maps AU2 (older Rion) column names to the NL-43 +# equivalents so report generation and the web viewer work for both formats. +# AU2 files: LAeq, LAmax, LAmin, LA01, LA10, LA50, LA90, LA95, LCpeak +# NL-43 files: Leq(Main), Lmax(Main), Lmin(Main), LN1(Main) … Lpeak(Main) +# ============================================================================ + +_AU2_TO_NL43 = { + "LAeq": "Leq(Main)", + "LAmax": "Lmax(Main)", + "LAmin": "Lmin(Main)", + "LCpeak": "Lpeak(Main)", + "LA01": "LN1(Main)", + "LA10": "LN2(Main)", + "LA50": "LN3(Main)", + "LA90": "LN4(Main)", + "LA95": "LN5(Main)", + # Time column differs too + "Time": "Start Time", +} + + +def _normalize_rnd_rows(rows: list[dict]) -> tuple[list[dict], bool]: + """ + Detect AU2-format RND rows (by presence of 'LAeq' key) and remap column + names to NL-43 equivalents. Returns (normalized_rows, was_au2_format). + If already NL-43 format the rows are returned unchanged. + """ + if not rows: + return rows, False + if "LAeq" not in rows[0]: + return rows, False # already NL-43 format + + normalized = [] + for row in rows: + new_row = {} + for k, v in row.items(): + new_row[_AU2_TO_NL43.get(k, k)] = v + normalized.append(new_row) + return normalized, True + + +def _peek_rnd_headers(file_path) -> list[dict]: + """Read just the first data row of an RND file to check column names cheaply.""" + import csv as _csv + try: + with open(file_path, 'r', encoding='utf-8', errors='replace') as f: + reader = _csv.DictReader(f) + row = next(reader, None) + return [row] if row else [] + except Exception: + return [] + + +def _is_leq_file(file_path: str, rows: list[dict]) -> bool: + """ + Return True if this RND file contains Leq (15-min averaged) data. + Accepts NL-43 Leq files (_Leq_ in path) and AU2 files (LAeq column or + Leq(Main) column after normalisation). 
+ """ + if "_Leq_" in file_path: + return True + if rows and ("LAeq" in rows[0] or "Leq(Main)" in rows[0]): + return True + return False + + +def _filter_rnd_rows( + rows: list[dict], + filter_start_time: str, + filter_end_time: str, + filter_start_date: str, + filter_end_date: str, +) -> list[dict]: + """Filter RND data rows by time window and/or date range. Handles overnight ranges. + + Grace window: intervals starting up to 15 minutes before the filter start time are + included. This covers the common case where a unit is deployed slightly early + (e.g. set up at 6:50 for a 7:00 PM start) and the first interval begins just before + the nominal window. The grace window applies only to the start boundary. + """ + if not filter_start_time and not filter_end_time and not filter_start_date and not filter_end_date: + return rows + + _GRACE_MINUTES = 15 + + filtered = [] + + start_hour = start_minute = end_hour = end_minute = None + if filter_start_time: + try: + parts = filter_start_time.split(':') + start_hour = int(parts[0]) + start_minute = int(parts[1]) if len(parts) > 1 else 0 + except (ValueError, IndexError): + pass + + if filter_end_time: + try: + parts = filter_end_time.split(':') + end_hour = int(parts[0]) + end_minute = int(parts[1]) if len(parts) > 1 else 0 + except (ValueError, IndexError): + pass + + start_dt = end_dt = None + if filter_start_date: + try: + start_dt = datetime.strptime(filter_start_date, '%Y-%m-%d').date() + except ValueError: + pass + if filter_end_date: + try: + end_dt = datetime.strptime(filter_end_date, '%Y-%m-%d').date() + except ValueError: + pass + + for row in rows: + start_time_str = row.get('Start Time', '') + if not start_time_str: + continue + + try: + dt = datetime.strptime(start_time_str, '%Y/%m/%d %H:%M:%S') + row_date = dt.date() + row_hour = dt.hour + row_minute = dt.minute + + if start_dt and row_date < start_dt: + continue + if end_dt and row_date > end_dt: + continue + + if start_hour is not None and end_hour is not 
None: + row_time_minutes = row_hour * 60 + row_minute + start_time_minutes = start_hour * 60 + start_minute + end_time_minutes = end_hour * 60 + end_minute + # Effective start allows up to 15 min early (wraps midnight correctly) + effective_start = (start_time_minutes - _GRACE_MINUTES) % (24 * 60) + + if start_time_minutes > end_time_minutes: + # Overnight range (e.g., 19:00-07:00) + # With grace: effective start may be e.g. 18:45 + if effective_start > end_time_minutes: + # Grace window doesn't wrap past midnight itself + in_window = (row_time_minutes >= effective_start or row_time_minutes < end_time_minutes) + else: + # Grace window wraps midnight (rare: start near 00:00) + in_window = (row_time_minutes >= effective_start and row_time_minutes < end_time_minutes) + if not in_window: + continue + else: + # Same-day range (e.g., 07:00-19:00) + if not (effective_start <= row_time_minutes < end_time_minutes): + continue + + filtered.append(row) + except ValueError: + filtered.append(row) + + return filtered + + +def _read_rnd_file_rows(file_path_str: str) -> list[dict]: + """Read and parse a single RND CSV file into a list of cleaned row dicts.""" + import csv as _csv + from pathlib import Path as _Path + + file_path = _Path("data") / file_path_str + if not file_path.exists(): + return [] + + try: + with open(file_path, 'r', encoding='utf-8', errors='replace') as f: + content = f.read() + rows = [] + reader = _csv.DictReader(io.StringIO(content)) + for row in reader: + cleaned_row = {} + for key, value in row.items(): + if key: + cleaned_key = key.strip() + cleaned_value = value.strip() if value else '' + if cleaned_value and cleaned_value not in ['-.-', '-', '']: + try: + cleaned_value = float(cleaned_value) + except ValueError: + pass + elif cleaned_value in ['-.-', '-']: + cleaned_value = None + cleaned_row[cleaned_key] = cleaned_value + rows.append(cleaned_row) + return rows + except Exception: + return [] + + +def _build_combined_location_data( + project_id: str, 
+ db, + start_time: str = "", + end_time: str = "", + start_date: str = "", + end_date: str = "", + enabled_locations: list = None, +) -> dict: + """ + Read all Leq RND files for a project, apply time/date filters, and return + per-location spreadsheet data ready for the wizard preview. + + Returns: + { + "project": Project, + "location_data": [ + { + "location_name": str, + "raw_count": int, + "filtered_count": int, + "spreadsheet_data": [[idx, date, time, lmax, ln1, ln2, ""], ...] + }, + ... + ] + } + Raises HTTPException 404 if project not found or no Leq files exist. + """ + project = db.query(Project).filter_by(id=project_id).first() + if not project: + raise HTTPException(status_code=404, detail="Project not found") + + sessions = db.query(MonitoringSession).filter_by(project_id=project_id).all() + + # Group Leq files by location + location_files: dict = {} + for session in sessions: + files = db.query(DataFile).filter_by(session_id=session.id).all() + for file in files: + if not file.file_path or not file.file_path.lower().endswith('.rnd'): + continue + from pathlib import Path as _Path + abs_path = _Path("data") / file.file_path + peek = _peek_rnd_headers(abs_path) + if not _is_leq_file(file.file_path, peek): + continue + location = db.query(MonitoringLocation).filter_by(id=session.location_id).first() if session.location_id else None + loc_name = location.name if location else f"Session {session.id[:8]}" + if loc_name not in location_files: + location_files[loc_name] = [] + location_files[loc_name].append(file) + + if not location_files: + raise HTTPException(status_code=404, detail="No Leq measurement files found in project.") + + # Filter by enabled_locations if specified + if enabled_locations: + location_files = {k: v for k, v in location_files.items() if k in enabled_locations} + if not location_files: + raise HTTPException(status_code=404, detail="None of the selected locations have Leq files.") + + location_data = [] + for loc_name, files in 
sorted(location_files.items()): + all_rows = [] + for file in files: + rows = _read_rnd_file_rows(file.file_path) + rows, _ = _normalize_rnd_rows(rows) + all_rows.extend(rows) + + if not all_rows: + continue + + all_rows.sort(key=lambda r: r.get('Start Time', '')) + raw_count = len(all_rows) + + filtered_rows = _filter_rnd_rows(all_rows, start_time, end_time, start_date, end_date) + + spreadsheet_data = [] + for idx, row in enumerate(filtered_rows, 1): + start_time_str = row.get('Start Time', '') + date_str = time_str = '' + if start_time_str: + try: + dt = datetime.strptime(start_time_str, '%Y/%m/%d %H:%M:%S') + date_str = dt.strftime('%Y-%m-%d') + time_str = dt.strftime('%H:%M') + except ValueError: + date_str = start_time_str + + lmax = row.get('Lmax(Main)', '') + ln1 = row.get('LN1(Main)', '') + ln2 = row.get('LN2(Main)', '') + + spreadsheet_data.append([ + idx, + date_str, + time_str, + lmax if lmax else '', + ln1 if ln1 else '', + ln2 if ln2 else '', + '', + ]) + + location_data.append({ + "location_name": loc_name, + "raw_count": raw_count, + "filtered_count": len(filtered_rows), + "spreadsheet_data": spreadsheet_data, + }) + + return {"project": project, "location_data": location_data} + + # ============================================================================ # Project List & Overview # ============================================================================ @@ -57,9 +373,11 @@ async def get_projects_list( """ query = db.query(Project) - # Filter by status if provided + # Filter by status if provided; otherwise exclude soft-deleted projects if status: query = query.filter(Project.status == status) + else: + query = query.filter(Project.status != "deleted") # Filter by project type if provided if project_type_id: @@ -87,10 +405,10 @@ async def get_projects_list( ).scalar() # Count active sessions - active_session_count = db.query(func.count(RecordingSession.id)).filter( + active_session_count = db.query(func.count(MonitoringSession.id)).filter( 
and_( - RecordingSession.project_id == project.id, - RecordingSession.status == "recording", + MonitoringSession.project_id == project.id, + MonitoringSession.status == "recording", ) ).scalar() @@ -118,9 +436,10 @@ async def get_projects_stats(request: Request, db: Session = Depends(get_db)): Get summary statistics for projects overview. Returns HTML partial with stat cards. """ - # Count projects by status - total_projects = db.query(func.count(Project.id)).scalar() + # Count projects by status (exclude deleted) + total_projects = db.query(func.count(Project.id)).filter(Project.status != "deleted").scalar() active_projects = db.query(func.count(Project.id)).filter_by(status="active").scalar() + on_hold_projects = db.query(func.count(Project.id)).filter_by(status="on_hold").scalar() completed_projects = db.query(func.count(Project.id)).filter_by(status="completed").scalar() # Count total locations across all projects @@ -132,7 +451,7 @@ async def get_projects_stats(request: Request, db: Session = Depends(get_db)): ).scalar() # Count active recording sessions - active_sessions = db.query(func.count(RecordingSession.id)).filter_by( + active_sessions = db.query(func.count(MonitoringSession.id)).filter_by( status="recording" ).scalar() @@ -140,6 +459,7 @@ async def get_projects_stats(request: Request, db: Session = Depends(get_db)): "request": request, "total_projects": total_projects, "active_projects": active_projects, + "on_hold_projects": on_hold_projects, "completed_projects": completed_projects, "total_locations": total_locations, "assigned_units": assigned_units, @@ -178,13 +498,13 @@ async def search_projects( if not q.strip(): # Return recent active projects when no search term projects = db.query(Project).filter( - Project.status != "archived" + Project.status.notin_(["archived", "deleted"]) ).order_by(Project.updated_at.desc()).limit(limit).all() else: search_term = f"%{q}%" projects = db.query(Project).filter( and_( - Project.status != "archived", + 
Project.status.notin_(["archived", "deleted"]), or_( Project.project_number.ilike(search_term), Project.client_name.ilike(search_term), @@ -223,13 +543,13 @@ async def search_projects_json( """ if not q.strip(): projects = db.query(Project).filter( - Project.status != "archived" + Project.status.notin_(["archived", "deleted"]) ).order_by(Project.updated_at.desc()).limit(limit).all() else: search_term = f"%{q}%" projects = db.query(Project).filter( and_( - Project.status != "archived", + Project.status.notin_(["archived", "deleted"]), or_( Project.project_number.ilike(search_term), Project.client_name.ilike(search_term), @@ -310,6 +630,7 @@ async def get_project(project_id: str, db: Session = Depends(get_db)): "site_coordinates": project.site_coordinates, "start_date": project.start_date.isoformat() if project.start_date else None, "end_date": project.end_date.isoformat() if project.end_date else None, + "data_collection_mode": project.data_collection_mode or "manual", "created_at": project.created_at.isoformat(), "updated_at": project.updated_at.isoformat(), } @@ -338,6 +659,14 @@ async def update_project( project.description = data["description"] if "status" in data: project.status = data["status"] + # Cancel pending scheduled actions when archiving + if data["status"] == "archived": + db.query(ScheduledAction).filter( + and_( + ScheduledAction.project_id == project_id, + ScheduledAction.execution_status == "pending", + ) + ).update({"execution_status": "cancelled"}) if "client_name" in data: project.client_name = data["client_name"] if "site_address" in data: @@ -348,6 +677,8 @@ async def update_project( project.start_date = datetime.fromisoformat(data["start_date"]) if data["start_date"] else None if "end_date" in data: project.end_date = datetime.fromisoformat(data["end_date"]) if data["end_date"] else None + if "data_collection_mode" in data and data["data_collection_mode"] in ("remote", "manual"): + project.data_collection_mode = data["data_collection_mode"] 
project.updated_at = datetime.utcnow() @@ -359,18 +690,93 @@ async def update_project( @router.delete("/{project_id}") async def delete_project(project_id: str, db: Session = Depends(get_db)): """ - Delete a project (soft delete by archiving). + Soft-delete a project. Sets status='deleted' and records deleted_at timestamp. + Data will be permanently removed after 60 days (or via /permanent endpoint). """ project = db.query(Project).filter_by(id=project_id).first() if not project: raise HTTPException(status_code=404, detail="Project not found") - project.status = "archived" + project.status = "deleted" + project.deleted_at = datetime.utcnow() project.updated_at = datetime.utcnow() + # Cancel all pending scheduled actions + db.query(ScheduledAction).filter( + and_( + ScheduledAction.project_id == project_id, + ScheduledAction.execution_status == "pending", + ) + ).update({"execution_status": "cancelled"}) + db.commit() - return {"success": True, "message": "Project archived successfully"} + return {"success": True, "message": "Project deleted. Data will be permanently removed after 60 days."} + + +@router.delete("/{project_id}/permanent") +async def permanently_delete_project(project_id: str, db: Session = Depends(get_db)): + """ + Hard-delete a project and all related data. Only allowed when status='deleted'. + Removes: locations, assignments, sessions, scheduled actions, recurring schedules. 
+ """ + project = db.query(Project).filter_by(id=project_id).first() + if not project: + raise HTTPException(status_code=404, detail="Project not found") + if project.status != "deleted": + raise HTTPException(status_code=400, detail="Project must be soft-deleted before permanent deletion.") + + # Delete related data + db.query(RecurringSchedule).filter_by(project_id=project_id).delete() + db.query(ScheduledAction).filter_by(project_id=project_id).delete() + db.query(MonitoringSession).filter_by(project_id=project_id).delete() + db.query(UnitAssignment).filter_by(project_id=project_id).delete() + db.query(MonitoringLocation).filter_by(project_id=project_id).delete() + db.delete(project) + db.commit() + + return {"success": True, "message": "Project permanently deleted."} + + +@router.post("/{project_id}/hold") +async def hold_project(project_id: str, db: Session = Depends(get_db)): + """ + Put a project on hold. Pauses without archiving; assignments and schedules remain. + """ + project = db.query(Project).filter_by(id=project_id).first() + if not project: + raise HTTPException(status_code=404, detail="Project not found") + + project.status = "on_hold" + project.updated_at = datetime.utcnow() + + # Cancel pending scheduled actions so they don't appear in dashboards or fire + db.query(ScheduledAction).filter( + and_( + ScheduledAction.project_id == project_id, + ScheduledAction.execution_status == "pending", + ) + ).update({"execution_status": "cancelled"}) + + db.commit() + + return {"success": True, "message": "Project put on hold."} + + +@router.post("/{project_id}/unhold") +async def unhold_project(project_id: str, db: Session = Depends(get_db)): + """ + Resume a project that was on hold. 
+ """ + project = db.query(Project).filter_by(id=project_id).first() + if not project: + raise HTTPException(status_code=404, detail="Project not found") + + project.status = "active" + project.updated_at = datetime.utcnow() + db.commit() + + return {"success": True, "message": "Project resumed."} # ============================================================================ @@ -414,18 +820,18 @@ async def get_project_dashboard( }) # Get active recording sessions - active_sessions = db.query(RecordingSession).filter( + active_sessions = db.query(MonitoringSession).filter( and_( - RecordingSession.project_id == project_id, - RecordingSession.status == "recording", + MonitoringSession.project_id == project_id, + MonitoringSession.status == "recording", ) ).all() # Get completed sessions count - completed_sessions_count = db.query(func.count(RecordingSession.id)).filter( + completed_sessions_count = db.query(func.count(MonitoringSession.id)).filter( and_( - RecordingSession.project_id == project_id, - RecordingSession.status == "completed", + MonitoringSession.project_id == project_id, + MonitoringSession.status == "completed", ) ).scalar() @@ -504,26 +910,26 @@ async def get_project_units( location = db.query(MonitoringLocation).filter_by(id=assignment.location_id).first() # Count sessions for this assignment - session_count = db.query(func.count(RecordingSession.id)).filter_by( + session_count = db.query(func.count(MonitoringSession.id)).filter_by( location_id=assignment.location_id, unit_id=assignment.unit_id, ).scalar() # Count files from sessions file_count = db.query(func.count(DataFile.id)).join( - RecordingSession, - DataFile.session_id == RecordingSession.id + MonitoringSession, + DataFile.session_id == MonitoringSession.id ).filter( - RecordingSession.location_id == assignment.location_id, - RecordingSession.unit_id == assignment.unit_id, + MonitoringSession.location_id == assignment.location_id, + MonitoringSession.unit_id == assignment.unit_id, ).scalar() 
# Check if currently recording - active_session = db.query(RecordingSession).filter( + active_session = db.query(MonitoringSession).filter( and_( - RecordingSession.location_id == assignment.location_id, - RecordingSession.unit_id == assignment.unit_id, - RecordingSession.status == "recording", + MonitoringSession.location_id == assignment.location_id, + MonitoringSession.unit_id == assignment.unit_id, + MonitoringSession.status == "recording", ) ).first() @@ -610,10 +1016,14 @@ async def get_project_schedules( "result": result_data, }) + project = db.query(Project).filter_by(id=project_id).first() + project_status = project.status if project else "active" + return templates.TemplateResponse("partials/projects/schedule_list.html", { "request": request, "project_id": project_id, "schedules_by_date": schedules_by_date, + "project_status": project_status, }) @@ -706,13 +1116,13 @@ async def get_project_sessions( Returns HTML partial with session list. Optional status filter: recording, completed, paused, failed """ - query = db.query(RecordingSession).filter_by(project_id=project_id) + query = db.query(MonitoringSession).filter_by(project_id=project_id) # Filter by status if provided if status: - query = query.filter(RecordingSession.status == status) + query = query.filter(MonitoringSession.status == status) - sessions = query.order_by(RecordingSession.started_at.desc()).all() + sessions = query.order_by(MonitoringSession.started_at.desc()).all() # Enrich with unit and location details sessions_data = [] @@ -804,18 +1214,19 @@ async def ftp_download_to_server( raise HTTPException(status_code=400, detail="Missing unit_id or remote_path") # Get or create active session for this location/unit - session = db.query(RecordingSession).filter( + session = db.query(MonitoringSession).filter( and_( - RecordingSession.project_id == project_id, - RecordingSession.location_id == location_id, - RecordingSession.unit_id == unit_id, - RecordingSession.status.in_(["recording", 
"paused"]) + MonitoringSession.project_id == project_id, + MonitoringSession.location_id == location_id, + MonitoringSession.unit_id == unit_id, + MonitoringSession.status.in_(["recording", "paused"]) ) ).first() # If no active session, create one if not session: - session = RecordingSession( + _ftp_unit = db.query(RosterUnit).filter_by(id=unit_id).first() + session = MonitoringSession( id=str(uuid.uuid4()), project_id=project_id, location_id=location_id, @@ -824,6 +1235,7 @@ async def ftp_download_to_server( status="completed", started_at=datetime.utcnow(), stopped_at=datetime.utcnow(), + device_model=_ftp_unit.slm_model if _ftp_unit else None, session_metadata='{"source": "ftp_download", "note": "Auto-created for FTP download"}' ) db.add(session) @@ -969,18 +1381,19 @@ async def ftp_download_folder_to_server( raise HTTPException(status_code=400, detail="Missing unit_id or remote_path") # Get or create active session for this location/unit - session = db.query(RecordingSession).filter( + session = db.query(MonitoringSession).filter( and_( - RecordingSession.project_id == project_id, - RecordingSession.location_id == location_id, - RecordingSession.unit_id == unit_id, - RecordingSession.status.in_(["recording", "paused"]) + MonitoringSession.project_id == project_id, + MonitoringSession.location_id == location_id, + MonitoringSession.unit_id == unit_id, + MonitoringSession.status.in_(["recording", "paused"]) ) ).first() # If no active session, create one if not session: - session = RecordingSession( + _ftp_unit = db.query(RosterUnit).filter_by(id=unit_id).first() + session = MonitoringSession( id=str(uuid.uuid4()), project_id=project_id, location_id=location_id, @@ -989,6 +1402,7 @@ async def ftp_download_folder_to_server( status="completed", started_at=datetime.utcnow(), stopped_at=datetime.utcnow(), + device_model=_ftp_unit.slm_model if _ftp_unit else None, session_metadata='{"source": "ftp_folder_download", "note": "Auto-created for FTP folder download"}' ) 
db.add(session) @@ -1140,9 +1554,9 @@ async def get_unified_files( import json # Get all sessions for this project - sessions = db.query(RecordingSession).filter_by( + sessions = db.query(MonitoringSession).filter_by( project_id=project_id - ).order_by(RecordingSession.started_at.desc()).all() + ).order_by(MonitoringSession.started_at.desc()).all() sessions_data = [] for session in sessions: @@ -1219,7 +1633,7 @@ async def download_project_file( raise HTTPException(status_code=404, detail="File not found") # Verify file belongs to this project - session = db.query(RecordingSession).filter_by(id=file_record.session_id).first() + session = db.query(MonitoringSession).filter_by(id=file_record.session_id).first() if not session or session.project_id != project_id: raise HTTPException(status_code=403, detail="File does not belong to this project") @@ -1253,7 +1667,7 @@ async def download_session_files( import zipfile # Verify session belongs to this project - session = db.query(RecordingSession).filter_by(id=session_id).first() + session = db.query(MonitoringSession).filter_by(id=session_id).first() if not session: raise HTTPException(status_code=404, detail="Session not found") if session.project_id != project_id: @@ -1321,7 +1735,7 @@ async def delete_project_file( raise HTTPException(status_code=404, detail="File not found") # Verify file belongs to this project - session = db.query(RecordingSession).filter_by(id=file_record.session_id).first() + session = db.query(MonitoringSession).filter_by(id=file_record.session_id).first() if not session or session.project_id != project_id: raise HTTPException(status_code=403, detail="File does not belong to this project") @@ -1351,7 +1765,7 @@ async def delete_session( from pathlib import Path # Verify session belongs to this project - session = db.query(RecordingSession).filter_by(id=session_id).first() + session = db.query(MonitoringSession).filter_by(id=session_id).first() if not session: raise HTTPException(status_code=404, 
detail="Session not found") if session.project_id != project_id: @@ -1380,6 +1794,34 @@ async def delete_session( }) +VALID_PERIOD_TYPES = {"weekday_day", "weekday_night", "weekend_day", "weekend_night"} + +@router.patch("/{project_id}/sessions/{session_id}") +async def patch_session( + project_id: str, + session_id: str, + data: dict, + db: Session = Depends(get_db), +): + """Update session_label and/or period_type on a monitoring session.""" + session = db.query(MonitoringSession).filter_by(id=session_id).first() + if not session: + raise HTTPException(status_code=404, detail="Session not found") + if session.project_id != project_id: + raise HTTPException(status_code=403, detail="Session does not belong to this project") + + if "session_label" in data: + session.session_label = str(data["session_label"]).strip() or None + if "period_type" in data: + pt = data["period_type"] + if pt and pt not in VALID_PERIOD_TYPES: + raise HTTPException(status_code=400, detail=f"Invalid period_type. Must be one of: {', '.join(sorted(VALID_PERIOD_TYPES))}") + session.period_type = pt or None + + db.commit() + return JSONResponse({"status": "success", "session_label": session.session_label, "period_type": session.period_type}) + + @router.get("/{project_id}/files/{file_id}/view-rnd", response_class=HTMLResponse) async def view_rnd_file( request: Request, @@ -1400,7 +1842,7 @@ async def view_rnd_file( raise HTTPException(status_code=404, detail="File not found") # Verify file belongs to this project - session = db.query(RecordingSession).filter_by(id=file_record.session_id).first() + session = db.query(MonitoringSession).filter_by(id=file_record.session_id).first() if not session or session.project_id != project_id: raise HTTPException(status_code=403, detail="File does not belong to this project") @@ -1442,6 +1884,7 @@ async def view_rnd_file( "unit": unit, "metadata": metadata, "filename": file_path.name, + "is_leq": _is_leq_file(str(file_record.file_path), 
_peek_rnd_headers(file_path)), }) @@ -1466,7 +1909,7 @@ async def get_rnd_data( raise HTTPException(status_code=404, detail="File not found") # Verify file belongs to this project - session = db.query(RecordingSession).filter_by(id=file_record.session_id).first() + session = db.query(MonitoringSession).filter_by(id=file_record.session_id).first() if not session or session.project_id != project_id: raise HTTPException(status_code=403, detail="File does not belong to this project") @@ -1506,11 +1949,16 @@ async def get_rnd_data( cleaned_row[cleaned_key] = cleaned_value rows.append(cleaned_row) + # Normalise AU2-format columns to NL-43 names + rows, _was_au2 = _normalize_rnd_rows(rows) + if _was_au2: + headers = list(rows[0].keys()) if rows else headers + # Detect file type (Leq vs Lp) based on columns file_type = 'unknown' if headers: header_str = ','.join(headers).lower() - if 'leq' in header_str: + if 'leq(main)' in header_str or 'laeq' in header_str: file_type = 'leq' # Time-averaged data elif 'lp(main)' in header_str or 'lp (main)' in header_str: file_type = 'lp' # Instantaneous data @@ -1590,6 +2038,7 @@ async def generate_excel_report( import openpyxl from openpyxl.chart import LineChart, Reference from openpyxl.chart.label import DataLabelList + from openpyxl.chart.shapes import GraphicalProperties from openpyxl.styles import Font, Alignment, Border, Side, PatternFill from openpyxl.utils import get_column_letter except ImportError: @@ -1604,7 +2053,7 @@ async def generate_excel_report( raise HTTPException(status_code=404, detail="File not found") # Verify file belongs to this project - session = db.query(RecordingSession).filter_by(id=file_record.session_id).first() + session = db.query(MonitoringSession).filter_by(id=file_record.session_id).first() if not session or session.project_id != project_id: raise HTTPException(status_code=403, detail="File does not belong to this project") @@ -1617,15 +2066,7 @@ async def generate_excel_report( if not 
file_path.exists(): raise HTTPException(status_code=404, detail="File not found on disk") - # Validate this is a Leq file (contains '_Leq_' in path) - # Lp files (instantaneous 100ms readings) don't have the LN percentile data needed for reports - if '_Leq_' not in file_record.file_path: - raise HTTPException( - status_code=400, - detail="Reports can only be generated from Leq files (15-minute averaged data). This appears to be an Lp (instantaneous) file." - ) - - # Read and parse the Leq RND file + # Read and parse the RND file try: with open(file_path, 'r', encoding='utf-8', errors='replace') as f: content = f.read() @@ -1651,6 +2092,18 @@ async def generate_excel_report( if not rnd_rows: raise HTTPException(status_code=400, detail="No data found in RND file") + # Normalise AU2-format columns to NL-43 names + rnd_rows, _ = _normalize_rnd_rows(rnd_rows) + + # Validate this is a Leq file — Lp files lack the LN percentile data + if not _is_leq_file(file_record.file_path, rnd_rows): + raise HTTPException( + status_code=400, + detail="Reports can only be generated from Leq files (15-minute averaged data). This appears to be an Lp (instantaneous) file." 
+ ) + + except HTTPException: + raise except Exception as e: logger.error(f"Error reading RND file: {e}") raise HTTPException(status_code=500, detail=f"Error reading file: {str(e)}") @@ -1753,213 +2206,233 @@ async def generate_excel_report( ws = wb.active ws.title = "Sound Level Data" - # Define styles - title_font = Font(bold=True, size=14) - header_font = Font(bold=True, size=10) - thin_border = Border( - left=Side(style='thin'), - right=Side(style='thin'), - top=Side(style='thin'), - bottom=Side(style='thin') - ) - header_fill = PatternFill(start_color="DAEEF3", end_color="DAEEF3", fill_type="solid") + # --- Styles --- + f_title = Font(name='Arial', bold=True, size=12) + f_data = Font(name='Arial', size=10) + f_bold = Font(name='Arial', bold=True, size=10) - # Row 1: Report title + thin = Side(style='thin') + dbl = Side(style='double') + + # Header row: double top border; leftmost/rightmost cells get double outer edge + hdr_inner = Border(left=thin, right=thin, top=dbl, bottom=thin) + hdr_left = Border(left=dbl, right=thin, top=dbl, bottom=thin) + hdr_right = Border(left=thin, right=dbl, top=dbl, bottom=thin) + # Last data row: double bottom border + last_inner = Border(left=thin, right=thin, top=thin, bottom=dbl) + last_left = Border(left=dbl, right=thin, top=thin, bottom=dbl) + last_right = Border(left=thin, right=dbl, top=thin, bottom=dbl) + # Normal data rows + data_inner = Border(left=thin, right=thin, top=thin, bottom=thin) + data_left = Border(left=dbl, right=thin, top=thin, bottom=thin) + data_right = Border(left=thin, right=dbl, top=thin, bottom=thin) + + hdr_fill = PatternFill(start_color="F2F2F2", end_color="F2F2F2", fill_type="solid") + center = Alignment(horizontal='center', vertical='center', wrap_text=True) + left = Alignment(horizontal='left', vertical='center') + right = Alignment(horizontal='right', vertical='center') + + # Column widths from Soundstudyexample.xlsx NRL_1 (sheet2) + # A B C D E F G H I J K L M N O P + for col_i, col_w in 
zip(range(1, 17), [9.43, 10.14, 8.14, 12.86, 10.86, 10.86, 25.0, 6.43, 12.43, 12.43, 10.0, 14.71, 8.0, 6.43, 6.43, 6.43]): + ws.column_dimensions[get_column_letter(col_i)].width = col_w + + # --- Header rows 1-6 --- final_project_name = project_name if project_name else (project.name if project else "") - final_title = report_title - if final_project_name: - final_title = f"{report_title} - {final_project_name}" - ws['A1'] = final_title - ws['A1'].font = title_font + final_location = location_name if location_name else (location.name if location else "") + final_title = f"{report_title} - {final_project_name}" if final_project_name else report_title + ws.merge_cells('A1:G1') + ws['A1'] = final_title + ws['A1'].font = f_title; ws['A1'].alignment = center + ws.row_dimensions[1].height = 15.75 - # Row 2: Client name (if provided) - if client_name: - ws['A2'] = f"Client: {client_name}" - ws['A2'].font = Font(italic=True, size=10) + ws.row_dimensions[2].height = 15 - # Row 3: Location name - final_location = location_name - if not final_location and location: - final_location = location.name - if final_location: - ws['A3'] = final_location - ws['A3'].font = Font(bold=True, size=11) + ws.merge_cells('A3:G3') + ws['A3'] = final_location + ws['A3'].font = f_title; ws['A3'].alignment = center + ws.row_dimensions[3].height = 15.75 - # Row 4: Time filter info (if applied) - if start_time and end_time: - filter_info = f"Time Filter: {start_time} - {end_time}" - if start_date or end_date: - filter_info += f" | Date Range: {start_date or 'start'} to {end_date or 'end'}" - filter_info += f" | {len(rnd_rows)} of {original_count} rows" - ws['A4'] = filter_info - ws['A4'].font = Font(italic=True, size=9, color="666666") + ws.row_dimensions[4].height = 15 - # Row 7: Headers - headers = ['Test Increment #', 'Date', 'Time', 'LAmax (dBA)', 'LA01 (dBA)', 'LA10 (dBA)', 'Comments'] - for col, header in enumerate(headers, 1): - cell = ws.cell(row=7, column=col, value=header) - cell.font = 
header_font - cell.border = thin_border - cell.fill = header_fill - cell.alignment = Alignment(horizontal='center') + date_range_str = '' + if start_date or end_date: + date_range_str = f"{start_date or ''} to {end_date or ''}" + elif start_time and end_time: + date_range_str = f"{start_time} - {end_time}" + ws.merge_cells('A5:G5') + ws['A5'] = date_range_str + ws['A5'].font = f_data; ws['A5'].alignment = center + ws.row_dimensions[5].height = 15.75 - # Set column widths - column_widths = [16, 12, 10, 12, 12, 12, 40] - for i, width in enumerate(column_widths, 1): - ws.column_dimensions[get_column_letter(i)].width = width + hdr_labels = ['Interval #', 'Date', 'Time', 'LAmax (dBA)', 'LA01 (dBA)', 'LA10 (dBA)', 'Comments'] + for col, label in enumerate(hdr_labels, 1): + cell = ws.cell(row=6, column=col, value=label) + cell.font = f_bold; cell.fill = hdr_fill; cell.alignment = center + cell.border = hdr_left if col == 1 else (hdr_right if col == 7 else hdr_inner) + ws.row_dimensions[6].height = 39 - # Data rows starting at row 8 - data_start_row = 8 + # --- Data rows starting at row 7 --- + data_start_row = 7 + parsed_rows = [] for idx, row in enumerate(rnd_rows, 1): - data_row = data_start_row + idx - 1 + dr = data_start_row + idx - 1 + is_last = (idx == len(rnd_rows)) + b_left = last_left if is_last else data_left + b_inner = last_inner if is_last else data_inner + b_right = last_right if is_last else data_right - # Test Increment # - ws.cell(row=data_row, column=1, value=idx).border = thin_border + c = ws.cell(row=dr, column=1, value=idx) + c.font = f_data; c.alignment = center; c.border = b_left - # Parse the Start Time to get Date and Time start_time_str = row.get('Start Time', '') + row_dt = None if start_time_str: try: - # Format: "2025/12/26 20:23:38" - dt = datetime.strptime(start_time_str, '%Y/%m/%d %H:%M:%S') - ws.cell(row=data_row, column=2, value=dt.date()) - ws.cell(row=data_row, column=3, value=dt.time()) + row_dt = datetime.strptime(start_time_str, 
'%Y/%m/%d %H:%M:%S') + c2 = ws.cell(row=dr, column=2, value=row_dt.strftime('%m/%d/%y')) + c3 = ws.cell(row=dr, column=3, value=row_dt.strftime('%H:%M')) except ValueError: - ws.cell(row=data_row, column=2, value=start_time_str) - ws.cell(row=data_row, column=3, value='') + c2 = ws.cell(row=dr, column=2, value=start_time_str) + c3 = ws.cell(row=dr, column=3, value='') else: - ws.cell(row=data_row, column=2, value='') - ws.cell(row=data_row, column=3, value='') + c2 = ws.cell(row=dr, column=2, value='') + c3 = ws.cell(row=dr, column=3, value='') + c2.font = f_data; c2.alignment = center; c2.border = b_inner + c3.font = f_data; c3.alignment = center; c3.border = b_inner - # LAmax - from Lmax(Main) lmax = row.get('Lmax(Main)') - ws.cell(row=data_row, column=4, value=lmax if lmax else '').border = thin_border + ln1 = row.get('LN1(Main)') + ln2 = row.get('LN2(Main)') + for col_idx, val in [(4, lmax), (5, ln1), (6, ln2)]: + c = ws.cell(row=dr, column=col_idx, value=val if isinstance(val, (int, float)) else '') + c.font = f_data; c.alignment = center; c.border = b_inner - # LA01 - from LN1(Main) - ln1 = row.get('LN1(Main)') - ws.cell(row=data_row, column=5, value=ln1 if ln1 else '').border = thin_border + c = ws.cell(row=dr, column=7, value='') + c.font = f_data; c.alignment = left; c.border = b_right + ws.row_dimensions[dr].height = 15 - # LA10 - from LN2(Main) - ln2 = row.get('LN2(Main)') - ws.cell(row=data_row, column=6, value=ln2 if ln2 else '').border = thin_border - - # Comments (empty for now, can be populated) - ws.cell(row=data_row, column=7, value='').border = thin_border - - # Apply borders to date/time cells - ws.cell(row=data_row, column=2).border = thin_border - ws.cell(row=data_row, column=3).border = thin_border + if row_dt and isinstance(lmax, (int, float)) and isinstance(ln1, (int, float)) and isinstance(ln2, (int, float)): + parsed_rows.append((row_dt, lmax, ln1, ln2)) data_end_row = data_start_row + len(rnd_rows) - 1 - # Add Line Chart + # --- Chart 
anchored at H4, spanning H4:P29 --- chart = LineChart() - chart.title = f"{final_location or 'Sound Level Data'} - Background Noise Study" - chart.style = 10 + chart.title = f"{final_location} - {final_title}" if final_location else final_title + chart.style = 2 chart.y_axis.title = "Sound Level (dBA)" - chart.x_axis.title = "Test Increment" - chart.height = 12 - chart.width = 20 - - # Data references (LAmax, LA01, LA10 are columns D, E, F) - data_ref = Reference(ws, min_col=4, min_row=7, max_col=6, max_row=data_end_row) - categories = Reference(ws, min_col=1, min_row=data_start_row, max_row=data_end_row) + chart.x_axis.title = "Time Period (15 Minute Intervals)" + # 9 cols × 0.70" = 6.3" wide; H4:P29 = 25 rows at ~15pt ≈ 16.5cm tall + chart.height = 12.7 + chart.width = 15.7 + data_ref = Reference(ws, min_col=4, min_row=6, max_col=6, max_row=data_end_row) + categories = Reference(ws, min_col=3, min_row=data_start_row, max_row=data_end_row) chart.add_data(data_ref, titles_from_data=True) chart.set_categories(categories) - # Style the series if len(chart.series) >= 3: - chart.series[0].graphicalProperties.line.solidFill = "FF0000" # LAmax - Red - chart.series[1].graphicalProperties.line.solidFill = "00B050" # LA01 - Green - chart.series[2].graphicalProperties.line.solidFill = "0070C0" # LA10 - Blue + chart.series[0].graphicalProperties.line.solidFill = "C00000" + chart.series[0].graphicalProperties.line.width = 15875 + chart.series[1].graphicalProperties.line.solidFill = "00B050" + chart.series[1].graphicalProperties.line.width = 19050 + chart.series[2].graphicalProperties.line.solidFill = "0070C0" + chart.series[2].graphicalProperties.line.width = 19050 - # Position chart to the right of data - ws.add_chart(chart, "I3") + _plot_border = GraphicalProperties() + _plot_border.ln.solidFill = "000000" + _plot_border.ln.w = 12700 + chart.plot_area.spPr = _plot_border + ws.add_chart(chart, "H4") - # Add summary statistics section below the data - summary_row = 
data_end_row + 3 - ws.cell(row=summary_row, column=1, value="Summary Statistics").font = Font(bold=True, size=12) + # --- Stats table: note at I28-I29, headers at I31, data rows 32-34 --- + note1 = ws.cell(row=28, column=9, value="Note: Averages are calculated by determining the arithmetic average ") + note1.font = f_data; note1.alignment = left + ws.merge_cells(start_row=28, start_column=9, end_row=28, end_column=14) + note2 = ws.cell(row=29, column=9, value="for each specified range of time intervals.") + note2.font = f_data; note2.alignment = left + ws.merge_cells(start_row=29, start_column=9, end_row=29, end_column=14) - # Calculate time-period statistics - time_periods = { - 'Evening (7PM-10PM)': [], - 'Nighttime (10PM-7AM)': [], - 'Morning (7AM-12PM)': [], - 'Daytime (12PM-7PM)': [] - } + # Table header row 31 + med = Side(style='medium') + tbl_top_left = Border(left=med, right=Side(style='thin'), top=med, bottom=Side(style='thin')) + tbl_top_mid = Border(left=Side(style='thin'), right=Side(style='thin'), top=med, bottom=Side(style='thin')) + tbl_top_right = Border(left=Side(style='thin'), right=med, top=med, bottom=Side(style='thin')) + tbl_mid_left = Border(left=med, right=Side(style='thin'), top=Side(style='thin'), bottom=Side(style='thin')) + tbl_mid_mid = Border(left=Side(style='thin'), right=Side(style='thin'), top=Side(style='thin'), bottom=Side(style='thin')) + tbl_mid_right = Border(left=Side(style='thin'), right=med, top=Side(style='thin'), bottom=Side(style='thin')) + tbl_bot_left = Border(left=med, right=Side(style='thin'), top=Side(style='thin'), bottom=med) + tbl_bot_mid = Border(left=Side(style='thin'), right=Side(style='thin'), top=Side(style='thin'), bottom=med) + tbl_bot_right = Border(left=Side(style='thin'), right=med, top=Side(style='thin'), bottom=med) + # No vertical divider between value and dBA cells + tbl_top_val = Border(left=Side(style='thin'), right=Side(), top=med, bottom=Side(style='thin')) + tbl_top_unit = Border(left=Side(), 
right=Side(style='thin'), top=med, bottom=Side(style='thin')) + tbl_top_rval = Border(left=Side(style='thin'), right=Side(), top=med, bottom=Side(style='thin')) + tbl_top_runit = Border(left=Side(), right=med, top=med, bottom=Side(style='thin')) + tbl_mid_val = Border(left=Side(style='thin'), right=Side(), top=Side(style='thin'), bottom=Side(style='thin')) + tbl_mid_unit = Border(left=Side(), right=Side(style='thin'), top=Side(style='thin'), bottom=Side(style='thin')) + tbl_mid_rval = Border(left=Side(style='thin'), right=Side(), top=Side(style='thin'), bottom=Side(style='thin')) + tbl_mid_runit = Border(left=Side(), right=med, top=Side(style='thin'), bottom=Side(style='thin')) + tbl_bot_val = Border(left=Side(style='thin'), right=Side(), top=Side(style='thin'), bottom=med) + tbl_bot_unit = Border(left=Side(), right=Side(style='thin'), top=Side(style='thin'), bottom=med) + tbl_bot_rval = Border(left=Side(style='thin'), right=Side(), top=Side(style='thin'), bottom=med) + tbl_bot_runit = Border(left=Side(), right=med, top=Side(style='thin'), bottom=med) - for row in rnd_rows: - start_time_str = row.get('Start Time', '') - if start_time_str: - try: - dt = datetime.strptime(start_time_str, '%Y/%m/%d %H:%M:%S') - hour = dt.hour + hdr_fill_tbl = PatternFill(start_color="F2F2F2", end_color="F2F2F2", fill_type="solid") - lmax = row.get('Lmax(Main)') - ln1 = row.get('LN1(Main)') - ln2 = row.get('LN2(Main)') + # Header row: blank | Evening | Nighttime + c = ws.cell(row=31, column=9, value=""); c.border = tbl_top_left; c.font = f_bold + c = ws.cell(row=31, column=10, value="Evening (7PM to 10PM)") + c.font = f_bold; c.alignment = Alignment(horizontal='center', vertical='center', wrap_text=True) + c.border = tbl_top_mid; c.fill = hdr_fill_tbl + ws.merge_cells(start_row=31, start_column=10, end_row=31, end_column=11) + c = ws.cell(row=31, column=12, value="Nighttime (10PM to 7AM)") + c.font = f_bold; c.alignment = Alignment(horizontal='center', vertical='center', 
wrap_text=True) + c.border = tbl_top_right; c.fill = hdr_fill_tbl + ws.merge_cells(start_row=31, start_column=12, end_row=31, end_column=13) + ws.row_dimensions[31].height = 15 - if isinstance(lmax, (int, float)) and isinstance(ln1, (int, float)) and isinstance(ln2, (int, float)): - data_point = {'lmax': lmax, 'ln1': ln1, 'ln2': ln2} + evening = [(lmax, ln1, ln2) for dt, lmax, ln1, ln2 in parsed_rows if 19 <= dt.hour < 22] + nighttime = [(lmax, ln1, ln2) for dt, lmax, ln1, ln2 in parsed_rows if dt.hour >= 22 or dt.hour < 7] - if 19 <= hour < 22: - time_periods['Evening (7PM-10PM)'].append(data_point) - elif hour >= 22 or hour < 7: - time_periods['Nighttime (10PM-7AM)'].append(data_point) - elif 7 <= hour < 12: - time_periods['Morning (7AM-12PM)'].append(data_point) - else: # 12-19 - time_periods['Daytime (12PM-7PM)'].append(data_point) - except ValueError: - continue + def _avg(vals): return round(sum(vals) / len(vals), 1) if vals else None + def _max(vals): return round(max(vals), 1) if vals else None - # Summary table headers - summary_row += 2 - summary_headers = ['Time Period', 'Samples', 'LAmax Avg', 'LA01 Avg', 'LA10 Avg'] - for col, header in enumerate(summary_headers, 1): - cell = ws.cell(row=summary_row, column=col, value=header) - cell.font = header_font - cell.fill = header_fill - cell.border = thin_border + def write_stat(row_num, label, eve_val, night_val, is_last=False): + bl = tbl_bot_left if is_last else tbl_mid_left + bm = tbl_bot_mid if is_last else tbl_mid_mid + br = tbl_bot_right if is_last else tbl_mid_right + lbl = ws.cell(row=row_num, column=9, value=label) + lbl.font = f_data; lbl.border = bl + lbl.alignment = Alignment(horizontal='left', vertical='center') + ev_str = f"{eve_val} dBA" if eve_val is not None else "" + ev = ws.cell(row=row_num, column=10, value=ev_str) + ev.font = f_bold; ev.border = bm + ev.alignment = Alignment(horizontal='center', vertical='center') + ws.merge_cells(start_row=row_num, start_column=10, end_row=row_num, 
end_column=11) + ni_str = f"{night_val} dBA" if night_val is not None else "" + ni = ws.cell(row=row_num, column=12, value=ni_str) + ni.font = f_bold; ni.border = br + ni.alignment = Alignment(horizontal='center', vertical='center') + ws.merge_cells(start_row=row_num, start_column=12, end_row=row_num, end_column=13) - # Summary data - summary_row += 1 - for period_name, samples in time_periods.items(): - ws.cell(row=summary_row, column=1, value=period_name).border = thin_border - ws.cell(row=summary_row, column=2, value=len(samples)).border = thin_border + write_stat(32, "LAmax", _max([v[0] for v in evening]), _max([v[0] for v in nighttime])) + write_stat(33, "LA01 Average",_avg([v[1] for v in evening]), _avg([v[1] for v in nighttime])) + write_stat(34, "LA10 Average",_avg([v[2] for v in evening]), _avg([v[2] for v in nighttime]), is_last=True) - if samples: - avg_lmax = sum(s['lmax'] for s in samples) / len(samples) - avg_ln1 = sum(s['ln1'] for s in samples) / len(samples) - avg_ln2 = sum(s['ln2'] for s in samples) / len(samples) - ws.cell(row=summary_row, column=3, value=round(avg_lmax, 1)).border = thin_border - ws.cell(row=summary_row, column=4, value=round(avg_ln1, 1)).border = thin_border - ws.cell(row=summary_row, column=5, value=round(avg_ln2, 1)).border = thin_border - else: - ws.cell(row=summary_row, column=3, value='-').border = thin_border - ws.cell(row=summary_row, column=4, value='-').border = thin_border - ws.cell(row=summary_row, column=5, value='-').border = thin_border - - summary_row += 1 - - # Overall summary - summary_row += 1 - ws.cell(row=summary_row, column=1, value='Overall').font = Font(bold=True) - ws.cell(row=summary_row, column=1).border = thin_border - ws.cell(row=summary_row, column=2, value=len(rnd_rows)).border = thin_border - - all_lmax = [r.get('Lmax(Main)') for r in rnd_rows if isinstance(r.get('Lmax(Main)'), (int, float))] - all_ln1 = [r.get('LN1(Main)') for r in rnd_rows if isinstance(r.get('LN1(Main)'), (int, float))] - 
all_ln2 = [r.get('LN2(Main)') for r in rnd_rows if isinstance(r.get('LN2(Main)'), (int, float))] - - if all_lmax: - ws.cell(row=summary_row, column=3, value=round(sum(all_lmax) / len(all_lmax), 1)).border = thin_border - if all_ln1: - ws.cell(row=summary_row, column=4, value=round(sum(all_ln1) / len(all_ln1), 1)).border = thin_border - if all_ln2: - ws.cell(row=summary_row, column=5, value=round(sum(all_ln2) / len(all_ln2), 1)).border = thin_border + # --- Page setup: portrait, letter, template margins --- + from openpyxl.worksheet.properties import PageSetupProperties + ws.sheet_properties.pageSetUpPr = PageSetupProperties(fitToPage=False) + ws.page_setup.orientation = 'portrait' + ws.page_setup.paperSize = 1 # Letter + ws.page_margins.left = 0.75 + ws.page_margins.right = 0.75 + ws.page_margins.top = 1.0 + ws.page_margins.bottom = 1.0 + ws.page_margins.header = 0.5 + ws.page_margins.footer = 0.5 # Save to buffer output = io.BytesIO() @@ -2010,7 +2483,7 @@ async def preview_report_data( raise HTTPException(status_code=404, detail="File not found") # Verify file belongs to this project - session = db.query(RecordingSession).filter_by(id=file_record.session_id).first() + session = db.query(MonitoringSession).filter_by(id=file_record.session_id).first() if not session or session.project_id != project_id: raise HTTPException(status_code=403, detail="File does not belong to this project") @@ -2023,14 +2496,7 @@ async def preview_report_data( if not file_path.exists(): raise HTTPException(status_code=404, detail="File not found on disk") - # Validate this is a Leq file - if '_Leq_' not in file_record.file_path: - raise HTTPException( - status_code=400, - detail="Reports can only be generated from Leq files (15-minute averaged data)." 
- ) - - # Read and parse the Leq RND file + # Read and parse the RND file try: with open(file_path, 'r', encoding='utf-8', errors='replace') as f: content = f.read() @@ -2056,6 +2522,16 @@ async def preview_report_data( if not rnd_rows: raise HTTPException(status_code=400, detail="No data found in RND file") + rnd_rows, _ = _normalize_rnd_rows(rnd_rows) + + if not _is_leq_file(file_record.file_path, rnd_rows): + raise HTTPException( + status_code=400, + detail="Reports can only be generated from Leq files (15-minute averaged data)." + ) + + except HTTPException: + raise except Exception as e: logger.error(f"Error reading RND file: {e}") raise HTTPException(status_code=500, detail=f"Error reading file: {str(e)}") @@ -2143,7 +2619,7 @@ async def preview_report_data( try: dt = datetime.strptime(start_time_str, '%Y/%m/%d %H:%M:%S') date_str = dt.strftime('%Y-%m-%d') - time_str = dt.strftime('%H:%M:%S') + time_str = dt.strftime('%H:%M') except ValueError: date_str = start_time_str time_str = '' @@ -2167,7 +2643,7 @@ async def preview_report_data( final_location = location_name if location_name else (location.name if location else "") # Get templates for the dropdown - templates = db.query(ReportTemplate).all() + report_templates = db.query(ReportTemplate).all() return templates.TemplateResponse("report_preview.html", { "request": request, @@ -2187,7 +2663,7 @@ async def preview_report_data( "end_date": end_date, "original_count": original_count, "filtered_count": len(rnd_rows), - "templates": templates, + "templates": report_templates, }) @@ -2208,6 +2684,7 @@ async def generate_report_from_preview( try: import openpyxl from openpyxl.chart import LineChart, Reference + from openpyxl.chart.shapes import GraphicalProperties from openpyxl.styles import Font, Alignment, Border, Side, PatternFill from openpyxl.utils import get_column_letter except ImportError: @@ -2218,7 +2695,7 @@ async def generate_report_from_preview( if not file_record: raise 
HTTPException(status_code=404, detail="File not found") - session = db.query(RecordingSession).filter_by(id=file_record.session_id).first() + session = db.query(MonitoringSession).filter_by(id=file_record.session_id).first() if not session or session.project_id != project_id: raise HTTPException(status_code=403, detail="File does not belong to this project") @@ -2241,84 +2718,208 @@ async def generate_report_from_preview( ws = wb.active ws.title = "Sound Level Data" - # Styles - title_font = Font(bold=True, size=14) - header_font = Font(bold=True, size=10) - thin_border = Border( - left=Side(style='thin'), - right=Side(style='thin'), - top=Side(style='thin'), - bottom=Side(style='thin') - ) - header_fill = PatternFill(start_color="DAEEF3", end_color="DAEEF3", fill_type="solid") + # --- Styles --- + f_title = Font(name='Arial', bold=True, size=12) + f_data = Font(name='Arial', size=10) + f_bold = Font(name='Arial', bold=True, size=10) - # Row 1: Title + thin = Side(style='thin') + dbl = Side(style='double') + + hdr_inner = Border(left=thin, right=thin, top=dbl, bottom=thin) + hdr_left = Border(left=dbl, right=thin, top=dbl, bottom=thin) + hdr_right = Border(left=thin, right=dbl, top=dbl, bottom=thin) + last_inner = Border(left=thin, right=thin, top=thin, bottom=dbl) + last_left = Border(left=dbl, right=thin, top=thin, bottom=dbl) + last_right = Border(left=thin, right=dbl, top=thin, bottom=dbl) + data_inner = Border(left=thin, right=thin, top=thin, bottom=thin) + data_left = Border(left=dbl, right=thin, top=thin, bottom=thin) + data_right = Border(left=thin, right=dbl, top=thin, bottom=thin) + + hdr_fill = PatternFill(start_color="F2F2F2", end_color="F2F2F2", fill_type="solid") + center = Alignment(horizontal='center', vertical='center', wrap_text=True) + left = Alignment(horizontal='left', vertical='center') + right = Alignment(horizontal='right', vertical='center') + + # Column widths from Soundstudyexample.xlsx NRL_1 (sheet2) + # A B C D E F G H I J K L M N O P + 
for col_i, col_w in zip(range(1, 17), [9.43, 10.14, 8.14, 12.86, 10.86, 10.86, 25.0, 6.43, 12.43, 12.43, 10.0, 14.71, 8.0, 6.43, 6.43, 6.43]): + ws.column_dimensions[get_column_letter(col_i)].width = col_w + + # --- Header rows 1-6 --- final_title = f"{report_title} - {project_name}" if project_name else report_title - ws['A1'] = final_title - ws['A1'].font = title_font + ws.merge_cells('A1:G1') + ws['A1'] = final_title + ws['A1'].font = f_title; ws['A1'].alignment = center + ws.row_dimensions[1].height = 15.75 + ws.row_dimensions[2].height = 15 - # Row 2: Client - if client_name: - ws['A2'] = f"Client: {client_name}" - ws['A2'].font = Font(italic=True, size=10) + ws.merge_cells('A3:G3') + ws['A3'] = location_name + ws['A3'].font = f_title; ws['A3'].alignment = center + ws.row_dimensions[3].height = 15.75 + ws.row_dimensions[4].height = 15 - # Row 3: Location - if location_name: - ws['A3'] = location_name - ws['A3'].font = Font(bold=True, size=11) + ws.merge_cells('A5:G5') + ws['A5'] = time_filter + ws['A5'].font = f_data; ws['A5'].alignment = center + ws.row_dimensions[5].height = 15.75 - # Row 4: Time filter info - if time_filter: - ws['A4'] = time_filter - ws['A4'].font = Font(italic=True, size=9, color="666666") + hdr_labels = ['Interval #', 'Date', 'Time', 'LAmax (dBA)', 'LA01 (dBA)', 'LA10 (dBA)', 'Comments'] + for col, label in enumerate(hdr_labels, 1): + cell = ws.cell(row=6, column=col, value=label) + cell.font = f_bold; cell.fill = hdr_fill; cell.alignment = center + cell.border = hdr_left if col == 1 else (hdr_right if col == 7 else hdr_inner) + ws.row_dimensions[6].height = 39 - # Row 7: Headers - headers = ['Test Increment #', 'Date', 'Time', 'LAmax (dBA)', 'LA01 (dBA)', 'LA10 (dBA)', 'Comments'] - for col, header in enumerate(headers, 1): - cell = ws.cell(row=7, column=col, value=header) - cell.font = header_font - cell.border = thin_border - cell.fill = header_fill - cell.alignment = Alignment(horizontal='center') - - # Column widths - column_widths 
= [16, 12, 10, 12, 12, 12, 40] - for i, width in enumerate(column_widths, 1): - ws.column_dimensions[get_column_letter(i)].width = width - - # Data rows - data_start_row = 8 - for idx, row_data in enumerate(spreadsheet_data): - data_row = data_start_row + idx + # --- Data rows starting at row 7 --- + data_start_row = 7 + parsed_rows = [] + for idx, row_data in enumerate(spreadsheet_data, 1): + dr = data_start_row + idx - 1 + is_last = (idx == len(spreadsheet_data)) + b_left = last_left if is_last else data_left + b_inner = last_inner if is_last else data_inner + b_right = last_right if is_last else data_right + col_borders = [b_left] + [b_inner] * 5 + [b_right] + col_aligns = [center, center, center, center, center, center, left] for col, value in enumerate(row_data, 1): - cell = ws.cell(row=data_row, column=col, value=value if value != '' else None) - cell.border = thin_border + cell = ws.cell(row=dr, column=col, value=value if value != '' else None) + cell.font = f_data + cell.border = col_borders[col - 1] if col <= 7 else b_inner + cell.alignment = col_aligns[col - 1] if col <= 7 else left + ws.row_dimensions[dr].height = 15 + + try: + time_str = row_data[2] if len(row_data) > 2 else '' + lmax_v = row_data[3] if len(row_data) > 3 else '' + ln1_v = row_data[4] if len(row_data) > 4 else '' + ln2_v = row_data[5] if len(row_data) > 5 else '' + if time_str and isinstance(lmax_v, (int, float)): + try: + row_dt = datetime.strptime(time_str, '%H:%M') + except ValueError: + row_dt = datetime.strptime(time_str, '%H:%M:%S') + parsed_rows.append((row_dt, float(lmax_v), float(ln1_v), float(ln2_v))) + except (ValueError, TypeError): + pass data_end_row = data_start_row + len(spreadsheet_data) - 1 - # Add chart if we have data - if len(spreadsheet_data) > 0: + # --- Chart anchored at H4, spanning H4:P29 --- + if spreadsheet_data: chart = LineChart() - chart.title = f"{location_name or 'Sound Level Data'} - Background Noise Study" - chart.style = 10 + chart.title = 
f"{location_name} - {final_title}" if location_name else final_title + chart.style = 2 chart.y_axis.title = "Sound Level (dBA)" - chart.x_axis.title = "Test Increment" - chart.height = 12 - chart.width = 20 - - data_ref = Reference(ws, min_col=4, min_row=7, max_col=6, max_row=data_end_row) - categories = Reference(ws, min_col=1, min_row=data_start_row, max_row=data_end_row) - + chart.x_axis.title = "Time Period (15 Minute Intervals)" + chart.height = 12.7 + chart.width = 15.7 + data_ref = Reference(ws, min_col=4, min_row=6, max_col=6, max_row=data_end_row) + categories = Reference(ws, min_col=3, min_row=data_start_row, max_row=data_end_row) chart.add_data(data_ref, titles_from_data=True) chart.set_categories(categories) - if len(chart.series) >= 3: - chart.series[0].graphicalProperties.line.solidFill = "FF0000" + chart.series[0].graphicalProperties.line.solidFill = "C00000" + chart.series[0].graphicalProperties.line.width = 15875 chart.series[1].graphicalProperties.line.solidFill = "00B050" + chart.series[1].graphicalProperties.line.width = 19050 chart.series[2].graphicalProperties.line.solidFill = "0070C0" + chart.series[2].graphicalProperties.line.width = 19050 + _plot_border = GraphicalProperties() + _plot_border.ln.solidFill = "000000" + _plot_border.ln.w = 12700 + chart.plot_area.spPr = _plot_border + ws.add_chart(chart, "H4") - ws.add_chart(chart, "I3") + # --- Stats block starting at I28 --- + # Stats table: note at I28-I29, headers at I31, data rows 32-34, border row 35 + note1 = ws.cell(row=28, column=9, value="Note: Averages are calculated by determining the arithmetic average ") + note1.font = f_data; note1.alignment = left + ws.merge_cells(start_row=28, start_column=9, end_row=28, end_column=14) + note2 = ws.cell(row=29, column=9, value="for each specified range of time intervals.") + note2.font = f_data; note2.alignment = left + ws.merge_cells(start_row=29, start_column=9, end_row=29, end_column=14) + + # Table header row 31 + med = 
Side(style='medium') + tbl_top_left = Border(left=med, right=Side(style='thin'), top=med, bottom=Side(style='thin')) + tbl_top_mid = Border(left=Side(style='thin'), right=Side(style='thin'), top=med, bottom=Side(style='thin')) + tbl_top_right = Border(left=Side(style='thin'), right=med, top=med, bottom=Side(style='thin')) + tbl_mid_left = Border(left=med, right=Side(style='thin'), top=Side(style='thin'), bottom=Side(style='thin')) + tbl_mid_mid = Border(left=Side(style='thin'), right=Side(style='thin'), top=Side(style='thin'), bottom=Side(style='thin')) + tbl_mid_right = Border(left=Side(style='thin'), right=med, top=Side(style='thin'), bottom=Side(style='thin')) + tbl_bot_left = Border(left=med, right=Side(style='thin'), top=Side(style='thin'), bottom=med) + tbl_bot_mid = Border(left=Side(style='thin'), right=Side(style='thin'), top=Side(style='thin'), bottom=med) + tbl_bot_right = Border(left=Side(style='thin'), right=med, top=Side(style='thin'), bottom=med) + # No vertical divider between value and dBA cells + tbl_top_val = Border(left=Side(style='thin'), right=Side(), top=med, bottom=Side(style='thin')) + tbl_top_unit = Border(left=Side(), right=Side(style='thin'), top=med, bottom=Side(style='thin')) + tbl_top_rval = Border(left=Side(style='thin'), right=Side(), top=med, bottom=Side(style='thin')) + tbl_top_runit = Border(left=Side(), right=med, top=med, bottom=Side(style='thin')) + tbl_mid_val = Border(left=Side(style='thin'), right=Side(), top=Side(style='thin'), bottom=Side(style='thin')) + tbl_mid_unit = Border(left=Side(), right=Side(style='thin'), top=Side(style='thin'), bottom=Side(style='thin')) + tbl_mid_rval = Border(left=Side(style='thin'), right=Side(), top=Side(style='thin'), bottom=Side(style='thin')) + tbl_mid_runit = Border(left=Side(), right=med, top=Side(style='thin'), bottom=Side(style='thin')) + tbl_bot_val = Border(left=Side(style='thin'), right=Side(), top=Side(style='thin'), bottom=med) + tbl_bot_unit = Border(left=Side(), 
right=Side(style='thin'), top=Side(style='thin'), bottom=med) + tbl_bot_rval = Border(left=Side(style='thin'), right=Side(), top=Side(style='thin'), bottom=med) + tbl_bot_runit = Border(left=Side(), right=med, top=Side(style='thin'), bottom=med) + + hdr_fill_tbl = PatternFill(start_color="F2F2F2", end_color="F2F2F2", fill_type="solid") + + # Header row: blank | Evening | Nighttime + c = ws.cell(row=31, column=9, value=""); c.border = tbl_top_left; c.font = f_bold + c = ws.cell(row=31, column=10, value="Evening (7PM to 10PM)") + c.font = f_bold; c.alignment = Alignment(horizontal='center', vertical='center', wrap_text=True) + c.border = tbl_top_mid; c.fill = hdr_fill_tbl + ws.merge_cells(start_row=31, start_column=10, end_row=31, end_column=11) + c = ws.cell(row=31, column=12, value="Nighttime (10PM to 7AM)") + c.font = f_bold; c.alignment = Alignment(horizontal='center', vertical='center', wrap_text=True) + c.border = tbl_top_right; c.fill = hdr_fill_tbl + ws.merge_cells(start_row=31, start_column=12, end_row=31, end_column=13) + ws.row_dimensions[31].height = 15 + + evening2 = [(lmax, ln1, ln2) for dt, lmax, ln1, ln2 in parsed_rows if 19 <= dt.hour < 22] + nighttime2 = [(lmax, ln1, ln2) for dt, lmax, ln1, ln2 in parsed_rows if dt.hour >= 22 or dt.hour < 7] + + def _avg2(vals): return round(sum(vals) / len(vals), 1) if vals else None + def _max2(vals): return round(max(vals), 1) if vals else None + + def write_stat2(row_num, label, eve_val, night_val, is_last=False): + bl = tbl_bot_left if is_last else tbl_mid_left + bm = tbl_bot_mid if is_last else tbl_mid_mid + br = tbl_bot_right if is_last else tbl_mid_right + lbl = ws.cell(row=row_num, column=9, value=label) + lbl.font = f_data; lbl.border = bl + lbl.alignment = Alignment(horizontal='left', vertical='center') + ev_str = f"{eve_val} dBA" if eve_val is not None else "" + ev = ws.cell(row=row_num, column=10, value=ev_str) + ev.font = f_bold; ev.border = bm + ev.alignment = Alignment(horizontal='center', 
vertical='center') + ws.merge_cells(start_row=row_num, start_column=10, end_row=row_num, end_column=11) + ni_str = f"{night_val} dBA" if night_val is not None else "" + ni = ws.cell(row=row_num, column=12, value=ni_str) + ni.font = f_bold; ni.border = br + ni.alignment = Alignment(horizontal='center', vertical='center') + ws.merge_cells(start_row=row_num, start_column=12, end_row=row_num, end_column=13) + + write_stat2(32, "LAmax", _max2([v[0] for v in evening2]), _max2([v[0] for v in nighttime2])) + write_stat2(33, "LA01 Average",_avg2([v[1] for v in evening2]), _avg2([v[1] for v in nighttime2])) + write_stat2(34, "LA10 Average",_avg2([v[2] for v in evening2]), _avg2([v[2] for v in nighttime2]), is_last=True) + + # Page setup: portrait, letter, template margins + from openpyxl.worksheet.properties import PageSetupProperties + ws.sheet_properties.pageSetUpPr = PageSetupProperties(fitToPage=False) + ws.page_setup.orientation = 'portrait' + ws.page_setup.paperSize = 1 + ws.page_margins.left = 0.75 + ws.page_margins.right = 0.75 + ws.page_margins.top = 1.0 + ws.page_margins.bottom = 1.0 + ws.page_margins.header = 0.5 + ws.page_margins.footer = 0.5 # Save to buffer output = io.BytesIO() @@ -2366,6 +2967,7 @@ async def generate_combined_excel_report( try: import openpyxl from openpyxl.chart import LineChart, Reference + from openpyxl.chart.shapes import GraphicalProperties from openpyxl.styles import Font, Alignment, Border, Side, PatternFill from openpyxl.utils import get_column_letter except ImportError: @@ -2380,7 +2982,7 @@ async def generate_combined_excel_report( raise HTTPException(status_code=404, detail="Project not found") # Get all sessions with measurement files - sessions = db.query(RecordingSession).filter_by(project_id=project_id).all() + sessions = db.query(MonitoringSession).filter_by(project_id=project_id).all() # Collect all Leq RND files grouped by location # Only include files with '_Leq_' in the path (15-minute averaged data) @@ -2389,9 +2991,12 @@ 
async def generate_combined_excel_report( for session in sessions: files = db.query(DataFile).filter_by(session_id=session.id).all() for file in files: - # Only include Leq files for reports (contain '_Leq_' in path) - is_leq_file = file.file_path and '_Leq_' in file.file_path and file.file_path.endswith('.rnd') - if is_leq_file: + if not file.file_path or not file.file_path.lower().endswith('.rnd'): + continue + from pathlib import Path as _Path + abs_path = _Path("data") / file.file_path + peek = _peek_rnd_headers(abs_path) + if _is_leq_file(file.file_path, peek): location = db.query(MonitoringLocation).filter_by(id=session.location_id).first() if session.location_id else None location_name = location.name if location else f"Session {session.id[:8]}" @@ -2406,21 +3011,30 @@ async def generate_combined_excel_report( if not location_files: raise HTTPException(status_code=404, detail="No Leq measurement files found in project. Reports require Leq data (files with '_Leq_' in the name).") - # Define styles - title_font = Font(bold=True, size=14) - header_font = Font(bold=True, size=10) - thin_border = Border( - left=Side(style='thin'), - right=Side(style='thin'), - top=Side(style='thin'), - bottom=Side(style='thin') - ) - header_fill = PatternFill(start_color="DAEEF3", end_color="DAEEF3", fill_type="solid") + # --- Shared styles --- + f_title = Font(name='Arial', bold=True, size=12) + f_data = Font(name='Arial', size=10) + f_bold = Font(name='Arial', bold=True, size=10) + + thin = Side(style='thin') + dbl = Side(style='double') + + hdr_inner = Border(left=thin, right=thin, top=dbl, bottom=thin) + hdr_left = Border(left=dbl, right=thin, top=dbl, bottom=thin) + hdr_right = Border(left=thin, right=dbl, top=dbl, bottom=thin) + last_inner = Border(left=thin, right=thin, top=thin, bottom=dbl) + last_left = Border(left=dbl, right=thin, top=thin, bottom=dbl) + last_right = Border(left=thin, right=dbl, top=thin, bottom=dbl) + data_inner = Border(left=thin, right=thin, 
top=thin, bottom=thin) + data_left = Border(left=dbl, right=thin, top=thin, bottom=thin) + data_right = Border(left=thin, right=dbl, top=thin, bottom=thin) + + hdr_fill = PatternFill(start_color="F2F2F2", end_color="F2F2F2", fill_type="solid") + center = Alignment(horizontal='center', vertical='center', wrap_text=True) + left_a = Alignment(horizontal='left', vertical='center') # Create Excel workbook wb = openpyxl.Workbook() - - # Remove default sheet wb.remove(wb.active) # Track all data for summary @@ -2428,33 +3042,34 @@ async def generate_combined_excel_report( # Create a sheet for each location for location_name, file_list in location_files.items(): - # Sanitize sheet name (max 31 chars, no special chars) safe_sheet_name = "".join(c for c in location_name if c.isalnum() or c in (' ', '-', '_'))[:31] ws = wb.create_sheet(title=safe_sheet_name) - # Row 1: Report title + # Column widths from Soundstudyexample.xlsx NRL_1 (sheet2) + # A B C D E F G H I J K L M N O P + for col_i, col_w in zip(range(1, 17), [9.43, 10.14, 8.14, 12.86, 10.86, 10.86, 25.0, 6.43, 12.43, 12.43, 10.0, 14.71, 8.0, 6.43, 6.43, 6.43]): + ws.column_dimensions[get_column_letter(col_i)].width = col_w + final_title = f"{report_title} - {project.name}" - ws['A1'] = final_title - ws['A1'].font = title_font ws.merge_cells('A1:G1') + ws['A1'] = final_title + ws['A1'].font = f_title; ws['A1'].alignment = center + ws.row_dimensions[1].height = 15.75 + ws.row_dimensions[2].height = 15 - # Row 3: Location name + ws.merge_cells('A3:G3') ws['A3'] = location_name - ws['A3'].font = Font(bold=True, size=11) + ws['A3'].font = f_title; ws['A3'].alignment = center + ws.row_dimensions[3].height = 15.75 + ws.row_dimensions[4].height = 15 + ws.row_dimensions[5].height = 15.75 - # Row 7: Headers - headers = ['Test Increment #', 'Date', 'Time', 'LAmax (dBA)', 'LA01 (dBA)', 'LA10 (dBA)', 'Comments'] - for col, header in enumerate(headers, 1): - cell = ws.cell(row=7, column=col, value=header) - cell.font = header_font 
- cell.border = thin_border - cell.fill = header_fill - cell.alignment = Alignment(horizontal='center') - - # Set column widths - column_widths = [16, 12, 10, 12, 12, 12, 40] - for i, width in enumerate(column_widths, 1): - ws.column_dimensions[get_column_letter(i)].width = width + hdr_labels = ['Interval #', 'Date', 'Time', 'LAmax (dBA)', 'LA01 (dBA)', 'LA10 (dBA)', 'Comments'] + for col, label in enumerate(hdr_labels, 1): + cell = ws.cell(row=6, column=col, value=label) + cell.font = f_bold; cell.fill = hdr_fill; cell.alignment = center + cell.border = hdr_left if col == 1 else (hdr_right if col == 7 else hdr_inner) + ws.row_dimensions[6].height = 39 # Combine data from all files for this location all_rnd_rows = [] @@ -2467,9 +3082,9 @@ async def generate_combined_excel_report( try: with open(file_path, 'r', encoding='utf-8', errors='replace') as f: - content = f.read() + content_f = f.read() - reader = csv.DictReader(io.StringIO(content)) + reader = csv.DictReader(io.StringIO(content_f)) for row in reader: cleaned_row = {} for key, value in row.items(): @@ -2492,66 +3107,153 @@ async def generate_combined_excel_report( if not all_rnd_rows: continue - # Sort by start time all_rnd_rows.sort(key=lambda r: r.get('Start Time', '')) - # Data rows starting at row 8 - data_start_row = 8 + data_start_row = 7 + parsed_rows_c = [] for idx, row in enumerate(all_rnd_rows, 1): - data_row = data_start_row + idx - 1 + dr = data_start_row + idx - 1 + is_last = (idx == len(all_rnd_rows)) + b_left = last_left if is_last else data_left + b_inner = last_inner if is_last else data_inner + b_right = last_right if is_last else data_right - ws.cell(row=data_row, column=1, value=idx).border = thin_border + c = ws.cell(row=dr, column=1, value=idx) + c.font = f_data; c.alignment = center; c.border = b_left start_time_str = row.get('Start Time', '') + row_dt = None if start_time_str: try: - dt = datetime.strptime(start_time_str, '%Y/%m/%d %H:%M:%S') - ws.cell(row=data_row, column=2, 
value=dt.date()) - ws.cell(row=data_row, column=3, value=dt.time()) + row_dt = datetime.strptime(start_time_str, '%Y/%m/%d %H:%M:%S') + c2 = ws.cell(row=dr, column=2, value=row_dt.strftime('%m/%d/%y')) + c3 = ws.cell(row=dr, column=3, value=row_dt.strftime('%H:%M')) except ValueError: - ws.cell(row=data_row, column=2, value=start_time_str) - ws.cell(row=data_row, column=3, value='') + c2 = ws.cell(row=dr, column=2, value=start_time_str) + c3 = ws.cell(row=dr, column=3, value='') else: - ws.cell(row=data_row, column=2, value='') - ws.cell(row=data_row, column=3, value='') + c2 = ws.cell(row=dr, column=2, value='') + c3 = ws.cell(row=dr, column=3, value='') + c2.font = f_data; c2.alignment = center; c2.border = b_inner + c3.font = f_data; c3.alignment = center; c3.border = b_inner lmax = row.get('Lmax(Main)') - ws.cell(row=data_row, column=4, value=lmax if lmax else '').border = thin_border + ln1 = row.get('LN1(Main)') + ln2 = row.get('LN2(Main)') + for col_idx, val in [(4, lmax), (5, ln1), (6, ln2)]: + c = ws.cell(row=dr, column=col_idx, value=val if isinstance(val, (int, float)) else '') + c.font = f_data; c.alignment = center; c.border = b_inner - ln1 = row.get('LN1(Main)') - ws.cell(row=data_row, column=5, value=ln1 if ln1 else '').border = thin_border + c = ws.cell(row=dr, column=7, value='') + c.font = f_data; c.alignment = left_a; c.border = b_right + ws.row_dimensions[dr].height = 15 - ln2 = row.get('LN2(Main)') - ws.cell(row=data_row, column=6, value=ln2 if ln2 else '').border = thin_border - - ws.cell(row=data_row, column=7, value='').border = thin_border - ws.cell(row=data_row, column=2).border = thin_border - ws.cell(row=data_row, column=3).border = thin_border + if row_dt and isinstance(lmax, (int, float)) and isinstance(ln1, (int, float)) and isinstance(ln2, (int, float)): + parsed_rows_c.append((row_dt, lmax, ln1, ln2)) data_end_row = data_start_row + len(all_rnd_rows) - 1 - # Add Line Chart chart = LineChart() - chart.title = f"{location_name}" - 
chart.style = 10 + chart.title = f"{location_name} - {final_title}" + chart.style = 2 chart.y_axis.title = "Sound Level (dBA)" - chart.x_axis.title = "Test Increment" - chart.height = 12 - chart.width = 20 - - data_ref = Reference(ws, min_col=4, min_row=7, max_col=6, max_row=data_end_row) - categories = Reference(ws, min_col=1, min_row=data_start_row, max_row=data_end_row) + chart.x_axis.title = "Time Period (15 Minute Intervals)" + chart.height = 12.7 + chart.width = 15.7 + data_ref = Reference(ws, min_col=4, min_row=6, max_col=6, max_row=data_end_row) + categories = Reference(ws, min_col=3, min_row=data_start_row, max_row=data_end_row) chart.add_data(data_ref, titles_from_data=True) chart.set_categories(categories) if len(chart.series) >= 3: - chart.series[0].graphicalProperties.line.solidFill = "FF0000" + chart.series[0].graphicalProperties.line.solidFill = "C00000" + chart.series[0].graphicalProperties.line.width = 15875 chart.series[1].graphicalProperties.line.solidFill = "00B050" + chart.series[1].graphicalProperties.line.width = 19050 chart.series[2].graphicalProperties.line.solidFill = "0070C0" + chart.series[2].graphicalProperties.line.width = 19050 - ws.add_chart(chart, "I3") + _plot_border = GraphicalProperties() + _plot_border.ln.solidFill = "000000" + _plot_border.ln.w = 12700 + chart.plot_area.spPr = _plot_border + ws.add_chart(chart, "H4") + # Stats table: note at I28-I29, headers at I31, data rows 32-34, border row 35 + note1 = ws.cell(row=28, column=9, value="Note: Averages are calculated by determining the arithmetic average ") + note1.font = f_data; note1.alignment = left_a + ws.merge_cells(start_row=28, start_column=9, end_row=28, end_column=14) + note2 = ws.cell(row=29, column=9, value="for each specified range of time intervals.") + note2.font = f_data; note2.alignment = left_a + ws.merge_cells(start_row=29, start_column=9, end_row=29, end_column=14) + + # Table header row 31 + med = Side(style='medium') + tbl_top_left = Border(left=med, 
right=Side(style='thin'), top=med, bottom=Side(style='thin')) + tbl_top_mid = Border(left=Side(style='thin'), right=Side(style='thin'), top=med, bottom=Side(style='thin')) + tbl_top_right = Border(left=Side(style='thin'), right=med, top=med, bottom=Side(style='thin')) + tbl_mid_left = Border(left=med, right=Side(style='thin'), top=Side(style='thin'), bottom=Side(style='thin')) + tbl_mid_mid = Border(left=Side(style='thin'), right=Side(style='thin'), top=Side(style='thin'), bottom=Side(style='thin')) + tbl_mid_right = Border(left=Side(style='thin'), right=med, top=Side(style='thin'), bottom=Side(style='thin')) + tbl_bot_left = Border(left=med, right=Side(style='thin'), top=Side(style='thin'), bottom=med) + tbl_bot_mid = Border(left=Side(style='thin'), right=Side(style='thin'), top=Side(style='thin'), bottom=med) + tbl_bot_right = Border(left=Side(style='thin'), right=med, top=Side(style='thin'), bottom=med) + + hdr_fill_tbl = PatternFill(start_color="F2F2F2", end_color="F2F2F2", fill_type="solid") + + # Header row: blank | Evening | Nighttime + c = ws.cell(row=31, column=9, value=""); c.border = tbl_top_left; c.font = f_bold + c = ws.cell(row=31, column=10, value="Evening (7PM to 10PM)") + c.font = f_bold; c.alignment = Alignment(horizontal='center', vertical='center', wrap_text=True) + c.border = tbl_top_mid; c.fill = hdr_fill_tbl + ws.merge_cells(start_row=31, start_column=10, end_row=31, end_column=11) + c = ws.cell(row=31, column=12, value="Nighttime (10PM to 7AM)") + c.font = f_bold; c.alignment = Alignment(horizontal='center', vertical='center', wrap_text=True) + c.border = tbl_top_right; c.fill = hdr_fill_tbl + ws.merge_cells(start_row=31, start_column=12, end_row=31, end_column=13) + ws.row_dimensions[31].height = 15 + + evening_c = [(lmax, ln1, ln2) for dt, lmax, ln1, ln2 in parsed_rows_c if 19 <= dt.hour < 22] + nighttime_c = [(lmax, ln1, ln2) for dt, lmax, ln1, ln2 in parsed_rows_c if dt.hour >= 22 or dt.hour < 7] + + def _avg_c(vals): return 
round(sum(vals) / len(vals), 1) if vals else None + def _max_c(vals): return round(max(vals), 1) if vals else None + + def write_stat_c(row_num, label, eve_val, night_val, is_last=False): + bl = tbl_bot_left if is_last else tbl_mid_left + bm = tbl_bot_mid if is_last else tbl_mid_mid + br = tbl_bot_right if is_last else tbl_mid_right + lbl = ws.cell(row=row_num, column=9, value=label) + lbl.font = f_data; lbl.border = bl + lbl.alignment = Alignment(horizontal='left', vertical='center') + ev_str = f"{eve_val} dBA" if eve_val is not None else "" + ev = ws.cell(row=row_num, column=10, value=ev_str) + ev.font = f_bold; ev.border = bm + ev.alignment = Alignment(horizontal='center', vertical='center') + ws.merge_cells(start_row=row_num, start_column=10, end_row=row_num, end_column=11) + ni_str = f"{night_val} dBA" if night_val is not None else "" + ni = ws.cell(row=row_num, column=12, value=ni_str) + ni.font = f_bold; ni.border = br + ni.alignment = Alignment(horizontal='center', vertical='center') + ws.merge_cells(start_row=row_num, start_column=12, end_row=row_num, end_column=13) + + write_stat_c(32, "LAmax", _max_c([v[0] for v in evening_c]), _max_c([v[0] for v in nighttime_c])) + write_stat_c(33, "LA01 Average",_avg_c([v[1] for v in evening_c]), _avg_c([v[1] for v in nighttime_c])) + write_stat_c(34, "LA10 Average",_avg_c([v[2] for v in evening_c]), _avg_c([v[2] for v in nighttime_c]), is_last=True) + + from openpyxl.worksheet.properties import PageSetupProperties + ws.sheet_properties.pageSetUpPr = PageSetupProperties(fitToPage=False) + ws.page_setup.orientation = 'portrait' + ws.page_setup.paperSize = 1 + ws.page_margins.left = 0.75 + ws.page_margins.right = 0.75 + ws.page_margins.top = 1.0 + ws.page_margins.bottom = 1.0 + ws.page_margins.header = 0.5 + ws.page_margins.footer = 0.5 + + # Calculate summary for this location # Calculate summary for this location all_lmax = [r.get('Lmax(Main)') for r in all_rnd_rows if isinstance(r.get('Lmax(Main)'), (int, float))] 
all_ln1 = [r.get('LN1(Main)') for r in all_rnd_rows if isinstance(r.get('LN1(Main)'), (int, float))] @@ -2606,6 +3308,993 @@ async def generate_combined_excel_report( ) +# ============================================================================ +# Combined Report Wizard — config page, preview page, and generate endpoint +# ============================================================================ + +@router.get("/{project_id}/combined-report-wizard", response_class=HTMLResponse) +async def combined_report_wizard( + request: Request, + project_id: str, + db: Session = Depends(get_db), +): + """Configuration page for the combined multi-location report wizard.""" + from backend.models import ReportTemplate + from pathlib import Path as _Path + + project = db.query(Project).filter_by(id=project_id).first() + if not project: + raise HTTPException(status_code=404, detail="Project not found") + + sessions = db.query(MonitoringSession).filter_by(project_id=project_id).order_by(MonitoringSession.started_at).all() + + # Build location -> sessions list, only including sessions that have Leq files + location_sessions: dict = {} # loc_name -> list of session dicts + for session in sessions: + files = db.query(DataFile).filter_by(session_id=session.id).all() + has_leq = False + for file in files: + if not file.file_path or not file.file_path.lower().endswith('.rnd'): + continue + abs_path = _Path("data") / file.file_path + peek = _peek_rnd_headers(abs_path) + if _is_leq_file(file.file_path, peek): + has_leq = True + break + if not has_leq: + continue + + location = db.query(MonitoringLocation).filter_by(id=session.location_id).first() if session.location_id else None + loc_name = location.name if location else f"Session {session.id[:8]}" + + if loc_name not in location_sessions: + location_sessions[loc_name] = [] + + # Build a display date and day-of-week from started_at + date_display = "" + day_of_week = "" + if session.started_at: + date_display = 
session.started_at.strftime("%-m/%-d/%Y") + day_of_week = session.started_at.strftime("%A") # Monday, Sunday, etc. + + location_sessions[loc_name].append({ + "session_id": session.id, + "session_label": session.session_label or "", + "date_display": date_display, + "day_of_week": day_of_week, + "started_at": session.started_at.isoformat() if session.started_at else "", + "stopped_at": session.stopped_at.isoformat() if session.stopped_at else "", + "duration_h": (session.duration_seconds // 3600) if session.duration_seconds else 0, + "duration_m": ((session.duration_seconds % 3600) // 60) if session.duration_seconds else 0, + "period_type": session.period_type or "", + "status": session.status, + }) + + locations = [ + {"name": name, "sessions": sess_list} + for name, sess_list in sorted(location_sessions.items()) + ] + + report_templates = db.query(ReportTemplate).all() + + return templates.TemplateResponse("combined_report_wizard.html", { + "request": request, + "project": project, + "project_id": project_id, + "locations": locations, + "locations_json": json.dumps(locations), + "report_templates": report_templates, + }) + + +def _build_location_data_from_sessions(project_id: str, db, selected_session_ids: list) -> dict: + """ + Build per-location spreadsheet data using an explicit list of session IDs. + Only rows from those sessions are included. Per-session period_type is + stored on each row so the report can filter stats correctly. + """ + from pathlib import Path as _Path + + project = db.query(Project).filter_by(id=project_id).first() + if not project: + raise HTTPException(status_code=404, detail="Project not found") + + if not selected_session_ids: + raise HTTPException(status_code=400, detail="No sessions selected.") + + # Load every requested session — one entry per (session_id, loc_name) pair. + # Keyed by session_id so overnight sessions are never split by calendar date. 
+ session_entries: dict = {} # session_id -> {loc_name, session_label, period_type, rows[]} + + for session_id in selected_session_ids: + session = db.query(MonitoringSession).filter_by(id=session_id, project_id=project_id).first() + if not session: + continue + location = db.query(MonitoringLocation).filter_by(id=session.location_id).first() if session.location_id else None + loc_name = location.name if location else f"Session {session_id[:8]}" + + session_entries[session_id] = { + "loc_name": loc_name, + "session_label": session.session_label or "", + "period_type": session.period_type or "", + "started_at": session.started_at, + "rows": [], + } + + files = db.query(DataFile).filter_by(session_id=session_id).all() + for file in files: + if not file.file_path or not file.file_path.lower().endswith('.rnd'): + continue + abs_path = _Path("data") / file.file_path + peek = _peek_rnd_headers(abs_path) + if not _is_leq_file(file.file_path, peek): + continue + rows = _read_rnd_file_rows(file.file_path) + rows, _ = _normalize_rnd_rows(rows) + session_entries[session_id]["rows"].extend(rows) + + if not any(e["rows"] for e in session_entries.values()): + raise HTTPException(status_code=404, detail="No Leq data found in the selected sessions.") + + location_data = [] + for session_id in selected_session_ids: + entry = session_entries.get(session_id) + if not entry or not entry["rows"]: + continue + + loc_name = entry["loc_name"] + period_type = entry["period_type"] + raw_rows = sorted(entry["rows"], key=lambda r: r.get('Start Time', '')) + + # Parse all rows to datetimes first so we can apply period-aware filtering + parsed = [] + for row in raw_rows: + start_time_str = row.get('Start Time', '') + dt = None + if start_time_str: + try: + dt = datetime.strptime(start_time_str, '%Y/%m/%d %H:%M:%S') + except ValueError: + pass + parsed.append((dt, row)) + + # Determine which rows to keep based on period_type + is_day_session = period_type in ('weekday_day', 'weekend_day') + 
target_date = None + if is_day_session: + # Day: 07:00–18:59 only, restricted to the LAST calendar date that has daytime rows + daytime_dates = sorted({ + dt.date() for dt, row in parsed + if dt and 7 <= dt.hour < 19 + }) + target_date = daytime_dates[-1] if daytime_dates else None + filtered = [ + (dt, row) for dt, row in parsed + if dt and dt.date() == target_date and 7 <= dt.hour < 19 + ] + else: + # Night: 19:00–06:59, spanning both calendar days — no date restriction + filtered = [ + (dt, row) for dt, row in parsed + if dt and (dt.hour >= 19 or dt.hour < 7) + ] + + # Fall back to all rows if filtering removed everything + if not filtered: + filtered = parsed + + spreadsheet_data = [] + for idx, (dt, row) in enumerate(filtered, 1): + date_str = dt.strftime('%Y-%m-%d') if dt else '' + time_str = dt.strftime('%H:%M') if dt else '' + + lmax = row.get('Lmax(Main)', '') + ln1 = row.get('LN1(Main)', '') + ln2 = row.get('LN2(Main)', '') + + spreadsheet_data.append([ + idx, + date_str, + time_str, + lmax if lmax else '', + ln1 if ln1 else '', + ln2 if ln2 else '', + '', + period_type, # col index 7 — hidden, used by report gen for day/night bucketing + ]) + + # For the label/filename, use target_date (day sessions) or started_at (night sessions) + from datetime import timedelta as _td + started_at_dt = entry["started_at"] + if is_day_session and target_date: + # Use the actual target date from data filtering (last date with daytime rows) + label_dt = datetime.combine(target_date, datetime.min.time()) + else: + label_dt = started_at_dt + + # Rebuild session label using the correct label date + if label_dt and entry["loc_name"]: + period_str = {"weekday_day": "Day", "weekday_night": "Night", + "weekend_day": "Day", "weekend_night": "Night"}.get(period_type, "") + day_abbr = label_dt.strftime("%a") + date_label = f"{label_dt.month}/{label_dt.day}" + session_label = " — ".join(p for p in [loc_name, f"{day_abbr} {date_label}", period_str] if p) + else: + session_label = 
entry["session_label"] + + location_data.append({ + "session_id": session_id, + "location_name": loc_name, + "session_label": session_label, + "period_type": period_type, + "started_at": label_dt.isoformat() if label_dt else "", + "raw_count": len(raw_rows), + "filtered_count": len(filtered), + "spreadsheet_data": spreadsheet_data, + }) + + return {"project": project, "location_data": location_data} + + +@router.get("/{project_id}/combined-report-preview", response_class=HTMLResponse) +async def combined_report_preview( + request: Request, + project_id: str, + report_title: str = Query("Background Noise Study"), + project_name: str = Query(""), + client_name: str = Query(""), + selected_sessions: str = Query(""), # comma-separated session IDs + db: Session = Depends(get_db), +): + """Preview and edit combined report data before generating the Excel file.""" + session_ids = [s.strip() for s in selected_sessions.split(',') if s.strip()] if selected_sessions else [] + + result = _build_location_data_from_sessions(project_id, db, session_ids) + + project = result["project"] + location_data = result["location_data"] + total_rows = sum(loc["filtered_count"] for loc in location_data) + final_project_name = project_name if project_name else project.name + + return templates.TemplateResponse("combined_report_preview.html", { + "request": request, + "project": project, + "project_id": project_id, + "report_title": report_title, + "project_name": final_project_name, + "client_name": client_name, + "time_filter_desc": f"{len(session_ids)} session{'s' if len(session_ids) != 1 else ''} selected", + "location_data": location_data, + "locations_json": json.dumps(location_data), + "total_rows": total_rows, + }) + + +@router.post("/{project_id}/generate-combined-from-preview") +async def generate_combined_from_preview( + project_id: str, + data: dict, + db: Session = Depends(get_db), +): + """Generate combined Excel report from wizard-edited spreadsheet data. 
+ + Produces one .xlsx per day (each with one sheet per location) packaged + into a single .zip file for download. + """ + try: + import openpyxl + from openpyxl.chart import LineChart, Reference + from openpyxl.chart.shapes import GraphicalProperties + from openpyxl.styles import Font, Alignment, Border, Side, PatternFill + from openpyxl.utils import get_column_letter + from openpyxl.worksheet.properties import PageSetupProperties + except ImportError: + raise HTTPException(status_code=500, detail="openpyxl is not installed. Run: pip install openpyxl") + + project = db.query(Project).filter_by(id=project_id).first() + if not project: + raise HTTPException(status_code=404, detail="Project not found") + + report_title = data.get("report_title", "Background Noise Study") + project_name = data.get("project_name", project.name) + client_name = data.get("client_name", "") + locations = data.get("locations", []) + + if not locations: + raise HTTPException(status_code=400, detail="No location data provided") + + # Shared styles + f_title = Font(name='Arial', bold=True, size=12) + f_bold = Font(name='Arial', bold=True, size=10) + f_data = Font(name='Arial', size=10) + thin = Side(style='thin') + dbl = Side(style='double') + med = Side(style='medium') + hdr_inner = Border(left=thin, right=thin, top=dbl, bottom=thin) + hdr_left = Border(left=dbl, right=thin, top=dbl, bottom=thin) + hdr_right = Border(left=thin, right=dbl, top=dbl, bottom=thin) + last_inner = Border(left=thin, right=thin, top=thin, bottom=dbl) + last_left = Border(left=dbl, right=thin, top=thin, bottom=dbl) + last_right = Border(left=thin, right=dbl, top=thin, bottom=dbl) + data_inner = Border(left=thin, right=thin, top=thin, bottom=thin) + data_left = Border(left=dbl, right=thin, top=thin, bottom=thin) + data_right = Border(left=thin, right=dbl, top=thin, bottom=thin) + hdr_fill = PatternFill(start_color="F2F2F2", end_color="F2F2F2", fill_type="solid") + center_a = Alignment(horizontal='center', 
vertical='center', wrap_text=True) + left_a = Alignment(horizontal='left', vertical='center') + thin_border = Border(left=thin, right=thin, top=thin, bottom=thin) + + tbl_top_left = Border(left=med, right=thin, top=med, bottom=thin) + tbl_top_mid = Border(left=thin, right=thin, top=med, bottom=thin) + tbl_top_right = Border(left=thin, right=med, top=med, bottom=thin) + tbl_mid_left = Border(left=med, right=thin, top=thin, bottom=thin) + tbl_mid_mid = Border(left=thin, right=thin, top=thin, bottom=thin) + tbl_mid_right = Border(left=thin, right=med, top=thin, bottom=thin) + tbl_bot_left = Border(left=med, right=thin, top=thin, bottom=med) + tbl_bot_mid = Border(left=thin, right=thin, top=thin, bottom=med) + tbl_bot_right = Border(left=thin, right=med, top=thin, bottom=med) + + col_widths = [9.43, 10.14, 8.14, 12.86, 10.86, 10.86, 25.0, 6.43, 18.0, 18.0, 14.0, 14.0, 10.0, 8.0, 6.43, 6.43] + + def _build_location_sheet(ws, loc_name, day_rows, final_title): + """Write one location's data onto ws. 
day_rows is a list of spreadsheet row arrays.""" + for col_i, col_w in zip(range(1, 17), col_widths): + ws.column_dimensions[get_column_letter(col_i)].width = col_w + + ws.merge_cells('A1:G1') + ws['A1'] = final_title + ws['A1'].font = f_title; ws['A1'].alignment = center_a + ws.row_dimensions[1].height = 15.75 + ws.row_dimensions[2].height = 15 + + ws.merge_cells('A3:G3') + ws['A3'] = loc_name + ws['A3'].font = f_title; ws['A3'].alignment = center_a + ws.row_dimensions[3].height = 15.75 + + # Row 4: date range derived from the data rows + def _fmt_date(d): + try: + from datetime import datetime as _dt + return _dt.strptime(d, '%Y-%m-%d').strftime('%-m/%-d/%y') + except Exception: + return d + + dates_in_data = sorted({ + row[1] for row in day_rows + if len(row) > 1 and row[1] + }) + if len(dates_in_data) >= 2: + date_label = f"{_fmt_date(dates_in_data[0])} to {_fmt_date(dates_in_data[-1])}" + elif len(dates_in_data) == 1: + date_label = _fmt_date(dates_in_data[0]) + else: + date_label = "" + ws.merge_cells('A4:G4') + ws['A4'] = date_label + ws['A4'].font = f_data; ws['A4'].alignment = center_a + ws.row_dimensions[4].height = 15 + ws.row_dimensions[5].height = 15.75 + + hdr_labels = ['Interval #', 'Date', 'Time', 'LAmax (dBA)', 'LA01 (dBA)', 'LA10 (dBA)', 'Comments'] + for col, label in enumerate(hdr_labels, 1): + cell = ws.cell(row=6, column=col, value=label) + cell.font = f_bold; cell.fill = hdr_fill; cell.alignment = center_a + cell.border = hdr_left if col == 1 else (hdr_right if col == 7 else hdr_inner) + ws.row_dimensions[6].height = 39 + + data_start_row = 7 + parsed_rows = [] + lmax_vals, ln1_vals, ln2_vals = [], [], [] + + for row_idx, row in enumerate(day_rows): + dr = data_start_row + row_idx + is_last = (row_idx == len(day_rows) - 1) + b_left = last_left if is_last else data_left + b_inner = last_inner if is_last else data_inner + b_right = last_right if is_last else data_right + + test_num = row[0] if len(row) > 0 else row_idx + 1 + date_val = 
_fmt_date(row[1]) if len(row) > 1 and row[1] else '' + time_val = row[2] if len(row) > 2 else '' + lmax = row[3] if len(row) > 3 else '' + ln1 = row[4] if len(row) > 4 else '' + ln2 = row[5] if len(row) > 5 else '' + comment = row[6] if len(row) > 6 else '' + row_period = row[7] if len(row) > 7 else '' # hidden period_type from session + + c = ws.cell(row=dr, column=1, value=test_num) + c.font = f_data; c.alignment = center_a; c.border = b_left + c = ws.cell(row=dr, column=2, value=date_val) + c.font = f_data; c.alignment = center_a; c.border = b_inner + c = ws.cell(row=dr, column=3, value=time_val) + c.font = f_data; c.alignment = center_a; c.border = b_inner + c = ws.cell(row=dr, column=4, value=lmax if lmax != '' else None) + c.font = f_data; c.alignment = center_a; c.border = b_inner + c = ws.cell(row=dr, column=5, value=ln1 if ln1 != '' else None) + c.font = f_data; c.alignment = center_a; c.border = b_inner + c = ws.cell(row=dr, column=6, value=ln2 if ln2 != '' else None) + c.font = f_data; c.alignment = center_a; c.border = b_inner + c = ws.cell(row=dr, column=7, value=comment) + c.font = f_data; c.alignment = left_a; c.border = b_right + ws.row_dimensions[dr].height = 15 + + if isinstance(lmax, (int, float)): + lmax_vals.append(lmax) + if isinstance(ln1, (int, float)): + ln1_vals.append(ln1) + if isinstance(ln2, (int, float)): + ln2_vals.append(ln2) + + if isinstance(lmax, (int, float)) and isinstance(ln1, (int, float)) and isinstance(ln2, (int, float)): + parsed_rows.append((row_period, time_val, float(lmax), float(ln1), float(ln2))) + + data_end_row = data_start_row + len(day_rows) - 1 + + chart = LineChart() + chart.title = f"{loc_name} - {final_title}" + chart.style = 2 + chart.y_axis.title = "Sound Level (dBA)" + chart.x_axis.title = "Time Period (15 Minute Intervals)" + chart.height = 12.7 + chart.width = 15.7 + + data_ref = Reference(ws, min_col=4, min_row=6, max_col=6, max_row=data_end_row) + categories = Reference(ws, min_col=3, 
min_row=data_start_row, max_row=data_end_row) + chart.add_data(data_ref, titles_from_data=True) + chart.set_categories(categories) + + if len(chart.series) >= 3: + chart.series[0].graphicalProperties.line.solidFill = "C00000" + chart.series[0].graphicalProperties.line.width = 15875 + chart.series[1].graphicalProperties.line.solidFill = "00B050" + chart.series[1].graphicalProperties.line.width = 19050 + chart.series[2].graphicalProperties.line.solidFill = "0070C0" + chart.series[2].graphicalProperties.line.width = 19050 + + _plot_border = GraphicalProperties() + _plot_border.ln.solidFill = "000000" + _plot_border.ln.w = 12700 + chart.plot_area.spPr = _plot_border + ws.add_chart(chart, "H4") + + hdr_fill_tbl = PatternFill(start_color="F2F2F2", end_color="F2F2F2", fill_type="solid") + + def _avg(vals): return round(sum(vals) / len(vals), 1) if vals else None + def _max(vals): return round(max(vals), 1) if vals else None + + # --- Period bucketing ------------------------------------------------ + # For night sessions: split into Evening (7PM–10PM) and Nighttime (10PM–7AM). + # For day sessions: single Daytime bucket. 
+ PERIOD_TYPE_IS_DAY = {"weekday_day", "weekend_day"} + PERIOD_TYPE_IS_NIGHT = {"weekday_night", "weekend_night"} + + day_rows_data = [] + evening_rows_data = [] + night_rows_data = [] + + for pt, time_v, lmx, l1, l2 in parsed_rows: + if pt in PERIOD_TYPE_IS_DAY: + day_rows_data.append((lmx, l1, l2)) + elif pt in PERIOD_TYPE_IS_NIGHT: + # Split by time: Evening = 19:00–21:59, Nighttime = 22:00–06:59 + hour = 0 + if time_v and ':' in str(time_v): + try: + hour = int(str(time_v).split(':')[0]) + except ValueError: + pass + if 19 <= hour <= 21: + evening_rows_data.append((lmx, l1, l2)) + else: + night_rows_data.append((lmx, l1, l2)) + else: + day_rows_data.append((lmx, l1, l2)) + + all_candidate_periods = [ + ("Daytime (7AM to 10PM)", day_rows_data), + ("Evening (7PM to 10PM)", evening_rows_data), + ("Nighttime (10PM to 7AM)", night_rows_data), + ] + active_periods = [(label, rows) for label, rows in all_candidate_periods if rows] + if not active_periods: + active_periods = [("Daytime (7AM to 10PM)", [])] + + # --- Stats table — fixed position alongside the chart --- + note1 = ws.cell(row=28, column=9, + value="Note: Averages are calculated by determining the arithmetic average ") + note1.font = f_data; note1.alignment = left_a + ws.merge_cells(start_row=28, start_column=9, end_row=28, end_column=14) + note2 = ws.cell(row=29, column=9, + value="for each specified range of time intervals.") + note2.font = f_data; note2.alignment = left_a + ws.merge_cells(start_row=29, start_column=9, end_row=29, end_column=14) + + for r in [28, 29, 30, 31, 32, 33, 34]: + ws.row_dimensions[r].height = 15 + + tbl_hdr_row = 31 + tbl_data_row = 32 + + # Layout: col 9 = row label, then pairs: (10,11), (12,13), (14,15) + num_periods = len(active_periods) + period_start_cols = [10 + i * 2 for i in range(num_periods)] + + def _hdr_border(i, n): + return Border( + left=med if i == 0 else thin, + right=med if i == n - 1 else thin, + top=med, bottom=thin, + ) + + c = ws.cell(row=tbl_hdr_row, 
column=9, value=""); c.border = tbl_top_left; c.font = f_bold + + for i, (period_label, _) in enumerate(active_periods): + sc = period_start_cols[i] + c = ws.cell(row=tbl_hdr_row, column=sc, value=period_label) + c.font = f_bold + c.alignment = Alignment(horizontal='center', vertical='center', wrap_text=False) + c.border = _hdr_border(i, num_periods) + c.fill = hdr_fill_tbl + ws.merge_cells(start_row=tbl_hdr_row, start_column=sc, + end_row=tbl_hdr_row, end_column=sc + 1) + + def write_stat_dynamic(row_num, row_label, period_vals_list, is_last=False): + lbl = ws.cell(row=row_num, column=9, value=row_label) + lbl.font = f_data; lbl.border = tbl_bot_left if is_last else tbl_mid_left + lbl.alignment = Alignment(horizontal='left', vertical='center') + n = len(period_vals_list) + for i, val in enumerate(period_vals_list): + sc = period_start_cols[i] + val_str = f"{val} dBA" if val is not None else "" + c = ws.cell(row=row_num, column=sc, value=val_str) + c.font = f_bold + c.alignment = Alignment(horizontal='center', vertical='center') + c.border = Border( + left=med if i == 0 else thin, + right=med if i == n - 1 else thin, + top=tbl_bot_mid.top if is_last else tbl_mid_mid.top, + bottom=tbl_bot_mid.bottom if is_last else tbl_mid_mid.bottom, + ) + ws.merge_cells(start_row=row_num, start_column=sc, + end_row=row_num, end_column=sc + 1) + + write_stat_dynamic(tbl_data_row, "LAmax", + [_max([v[0] for v in rows]) for _, rows in active_periods]) + write_stat_dynamic(tbl_data_row + 1, "LA01 Average", + [_avg([v[1] for v in rows]) for _, rows in active_periods]) + write_stat_dynamic(tbl_data_row + 2, "LA10 Average", + [_avg([v[2] for v in rows]) for _, rows in active_periods], is_last=True) + + ws.sheet_properties.pageSetUpPr = PageSetupProperties(fitToPage=False) + ws.page_setup.orientation = 'portrait' + ws.page_setup.paperSize = 1 + ws.page_margins.left = 0.75 + ws.page_margins.right = 0.75 + ws.page_margins.top = 1.0 + ws.page_margins.bottom = 1.0 + ws.page_margins.header = 
0.5 + ws.page_margins.footer = 0.5 + + return { + 'location': loc_name, + 'samples': len(day_rows), + 'lmax_avg': round(sum(lmax_vals) / len(lmax_vals), 1) if lmax_vals else None, + 'ln1_avg': round(sum(ln1_vals) / len(ln1_vals), 1) if ln1_vals else None, + 'ln2_avg': round(sum(ln2_vals) / len(ln2_vals), 1) if ln2_vals else None, + } + + def _build_summary_sheet(wb, day_label, project_name, loc_summaries): + summary_ws = wb.create_sheet(title="Summary") + summary_ws['A1'] = f"{report_title} - {project_name} - {day_label}" + summary_ws['A1'].font = f_title + summary_ws.merge_cells('A1:E1') + summary_headers = ['Location', 'Samples', 'LAmax Avg', 'LA01 Avg', 'LA10 Avg'] + for col, header in enumerate(summary_headers, 1): + cell = summary_ws.cell(row=3, column=col, value=header) + cell.font = f_bold; cell.fill = hdr_fill; cell.border = thin_border + for i, width in enumerate([30, 10, 12, 12, 12], 1): + summary_ws.column_dimensions[get_column_letter(i)].width = width + for idx, s in enumerate(loc_summaries, 4): + summary_ws.cell(row=idx, column=1, value=s['location']).border = thin_border + summary_ws.cell(row=idx, column=2, value=s['samples']).border = thin_border + summary_ws.cell(row=idx, column=3, value=s['lmax_avg'] or '-').border = thin_border + summary_ws.cell(row=idx, column=4, value=s['ln1_avg'] or '-').border = thin_border + summary_ws.cell(row=idx, column=5, value=s['ln2_avg'] or '-').border = thin_border + + # ---------------------------------------------------------------- + # Build one workbook per session (each location entry is one session) + # ---------------------------------------------------------------- + if not locations: + raise HTTPException(status_code=400, detail="No location data provided") + + project_name_clean = "".join(c for c in project_name if c.isalnum() or c in ('_', '-', ' ')).strip().replace(' ', '_') + final_title = f"{report_title} - {project_name}" + + zip_buffer = io.BytesIO() + with zipfile.ZipFile(zip_buffer, 'w', 
zipfile.ZIP_DEFLATED) as zf: + for loc_info in locations: + loc_name = loc_info.get("location_name", "Unknown") + session_label = loc_info.get("session_label", "") + period_type = loc_info.get("period_type", "") + started_at_str = loc_info.get("started_at", "") + rows = loc_info.get("spreadsheet_data", []) + if not rows: + continue + + # Re-number interval # sequentially + for i, row in enumerate(rows): + if len(row) > 0: + row[0] = i + 1 + + wb = openpyxl.Workbook() + wb.remove(wb.active) + + safe_sheet = "".join(c for c in loc_name if c.isalnum() or c in (' ', '-', '_'))[:31] + ws = wb.create_sheet(title=safe_sheet) + summary = _build_location_sheet(ws, loc_name, rows, final_title) + + # Derive a date label for the summary sheet from started_at or first row + day_label = session_label or loc_name + if started_at_str: + try: + _dt = datetime.fromisoformat(started_at_str) + day_label = _dt.strftime('%-m/%-d/%Y') + if session_label: + day_label = session_label + except Exception: + pass + + _build_summary_sheet(wb, day_label, project_name, [summary]) + + xlsx_buf = io.BytesIO() + wb.save(xlsx_buf) + xlsx_buf.seek(0) + + # Build a clean filename from label or location+date + label_clean = session_label or loc_name + label_clean = "".join(c for c in label_clean if c.isalnum() or c in (' ', '-', '_', '/')).strip().replace(' ', '_').replace('/', '-') + xlsx_name = f"{label_clean}_{project_name_clean}_report.xlsx" + zf.writestr(xlsx_name, xlsx_buf.read()) + + zip_buffer.seek(0) + zip_filename = f"{project_name_clean}_reports.zip" + + return StreamingResponse( + zip_buffer, + media_type="application/zip", + headers={"Content-Disposition": f'attachment; filename="{zip_filename}"'} + ) + + +# ============================================================================ +# Project-level bulk upload (entire date-folder structure) +# ============================================================================ + +def _bulk_parse_rnh(content: bytes) -> dict: + """Parse a Rion 
.rnh metadata file for session start/stop times and device info.""" + result = {} + try: + text = content.decode("utf-8", errors="replace") + for line in text.splitlines(): + line = line.strip() + if not line or line.startswith("["): + continue + if "," in line: + key, _, value = line.partition(",") + key = key.strip() + value = value.strip() + mapping = { + "Serial Number": "serial_number", + "Store Name": "store_name", + "Index Number": "index_number", + # NL-43/NL-53 use "Measurement Start/Stop Time" + "Measurement Start Time": "start_time_str", + "Measurement Stop Time": "stop_time_str", + "Total Measurement Time": "total_time_str", + # AU2/NL-32 use bare "Start Time" / "Stop Time" + "Start Time": "start_time_str", + "Stop Time": "stop_time_str", + } + if key in mapping: + result[mapping[key]] = value + except Exception: + pass + return result + + +def _bulk_parse_datetime(s: str): + if not s: + return None + try: + return datetime.strptime(s.strip(), "%Y/%m/%d %H:%M:%S") + except Exception: + return None + + +def _bulk_classify_file(filename: str) -> str: + name = filename.lower() + if name.endswith(".rnh"): + return "log" + if name.endswith(".rnd"): + return "measurement" + if name.endswith(".mp3") or name.endswith(".wav") or name.endswith(".m4a"): + return "audio" + if name.endswith(".xlsx") or name.endswith(".xls") or name.endswith(".csv"): + return "data" + return "data" + + +# Files we skip entirely — already-converted outputs that don't need re-importing +_BULK_SKIP_EXTENSIONS = {".xlsx", ".xls"} + + +@router.post("/{project_id}/upload-all") +async def upload_all_project_data( + project_id: str, + request: Request, + db: Session = Depends(get_db), +): + """ + Bulk-import an entire structured data folder selected via webkitdirectory. 
+ + Expected folder structure (flexible depth): + [date_folder]/[NRL_name]/[Auto_####]/ ← files here + -- OR -- + [NRL_name]/[Auto_####]/ ← files here (no date wrapper) + -- OR -- + [date_folder]/[NRL_name]/ ← files directly in NRL folder + + Each leaf folder group of .rnd/.rnh files becomes one MonitoringSession. + NRL folder names are matched case-insensitively to MonitoringLocation.name. + .mp3 files are stored as audio. .xlsx/.xls are skipped (already-converted). + Unmatched folders are reported but don't cause failure. + """ + form = await request.form() + + # Collect (relative_path, filename, bytes) for every uploaded file. + # The JS sends each file as "files" and its webkitRelativePath as "paths". + from collections import defaultdict + + uploaded_files = form.getlist("files") + uploaded_paths = form.getlist("paths") + + if not uploaded_files: + raise HTTPException(status_code=400, detail="No files received.") + + if len(uploaded_paths) != len(uploaded_files): + # Fallback: use bare filename if paths weren't sent + uploaded_paths = [f.filename for f in uploaded_files] + + project = db.query(Project).filter_by(id=project_id).first() + if not project: + raise HTTPException(status_code=404, detail="Project not found") + + # Load all sound monitoring locations for this project + locations = db.query(MonitoringLocation).filter_by( + project_id=project_id, + location_type="sound", + ).all() + + # Build a case-insensitive name → location map + loc_by_name: dict[str, MonitoringLocation] = { + loc.name.strip().lower(): loc for loc in locations + } + + def _normalize(s: str) -> str: + """Lowercase, strip spaces/hyphens/underscores for fuzzy comparison.""" + return s.lower().replace(" ", "").replace("-", "").replace("_", "") + + # Pre-build normalized keys for fuzzy matching + loc_by_normalized: dict[str, MonitoringLocation] = { + _normalize(loc.name): loc for loc in locations + } + + def _find_location_for_path(path: str): + """ + Walk path components from right and 
return first matching location. + Tries exact match first, then normalized (strips spaces/hyphens/underscores), + then checks if the location name *starts with* the normalized folder name. + e.g. folder "NRL 1" matches location "NRL1 - Test Location" + """ + components = path.replace("\\", "/").split("/") + for comp in reversed(components): + # Exact match + key = comp.strip().lower() + if key in loc_by_name: + return loc_by_name[key] + # Normalized match ("NRL 1" == "NRL1") + norm = _normalize(comp) + if norm in loc_by_normalized: + return loc_by_normalized[norm] + # Prefix match: location name starts with the folder component + # e.g. "NRL1" matches "NRL1 - Test Location" + for loc_norm, loc in loc_by_normalized.items(): + if loc_norm.startswith(norm) or norm.startswith(loc_norm): + return loc + return None + + def _session_group_key(parts: tuple) -> str: + """ + Determine the grouping key for a file path. + Files inside Auto_####/Auto_Leq/ or Auto_####/Auto_Lp_01/ are collapsed + up to their Auto_#### parent so they all land in the same session. + Only folder components are examined (not the filename, which is parts[-1]). 
+ """ + # Only look at folder components — exclude the filename (last part) + folder_parts = parts[:-1] + auto_idx = None + for i, p in enumerate(folder_parts): + p_lower = p.lower() + if p_lower.startswith("auto_") and not p_lower.startswith("auto_leq") and not p_lower.startswith("auto_lp"): + auto_idx = i + if auto_idx is not None: + # Group key = everything up to and including Auto_#### + return "/".join(folder_parts[:auto_idx + 1]) + # Fallback: use the immediate parent folder + return "/".join(folder_parts) if folder_parts else "" + + # --- Group files by session key --- + groups: dict[str, list[tuple[str, bytes]]] = defaultdict(list) + + for rel_path, uf in zip(uploaded_paths, uploaded_files): + rel_path = rel_path.replace("\\", "/").strip("/") + parts = _pathlib.PurePosixPath(rel_path).parts + if not parts: + continue + fname = parts[-1] + # Skip already-converted Excel exports + if _pathlib.PurePosixPath(fname).suffix.lower() in _BULK_SKIP_EXTENSIONS: + continue + group_key = _session_group_key(parts) + data = await uf.read() + groups[group_key].append((fname, data)) + + # Aggregate by (location_id, date_label) so each Auto_#### group is one session + # key: (location_id or None, group_path) + session_results = [] + unmatched_paths = [] + total_files = 0 + total_sessions = 0 + + for group_path, file_list in sorted(groups.items()): + matched_loc = _find_location_for_path(group_path) + + if matched_loc is None: + unmatched_paths.append(group_path) + continue + + # Parse .rnh if present in this group + rnh_meta = {} + for fname, fbytes in file_list: + if fname.lower().endswith(".rnh"): + rnh_meta = _bulk_parse_rnh(fbytes) + break + + started_at = _bulk_parse_datetime(rnh_meta.get("start_time_str")) or datetime.utcnow() + stopped_at = _bulk_parse_datetime(rnh_meta.get("stop_time_str")) + duration_seconds = None + if started_at and stopped_at: + duration_seconds = int((stopped_at - started_at).total_seconds()) + + store_name = rnh_meta.get("store_name", "") + 
serial_number = rnh_meta.get("serial_number", "") + index_number = rnh_meta.get("index_number", "") + + # Detect device model from first RND file in this group (in-memory) + _bulk_device_model = None + for _fname, _fbytes in file_list: + if _fname.lower().endswith(".rnd"): + try: + import csv as _csv_dm, io as _io_dm + _text = _fbytes.decode("utf-8", errors="replace") + _reader = _csv_dm.DictReader(_io_dm.StringIO(_text)) + _first = next(_reader, None) + if _first and "LAeq" in _first: + _bulk_device_model = "NL-32" + # NL-43/NL-53 have no distinguishing marker vs each other + # at the format level; leave None for those. + except Exception: + pass + break + + session_id = str(uuid.uuid4()) + monitoring_session = MonitoringSession( + id=session_id, + project_id=project_id, + location_id=matched_loc.id, + unit_id=None, + session_type="sound", + started_at=started_at, + stopped_at=stopped_at, + duration_seconds=duration_seconds, + status="completed", + device_model=_bulk_device_model, + session_metadata=json.dumps({ + "source": "bulk_upload", + "group_path": group_path, + "store_name": store_name, + "serial_number": serial_number, + "index_number": index_number, + }), + ) + db.add(monitoring_session) + db.commit() + db.refresh(monitoring_session) + + # Write files + output_dir = _pathlib.Path("data/Projects") / project_id / session_id + output_dir.mkdir(parents=True, exist_ok=True) + + leq_count = 0 + lp_count = 0 + group_file_count = 0 + + for fname, fbytes in file_list: + fname_lower = fname.lower() + # For NL-43/NL-53 devices (not NL-32), skip Lp RND files — they are + # not needed for reports and only add noise. AU2/NL-32 files don't + # use this naming convention so they are unaffected. 
+ if ( + fname_lower.endswith(".rnd") + and "_lp" in fname_lower + and _bulk_device_model != "NL-32" + ): + lp_count += 1 + continue + + file_type = _bulk_classify_file(fname) + if fname_lower.endswith(".rnd") and "_leq_" in fname_lower: + leq_count += 1 + + dest = output_dir / fname + dest.write_bytes(fbytes) + checksum = hashlib.sha256(fbytes).hexdigest() + rel_path = str(dest.relative_to("data")) + + data_file = DataFile( + id=str(uuid.uuid4()), + session_id=session_id, + file_path=rel_path, + file_type=file_type, + file_size_bytes=len(fbytes), + downloaded_at=datetime.utcnow(), + checksum=checksum, + file_metadata=json.dumps({ + "source": "bulk_upload", + "original_filename": fname, + "group_path": group_path, + "store_name": store_name, + }), + ) + db.add(data_file) + group_file_count += 1 + + db.commit() + total_files += group_file_count + total_sessions += 1 + + session_results.append({ + "location_name": matched_loc.name, + "location_id": matched_loc.id, + "session_id": session_id, + "group_path": group_path, + "files": group_file_count, + "leq_files": leq_count, + "lp_files": lp_count, + "store_name": store_name, + "started_at": started_at.isoformat() if started_at else None, + }) + + return { + "success": True, + "sessions_created": total_sessions, + "files_imported": total_files, + "unmatched_folders": unmatched_paths, + "sessions": session_results, + } + + @router.get("/types/list", response_class=HTMLResponse) async def get_project_types(request: Request, db: Session = Depends(get_db)): """ diff --git a/backend/routers/recurring_schedules.py b/backend/routers/recurring_schedules.py index 9a0289b..ee019a0 100644 --- a/backend/routers/recurring_schedules.py +++ b/backend/routers/recurring_schedules.py @@ -497,6 +497,9 @@ async def get_schedule_list_partial( """ Return HTML partial for schedule list. 
""" + project = db.query(Project).filter_by(id=project_id).first() + project_status = project.status if project else "active" + schedules = db.query(RecurringSchedule).filter_by( project_id=project_id ).order_by(RecurringSchedule.created_at.desc()).all() @@ -515,4 +518,5 @@ async def get_schedule_list_partial( "request": request, "project_id": project_id, "schedules": schedule_data, + "project_status": project_status, }) diff --git a/backend/routers/roster_rename.py b/backend/routers/roster_rename.py index c99082d..c8dbf19 100644 --- a/backend/routers/roster_rename.py +++ b/backend/routers/roster_rename.py @@ -92,15 +92,15 @@ async def rename_unit( except Exception as e: logger.warning(f"Could not update unit_assignments: {e}") - # Update recording_sessions table (if exists) + # Update monitoring_sessions table (if exists) try: - from backend.models import RecordingSession - db.query(RecordingSession).filter(RecordingSession.unit_id == old_id).update( + from backend.models import MonitoringSession + db.query(MonitoringSession).filter(MonitoringSession.unit_id == old_id).update( {"unit_id": new_id}, synchronize_session=False ) except Exception as e: - logger.warning(f"Could not update recording_sessions: {e}") + logger.warning(f"Could not update monitoring_sessions: {e}") # Commit all changes db.commit() diff --git a/backend/services/recurring_schedule_service.py b/backend/services/recurring_schedule_service.py index d19ce41..8ef3f17 100644 --- a/backend/services/recurring_schedule_service.py +++ b/backend/services/recurring_schedule_service.py @@ -15,7 +15,7 @@ from zoneinfo import ZoneInfo from sqlalchemy.orm import Session from sqlalchemy import and_ -from backend.models import RecurringSchedule, ScheduledAction, MonitoringLocation, UnitAssignment +from backend.models import RecurringSchedule, ScheduledAction, MonitoringLocation, UnitAssignment, Project logger = logging.getLogger(__name__) @@ -332,10 +332,12 @@ class RecurringScheduleService: ) 
actions.append(start_action) - # Create STOP action + # Create STOP action (stop_cycle handles download when include_download is True) stop_notes = json.dumps({ "schedule_name": schedule.name, "schedule_id": schedule.id, + "schedule_type": "weekly_calendar", + "include_download": schedule.include_download, }) stop_action = ScheduledAction( id=str(uuid.uuid4()), @@ -350,27 +352,6 @@ class RecurringScheduleService: ) actions.append(stop_action) - # Create DOWNLOAD action if enabled (1 minute after stop) - if schedule.include_download: - download_time = end_utc + timedelta(minutes=1) - download_notes = json.dumps({ - "schedule_name": schedule.name, - "schedule_id": schedule.id, - "schedule_type": "weekly_calendar", - }) - download_action = ScheduledAction( - id=str(uuid.uuid4()), - project_id=schedule.project_id, - location_id=schedule.location_id, - unit_id=unit_id, - action_type="download", - device_type=schedule.device_type, - scheduled_time=download_time, - execution_status="pending", - notes=download_notes, - ) - actions.append(download_action) - return actions def _generate_interval_actions( @@ -613,8 +594,16 @@ class RecurringScheduleService: return self.db.query(RecurringSchedule).filter_by(project_id=project_id).all() def get_enabled_schedules(self) -> List[RecurringSchedule]: - """Get all enabled recurring schedules.""" - return self.db.query(RecurringSchedule).filter_by(enabled=True).all() + """Get all enabled recurring schedules for projects that are not on hold or deleted.""" + active_project_ids = [ + p.id for p in self.db.query(Project.id).filter( + Project.status.notin_(["on_hold", "archived", "deleted"]) + ).all() + ] + return self.db.query(RecurringSchedule).filter( + RecurringSchedule.enabled == True, + RecurringSchedule.project_id.in_(active_project_ids), + ).all() def get_recurring_schedule_service(db: Session) -> RecurringScheduleService: diff --git a/backend/services/scheduler.py b/backend/services/scheduler.py index a056cb4..b782280 100644 --- 
a/backend/services/scheduler.py +++ b/backend/services/scheduler.py @@ -21,7 +21,7 @@ from sqlalchemy.orm import Session from sqlalchemy import and_ from backend.database import SessionLocal -from backend.models import ScheduledAction, RecordingSession, MonitoringLocation, Project, RecurringSchedule +from backend.models import ScheduledAction, MonitoringSession, MonitoringLocation, Project, RecurringSchedule from backend.services.device_controller import get_device_controller, DeviceControllerError from backend.services.alert_service import get_alert_service import uuid @@ -107,10 +107,19 @@ class SchedulerService: try: # Find pending actions that are due now = datetime.utcnow() + + # Only execute actions for active/completed projects (not on_hold, archived, or deleted) + active_project_ids = [ + p.id for p in db.query(Project.id).filter( + Project.status.notin_(["on_hold", "archived", "deleted"]) + ).all() + ] + pending_actions = db.query(ScheduledAction).filter( and_( ScheduledAction.execution_status == "pending", ScheduledAction.scheduled_time <= now, + ScheduledAction.project_id.in_(active_project_ids), ) ).order_by(ScheduledAction.scheduled_time).all() @@ -263,7 +272,7 @@ class SchedulerService: ) # Create recording session - session = RecordingSession( + session = MonitoringSession( id=str(uuid.uuid4()), project_id=action.project_id, location_id=action.location_id, @@ -295,9 +304,20 @@ class SchedulerService: stop_cycle handles: 1. Stop measurement 2. Enable FTP - 3. Download measurement folder - 4. Verify download + 3. Download measurement folder to SLMM local storage + + After stop_cycle, if download succeeded, this method fetches the ZIP + from SLMM and extracts it into Terra-View's project directory, creating + DataFile records for each file. 
""" + import hashlib + import io + import os + import zipfile + import httpx + from pathlib import Path + from backend.models import DataFile + # Parse notes for download preference include_download = True try: @@ -308,7 +328,7 @@ class SchedulerService: pass # Notes is plain text, not JSON # Execute the full stop cycle via device controller - # SLMM handles stop, FTP enable, and download + # SLMM handles stop, FTP enable, and download to SLMM-local storage cycle_response = await self.device_controller.stop_cycle( unit_id, action.device_type, @@ -316,11 +336,11 @@ class SchedulerService: ) # Find and update the active recording session - active_session = db.query(RecordingSession).filter( + active_session = db.query(MonitoringSession).filter( and_( - RecordingSession.location_id == action.location_id, - RecordingSession.unit_id == unit_id, - RecordingSession.status == "recording", + MonitoringSession.location_id == action.location_id, + MonitoringSession.unit_id == unit_id, + MonitoringSession.status == "recording", ) ).first() @@ -340,10 +360,81 @@ class SchedulerService: except json.JSONDecodeError: pass + db.commit() + + # If SLMM downloaded the folder successfully, fetch the ZIP from SLMM + # and extract it into Terra-View's project directory, creating DataFile records + files_created = 0 + if include_download and cycle_response.get("download_success") and active_session: + folder_name = cycle_response.get("downloaded_folder") # e.g. 
"Auto_0058" + remote_path = f"/NL-43/{folder_name}" + + try: + SLMM_BASE_URL = os.getenv("SLMM_BASE_URL", "http://localhost:8100") + async with httpx.AsyncClient(timeout=600.0) as client: + zip_response = await client.post( + f"{SLMM_BASE_URL}/api/nl43/{unit_id}/ftp/download-folder", + json={"remote_path": remote_path} + ) + + if zip_response.is_success and len(zip_response.content) > 22: + base_dir = Path(f"data/Projects/{action.project_id}/{active_session.id}/{folder_name}") + base_dir.mkdir(parents=True, exist_ok=True) + + file_type_map = { + '.wav': 'audio', '.mp3': 'audio', + '.csv': 'data', '.txt': 'data', '.json': 'data', '.dat': 'data', + '.rnd': 'data', '.rnh': 'data', + '.log': 'log', + '.zip': 'archive', + '.jpg': 'image', '.jpeg': 'image', '.png': 'image', + '.pdf': 'document', + } + + with zipfile.ZipFile(io.BytesIO(zip_response.content)) as zf: + for zip_info in zf.filelist: + if zip_info.is_dir(): + continue + file_data = zf.read(zip_info.filename) + file_path = base_dir / zip_info.filename + file_path.parent.mkdir(parents=True, exist_ok=True) + with open(file_path, 'wb') as f: + f.write(file_data) + checksum = hashlib.sha256(file_data).hexdigest() + ext = os.path.splitext(zip_info.filename)[1].lower() + data_file = DataFile( + id=str(uuid.uuid4()), + session_id=active_session.id, + file_path=str(file_path.relative_to("data")), + file_type=file_type_map.get(ext, 'data'), + file_size_bytes=len(file_data), + downloaded_at=datetime.utcnow(), + checksum=checksum, + file_metadata=json.dumps({ + "source": "stop_cycle", + "remote_path": remote_path, + "unit_id": unit_id, + "folder_name": folder_name, + "relative_path": zip_info.filename, + }), + ) + db.add(data_file) + files_created += 1 + + db.commit() + logger.info(f"Created {files_created} DataFile records for session {active_session.id} from {folder_name}") + else: + logger.warning(f"ZIP from SLMM for {folder_name} was empty or failed, skipping DataFile creation") + + except Exception as e: + 
logger.error(f"Failed to extract ZIP and create DataFile records for {folder_name}: {e}") + # Don't fail the stop action — the device was stopped successfully + return { "status": "stopped", "session_id": active_session.id if active_session else None, "cycle_response": cycle_response, + "files_created": files_created, } async def _execute_download( @@ -526,11 +617,11 @@ class SchedulerService: result["steps"]["download"] = {"success": False, "error": "Project or location not found"} # Close out the old recording session - active_session = db.query(RecordingSession).filter( + active_session = db.query(MonitoringSession).filter( and_( - RecordingSession.location_id == action.location_id, - RecordingSession.unit_id == unit_id, - RecordingSession.status == "recording", + MonitoringSession.location_id == action.location_id, + MonitoringSession.unit_id == unit_id, + MonitoringSession.status == "recording", ) ).first() @@ -557,7 +648,7 @@ class SchedulerService: result["steps"]["start"] = {"success": True, "response": cycle_response} # Create new recording session - new_session = RecordingSession( + new_session = MonitoringSession( id=str(uuid.uuid4()), project_id=action.project_id, location_id=action.location_id, diff --git a/backend/services/slmm_client.py b/backend/services/slmm_client.py index b6b683e..b1dcae1 100644 --- a/backend/services/slmm_client.py +++ b/backend/services/slmm_client.py @@ -659,7 +659,7 @@ class SLMMClient: # Format as Auto_XXXX folder name folder_name = f"Auto_{index_number:04d}" - remote_path = f"/NL43_DATA/{folder_name}" + remote_path = f"/NL-43/{folder_name}" # Download the folder result = await self.download_folder(unit_id, remote_path) diff --git a/backend/templates_config.py b/backend/templates_config.py index c0e4212..453b284 100644 --- a/backend/templates_config.py +++ b/backend/templates_config.py @@ -5,6 +5,7 @@ All routers should import `templates` from this module to get consistent filter and global function registration. 
""" +import json as _json from fastapi.templating import Jinja2Templates # Import timezone utilities @@ -32,8 +33,38 @@ def jinja_timezone_abbr(): # Create templates instance templates = Jinja2Templates(directory="templates") +def jinja_local_date(dt, fmt="%m-%d-%y"): + """Jinja filter: format a UTC datetime as a local date string (e.g. 02-19-26).""" + return format_local_datetime(dt, fmt) + + +def jinja_fromjson(s): + """Jinja filter: parse a JSON string into a dict (returns {} on failure).""" + if not s: + return {} + try: + return _json.loads(s) + except Exception: + return {} + + +def jinja_same_date(dt1, dt2) -> bool: + """Jinja global: True if two datetimes fall on the same local date.""" + if not dt1 or not dt2: + return False + try: + d1 = format_local_datetime(dt1, "%Y-%m-%d") + d2 = format_local_datetime(dt2, "%Y-%m-%d") + return d1 == d2 + except Exception: + return False + + # Register Jinja filters and globals templates.env.filters["local_datetime"] = jinja_local_datetime templates.env.filters["local_time"] = jinja_local_time +templates.env.filters["local_date"] = jinja_local_date +templates.env.filters["fromjson"] = jinja_fromjson templates.env.globals["timezone_abbr"] = jinja_timezone_abbr templates.env.globals["get_user_timezone"] = get_user_timezone +templates.env.globals["same_date"] = jinja_same_date diff --git a/data-dev/backups/snapshot_20251216_201738.db.meta.json b/data-dev/backups/snapshot_20251216_201738.db.meta.json deleted file mode 100644 index 1d25dc1..0000000 --- a/data-dev/backups/snapshot_20251216_201738.db.meta.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "filename": "snapshot_20251216_201738.db", - "created_at": "20251216_201738", - "created_at_iso": "2025-12-16T20:17:38.638982", - "description": "Auto-backup before restore", - "size_bytes": 57344, - "size_mb": 0.05, - "original_db_size_bytes": 57344, - "type": "manual" -} \ No newline at end of file diff --git a/data-dev/backups/snapshot_uploaded_20251216_201732.db.meta.json 
b/data-dev/backups/snapshot_uploaded_20251216_201732.db.meta.json deleted file mode 100644 index 257cc02..0000000 --- a/data-dev/backups/snapshot_uploaded_20251216_201732.db.meta.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "filename": "snapshot_uploaded_20251216_201732.db", - "created_at": "20251216_201732", - "created_at_iso": "2025-12-16T20:17:32.574205", - "description": "Uploaded: snapshot_20251216_200259.db", - "size_bytes": 77824, - "size_mb": 0.07, - "type": "uploaded" -} \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml index e97357e..3a8964c 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -24,30 +24,6 @@ services: retries: 3 start_period: 40s - # --- TERRA-VIEW DEVELOPMENT --- - terra-view-dev: - build: . - container_name: terra-view-dev - ports: - - "1001:8001" - volumes: - - ./data-dev:/app/data - environment: - - PYTHONUNBUFFERED=1 - - ENVIRONMENT=development - - SLMM_BASE_URL=http://host.docker.internal:8100 - restart: unless-stopped - depends_on: - - slmm - extra_hosts: - - "host.docker.internal:host-gateway" - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8001/health"] - interval: 30s - timeout: 10s - retries: 3 - start_period: 40s - # --- SLMM (Sound Level Meter Manager) --- slmm: build: @@ -61,6 +37,8 @@ services: - PYTHONUNBUFFERED=1 - PORT=8100 - CORS_ORIGINS=* + - TCP_IDLE_TTL=-1 + - TCP_MAX_AGE=-1 restart: unless-stopped healthcheck: test: ["CMD", "curl", "-f", "http://localhost:8100/health"] @@ -71,4 +49,3 @@ services: volumes: data: - data-dev: diff --git a/rebuild-dev.sh b/rebuild-dev.sh new file mode 100755 index 0000000..fc79362 --- /dev/null +++ b/rebuild-dev.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Dev rebuild script — increments build number, rebuilds and restarts terra-view +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +BUILD_FILE="$SCRIPT_DIR/build_number.txt" + +# Read and increment build number +BUILD_NUMBER=$(cat "$BUILD_FILE" 2>/dev/null || echo "0") 
+BUILD_NUMBER=$((BUILD_NUMBER + 1)) +echo "$BUILD_NUMBER" > "$BUILD_FILE" + +echo "Building terra-view dev (build #$BUILD_NUMBER)..." + +cd "$SCRIPT_DIR" +docker compose build --build-arg BUILD_NUMBER="$BUILD_NUMBER" terra-view +docker compose up -d terra-view + +echo "Done — terra-view v0.6.1-$BUILD_NUMBER is running on :1001" diff --git a/rebuild-prod.sh b/rebuild-prod.sh new file mode 100644 index 0000000..8226269 --- /dev/null +++ b/rebuild-prod.sh @@ -0,0 +1,12 @@ +#!/bin/bash +# Production rebuild script — rebuilds and restarts terra-view on :8001 +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +cd "$SCRIPT_DIR" + +echo "Building terra-view production..." +docker compose -f docker-compose.yml build terra-view +docker compose -f docker-compose.yml up -d terra-view + +echo "Done — terra-view production is running on :8001" diff --git a/scripts/rename_unit.py b/scripts/rename_unit.py index 915e7fd..68d4dc6 100644 --- a/scripts/rename_unit.py +++ b/scripts/rename_unit.py @@ -90,14 +90,14 @@ def rename_unit(old_id: str, new_id: str): except Exception: pass # Table may not exist - # Update recording_sessions table (if exists) + # Update monitoring_sessions table (if exists) try: result = session.execute( - text("UPDATE recording_sessions SET unit_id = :new_id WHERE unit_id = :old_id"), + text("UPDATE monitoring_sessions SET unit_id = :new_id WHERE unit_id = :old_id"), {"new_id": new_id, "old_id": old_id} ) if result.rowcount > 0: - print(f" ✓ Updated recording_sessions ({result.rowcount} rows)") + print(f" ✓ Updated monitoring_sessions ({result.rowcount} rows)") except Exception: pass # Table may not exist diff --git a/templates/combined_report_preview.html b/templates/combined_report_preview.html new file mode 100644 index 0000000..4de3bb6 --- /dev/null +++ b/templates/combined_report_preview.html @@ -0,0 +1,315 @@ +{% extends "base.html" %} + +{% block title %}Combined Report Preview - {{ project.name }}{% endblock %} + +{% block content %} 
+ + + + +
+ {{ location_data|length }} location{{ 's' if location_data|length != 1 else '' }} + {% if time_filter_desc %} | {{ time_filter_desc }}{% endif %} + | {{ total_rows }} total row{{ 's' if total_rows != 1 else '' }} +
+{{ project.name }}
++ 0 session(s) selected — each selected session becomes one sheet in the ZIP. + Change the period type per session to control how stats are bucketed (Day vs Night). +
+ + {% if locations %} + {% for loc in locations %} + {% set loc_name = loc.name %} + {% set sessions = loc.sessions %} +No monitoring sessions found.
+Upload data files to create sessions first.
+Active Session
{% if active_session %} - Recording + Monitoring {% else %} Idle {% endif %}
+ {% else %} +Mode
++ Offline / Manual +
+ {% endif %}