feat: add SLM model schemas; run migration on prod DB

feat: add complete combined sound report creation tool (wizard); add new SLM schema for each model

feat: update project header link for combined report wizard

feat: add migration script to backfill device_model in monitoring_sessions

feat: implement combined report preview template with spreadsheet functionality

feat: create combined report wizard template for report generation
commit ef8c046f31 (parent 3637cf5af8)
2026-03-05 20:43:22 +00:00
10 changed files with 1402 additions and 4 deletions


@@ -31,6 +31,10 @@ ENVIRONMENT = os.getenv("ENVIRONMENT", "production")
# Initialize FastAPI app
VERSION = "0.6.1"
if ENVIRONMENT == "development":
    _build = os.getenv("BUILD_NUMBER", "0")
    if _build and _build != "0":
        VERSION = f"{VERSION}-{_build}"
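# e.g. with BUILD_NUMBER=42 in a development environment, VERSION becomes "0.6.1-42"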
app = FastAPI(
    title="Seismo Fleet Manager",
    description="Backend API for managing seismograph fleet status",


@@ -0,0 +1,127 @@
#!/usr/bin/env python3
"""
Migration: Add device_model column to monitoring_sessions table.
Records which physical SLM model produced each session's data (e.g. "NL-43",
"NL-53", "NL-32"). Used by report generation to apply the correct parsing
logic without re-opening files to detect format.
Run once inside the Docker container:
docker exec terra-view python3 backend/migrate_add_session_device_model.py
Backfill strategy for existing rows:
1. If session.unit_id is set, use roster.slm_model for that unit.
2. Else, peek at the first .rnd file in the session: presence of the 'LAeq'
column header identifies AU2 / NL-32 format.
Sessions where neither hint is available remain NULL — the file-content
fallback in report code handles them transparently.
"""
import csv
import io
from pathlib import Path
DB_PATH = Path("data/seismo_fleet.db")
def _peek_first_row(abs_path: Path) -> dict:
    """Read only the header + first data row of an RND file. Very cheap."""
    try:
        with open(abs_path, "r", encoding="utf-8", errors="replace") as f:
            reader = csv.DictReader(f)
            return next(reader, None) or {}
    except Exception:
        return {}


def _detect_model_from_rnd(abs_path: Path) -> str | None:
    """Return 'NL-32' if file uses AU2 column format, else None."""
    row = _peek_first_row(abs_path)
    if "LAeq" in row:
        return "NL-32"
    return None
def migrate():
    import sqlite3

    if not DB_PATH.exists():
        print(f"Database not found at {DB_PATH}. Are you running from /home/serversdown/terra-view?")
        return

    conn = sqlite3.connect(DB_PATH)
    conn.row_factory = sqlite3.Row
    cur = conn.cursor()

    # ── 1. Add column (idempotent) ───────────────────────────────────────────
    cur.execute("PRAGMA table_info(monitoring_sessions)")
    existing_cols = {row["name"] for row in cur.fetchall()}
    if "device_model" not in existing_cols:
        cur.execute("ALTER TABLE monitoring_sessions ADD COLUMN device_model TEXT")
        conn.commit()
        print("✓ Added column device_model to monitoring_sessions")
    else:
        print("○ Column device_model already exists — skipping ALTER TABLE")

    # ── 2. Backfill existing NULL rows ───────────────────────────────────────
    cur.execute(
        "SELECT id, unit_id FROM monitoring_sessions WHERE device_model IS NULL"
    )
    sessions = cur.fetchall()
    print(f"Backfilling {len(sessions)} session(s) with device_model=NULL...")

    updated = skipped = 0
    for row in sessions:
        session_id = row["id"]
        unit_id = row["unit_id"]
        device_model = None

        # Strategy A: look up unit's slm_model from the roster
        if unit_id:
            cur.execute(
                "SELECT slm_model FROM roster WHERE id = ?", (unit_id,)
            )
            unit_row = cur.fetchone()
            if unit_row and unit_row["slm_model"]:
                device_model = unit_row["slm_model"]

        # Strategy B: detect from first .rnd file in the session
        if device_model is None:
            cur.execute(
                """SELECT file_path FROM data_files
                   WHERE session_id = ?
                     AND lower(file_path) LIKE '%.rnd'
                   LIMIT 1""",
                (session_id,),
            )
            file_row = cur.fetchone()
            if file_row:
                abs_path = Path("data") / file_row["file_path"]
                device_model = _detect_model_from_rnd(abs_path)
                # None here means NL-43/NL-53 format (or unreadable file) —
                # leave as NULL so the existing fallback applies.

        if device_model:
            cur.execute(
                "UPDATE monitoring_sessions SET device_model = ? WHERE id = ?",
                (device_model, session_id),
            )
            updated += 1
        else:
            skipped += 1

    conn.commit()
    conn.close()

    print(f"✓ Backfilled {updated} session(s) with a device_model.")
    if skipped:
        print(
            f" {skipped} session(s) left as NULL "
            "(no unit link and no AU2 file hint — NL-43/NL-53 or unknown; "
            "file-content detection applies at report time)."
        )
    print("Migration complete.")


if __name__ == "__main__":
    migrate()
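A quick way to sanity-check the backfill after running it (a sketch — assumes the sqlite3 CLI is available inside the container; the database path, table, and column come from the script above):

    docker exec terra-view sqlite3 data/seismo_fleet.db \
        "SELECT COALESCE(device_model, '(null)'), COUNT(*) FROM monitoring_sessions GROUP BY device_model;"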


@@ -257,6 +257,10 @@ class MonitoringSession(Base):
    location_id = Column(String, nullable=False, index=True)  # FK to MonitoringLocation.id
    unit_id = Column(String, nullable=True, index=True)  # FK to RosterUnit.id (nullable for offline uploads)
    # Physical device model that produced this session's data (e.g. "NL-43", "NL-53", "NL-32").
    # Null for older records; report code falls back to file-content detection when null.
    device_model = Column(String, nullable=True)
    session_type = Column(String, nullable=False)  # sound | vibration
    started_at = Column(DateTime, nullable=False)
    stopped_at = Column(DateTime, nullable=True)
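For context, the null fallback on the report side could look like this minimal sketch (resolve_device_model is a hypothetical helper name, and _detect_model_from_rnd mirrors the migration script; neither is part of this diff):

def resolve_device_model(session, rnd_path):
    # Prefer the stored column; older rows fall back to file-content detection.
    if session.device_model:
        return session.device_model
    return _detect_model_from_rnd(rnd_path)  # "NL-32" for AU2 format, else None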


@@ -112,6 +112,232 @@ def _is_leq_file(file_path: str, rows: list[dict]) -> bool:
    return False
def _filter_rnd_rows(
    rows: list[dict],
    filter_start_time: str,
    filter_end_time: str,
    filter_start_date: str,
    filter_end_date: str,
) -> list[dict]:
    """Filter RND data rows by time window and/or date range. Handles overnight ranges."""
    if not filter_start_time and not filter_end_time and not filter_start_date and not filter_end_date:
        return rows

    filtered = []
    start_hour = start_minute = end_hour = end_minute = None
    if filter_start_time:
        try:
            parts = filter_start_time.split(':')
            start_hour = int(parts[0])
            start_minute = int(parts[1]) if len(parts) > 1 else 0
        except (ValueError, IndexError):
            pass
    if filter_end_time:
        try:
            parts = filter_end_time.split(':')
            end_hour = int(parts[0])
            end_minute = int(parts[1]) if len(parts) > 1 else 0
        except (ValueError, IndexError):
            pass

    start_dt = end_dt = None
    if filter_start_date:
        try:
            start_dt = datetime.strptime(filter_start_date, '%Y-%m-%d').date()
        except ValueError:
            pass
    if filter_end_date:
        try:
            end_dt = datetime.strptime(filter_end_date, '%Y-%m-%d').date()
        except ValueError:
            pass

    for row in rows:
        start_time_str = row.get('Start Time', '')
        if not start_time_str:
            continue
        try:
            dt = datetime.strptime(start_time_str, '%Y/%m/%d %H:%M:%S')
            row_date = dt.date()
            row_hour = dt.hour
            row_minute = dt.minute
            if start_dt and row_date < start_dt:
                continue
            if end_dt and row_date > end_dt:
                continue
            if start_hour is not None and end_hour is not None:
                row_time_minutes = row_hour * 60 + row_minute
                start_time_minutes = start_hour * 60 + start_minute
                end_time_minutes = end_hour * 60 + end_minute
                if start_time_minutes > end_time_minutes:
                    # Overnight range (e.g., 19:00-07:00)
                    if not (row_time_minutes >= start_time_minutes or row_time_minutes < end_time_minutes):
                        continue
                else:
                    # Same-day range (e.g., 07:00-19:00)
                    if not (start_time_minutes <= row_time_minutes < end_time_minutes):
                        continue
            filtered.append(row)
        except ValueError:
            filtered.append(row)

    return filtered
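# Example of the window logic above (illustrative): with filter_start_time="19:00" and
# filter_end_time="07:00" (overnight), a row stamped 23:30 or 06:59 passes while 12:00
# is dropped; rows whose 'Start Time' fails to parse are kept rather than silently lost.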
def _read_rnd_file_rows(file_path_str: str) -> list[dict]:
    """Read and parse a single RND CSV file into a list of cleaned row dicts."""
    import csv as _csv
    from pathlib import Path as _Path

    file_path = _Path("data") / file_path_str
    if not file_path.exists():
        return []
    try:
        with open(file_path, 'r', encoding='utf-8', errors='replace') as f:
            content = f.read()
        rows = []
        reader = _csv.DictReader(io.StringIO(content))
        for row in reader:
            cleaned_row = {}
            for key, value in row.items():
                if key:
                    cleaned_key = key.strip()
                    cleaned_value = value.strip() if value else ''
                    if cleaned_value and cleaned_value not in ['-.-', '-', '']:
                        try:
                            cleaned_value = float(cleaned_value)
                        except ValueError:
                            pass
                    elif cleaned_value in ['-.-', '-']:
                        cleaned_value = None
                    cleaned_row[cleaned_key] = cleaned_value
            rows.append(cleaned_row)
        return rows
    except Exception:
        return []
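# Example of the cleaning above (illustrative): a raw CSV row
# {' Lmax(Main) ': ' 62.4 ', 'LN1(Main)': '-.-'} becomes
# {'Lmax(Main)': 62.4, 'LN1(Main)': None} — numeric strings are coerced to float
# and the '-.-' / '-' placeholders become None.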
def _build_combined_location_data(
    project_id: str,
    db,
    start_time: str = "",
    end_time: str = "",
    start_date: str = "",
    end_date: str = "",
    enabled_locations: list = None,
) -> dict:
    """
    Read all Leq RND files for a project, apply time/date filters, and return
    per-location spreadsheet data ready for the wizard preview.

    Returns:
        {
            "project": Project,
            "location_data": [
                {
                    "location_name": str,
                    "raw_count": int,
                    "filtered_count": int,
                    "spreadsheet_data": [[idx, date, time, lmax, ln1, ln2, ""], ...]
                },
                ...
            ]
        }

    Raises HTTPException 404 if project not found or no Leq files exist.
    """
    project = db.query(Project).filter_by(id=project_id).first()
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    sessions = db.query(MonitoringSession).filter_by(project_id=project_id).all()

    # Group Leq files by location
    location_files: dict = {}
    for session in sessions:
        files = db.query(DataFile).filter_by(session_id=session.id).all()
        for file in files:
            if not file.file_path or not file.file_path.lower().endswith('.rnd'):
                continue
            from pathlib import Path as _Path
            abs_path = _Path("data") / file.file_path
            peek = _peek_rnd_headers(abs_path)
            if not _is_leq_file(file.file_path, peek):
                continue
            location = db.query(MonitoringLocation).filter_by(id=session.location_id).first() if session.location_id else None
            loc_name = location.name if location else f"Session {session.id[:8]}"
            if loc_name not in location_files:
                location_files[loc_name] = []
            location_files[loc_name].append(file)

    if not location_files:
        raise HTTPException(status_code=404, detail="No Leq measurement files found in project.")

    # Filter by enabled_locations if specified
    if enabled_locations:
        location_files = {k: v for k, v in location_files.items() if k in enabled_locations}
        if not location_files:
            raise HTTPException(status_code=404, detail="None of the selected locations have Leq files.")

    location_data = []
    for loc_name, files in sorted(location_files.items()):
        all_rows = []
        for file in files:
            rows = _read_rnd_file_rows(file.file_path)
            rows, _ = _normalize_rnd_rows(rows)
            all_rows.extend(rows)
        if not all_rows:
            continue
        all_rows.sort(key=lambda r: r.get('Start Time', ''))
        raw_count = len(all_rows)
        filtered_rows = _filter_rnd_rows(all_rows, start_time, end_time, start_date, end_date)

        spreadsheet_data = []
        for idx, row in enumerate(filtered_rows, 1):
            start_time_str = row.get('Start Time', '')
            date_str = time_str = ''
            if start_time_str:
                try:
                    dt = datetime.strptime(start_time_str, '%Y/%m/%d %H:%M:%S')
                    date_str = dt.strftime('%Y-%m-%d')
                    time_str = dt.strftime('%H:%M:%S')
                except ValueError:
                    date_str = start_time_str
            lmax = row.get('Lmax(Main)', '')
            ln1 = row.get('LN1(Main)', '')
            ln2 = row.get('LN2(Main)', '')
            spreadsheet_data.append([
                idx,
                date_str,
                time_str,
                lmax if lmax else '',
                ln1 if ln1 else '',
                ln2 if ln2 else '',
                '',
            ])
        location_data.append({
            "location_name": loc_name,
            "raw_count": raw_count,
            "filtered_count": len(filtered_rows),
            "spreadsheet_data": spreadsheet_data,
        })

    return {"project": project, "location_data": location_data}
# ============================================================================
# Project List & Overview
# ============================================================================
@@ -979,6 +1205,7 @@ async def ftp_download_to_server(
    # If no active session, create one
    if not session:
        _ftp_unit = db.query(RosterUnit).filter_by(id=unit_id).first()
        session = MonitoringSession(
            id=str(uuid.uuid4()),
            project_id=project_id,
@@ -988,6 +1215,7 @@ async def ftp_download_to_server(
status="completed",
started_at=datetime.utcnow(),
stopped_at=datetime.utcnow(),
device_model=_ftp_unit.slm_model if _ftp_unit else None,
session_metadata='{"source": "ftp_download", "note": "Auto-created for FTP download"}'
)
db.add(session)
@@ -1144,6 +1372,7 @@ async def ftp_download_folder_to_server(
    # If no active session, create one
    if not session:
        _ftp_unit = db.query(RosterUnit).filter_by(id=unit_id).first()
        session = MonitoringSession(
            id=str(uuid.uuid4()),
            project_id=project_id,
@@ -1153,6 +1382,7 @@ async def ftp_download_folder_to_server(
status="completed",
started_at=datetime.utcnow(),
stopped_at=datetime.utcnow(),
device_model=_ftp_unit.slm_model if _ftp_unit else None,
session_metadata='{"source": "ftp_folder_download", "note": "Auto-created for FTP folder download"}'
)
db.add(session)
@@ -2618,9 +2848,12 @@ async def generate_combined_excel_report(
     for session in sessions:
         files = db.query(DataFile).filter_by(session_id=session.id).all()
         for file in files:
-            # Only include Leq files for reports (contain '_Leq_' in path)
-            is_leq_file = file.file_path and '_Leq_' in file.file_path and file.file_path.endswith('.rnd')
-            if is_leq_file:
+            if not file.file_path or not file.file_path.lower().endswith('.rnd'):
+                continue
+            from pathlib import Path as _Path
+            abs_path = _Path("data") / file.file_path
+            peek = _peek_rnd_headers(abs_path)
+            if _is_leq_file(file.file_path, peek):
                 location = db.query(MonitoringLocation).filter_by(id=session.location_id).first() if session.location_id else None
                 location_name = location.name if location else f"Session {session.id[:8]}"
@@ -2852,6 +3085,309 @@ async def generate_combined_excel_report(
    )

# ============================================================================
# Combined Report Wizard — config page, preview page, and generate endpoint
# ============================================================================

@router.get("/{project_id}/combined-report-wizard", response_class=HTMLResponse)
async def combined_report_wizard(
    request: Request,
    project_id: str,
    db: Session = Depends(get_db),
):
    """Configuration page for the combined multi-location report wizard."""
    from backend.models import ReportTemplate

    project = db.query(Project).filter_by(id=project_id).first()
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    sessions = db.query(MonitoringSession).filter_by(project_id=project_id).all()

    # Build location list with Leq file counts (no filtering)
    location_file_counts: dict = {}
    for session in sessions:
        files = db.query(DataFile).filter_by(session_id=session.id).all()
        for file in files:
            if not file.file_path or not file.file_path.lower().endswith('.rnd'):
                continue
            from pathlib import Path as _Path
            abs_path = _Path("data") / file.file_path
            peek = _peek_rnd_headers(abs_path)
            if not _is_leq_file(file.file_path, peek):
                continue
            location = db.query(MonitoringLocation).filter_by(id=session.location_id).first() if session.location_id else None
            loc_name = location.name if location else f"Session {session.id[:8]}"
            location_file_counts[loc_name] = location_file_counts.get(loc_name, 0) + 1

    locations = [
        {"name": name, "file_count": count}
        for name, count in sorted(location_file_counts.items())
    ]
    report_templates = db.query(ReportTemplate).all()
    return templates.TemplateResponse("combined_report_wizard.html", {
        "request": request,
        "project": project,
        "project_id": project_id,
        "locations": locations,
        "report_templates": report_templates,
    })
@router.get("/{project_id}/combined-report-preview", response_class=HTMLResponse)
async def combined_report_preview(
request: Request,
project_id: str,
report_title: str = Query("Background Noise Study"),
project_name: str = Query(""),
client_name: str = Query(""),
start_time: str = Query(""),
end_time: str = Query(""),
start_date: str = Query(""),
end_date: str = Query(""),
enabled_locations: str = Query(""),
db: Session = Depends(get_db),
):
"""Preview and edit combined report data before generating the Excel file."""
enabled_list = [loc.strip() for loc in enabled_locations.split(',') if loc.strip()] if enabled_locations else None
result = _build_combined_location_data(
project_id, db,
start_time=start_time,
end_time=end_time,
start_date=start_date,
end_date=end_date,
enabled_locations=enabled_list,
)
project = result["project"]
location_data = result["location_data"]
total_rows = sum(loc["filtered_count"] for loc in location_data)
final_project_name = project_name if project_name else project.name
# Build time filter display string
time_filter_desc = ""
if start_time and end_time:
time_filter_desc = f"{start_time} {end_time}"
elif start_time or end_time:
time_filter_desc = f"{start_time or ''} {end_time or ''}"
return templates.TemplateResponse("combined_report_preview.html", {
"request": request,
"project": project,
"project_id": project_id,
"report_title": report_title,
"project_name": final_project_name,
"client_name": client_name,
"start_time": start_time,
"end_time": end_time,
"start_date": start_date,
"end_date": end_date,
"time_filter_desc": time_filter_desc,
"location_data": location_data,
"locations_json": json.dumps(location_data),
"total_rows": total_rows,
})
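# Example preview request (illustrative values):
#   GET /{project_id}/combined-report-preview?start_time=19:00&end_time=07:00
#       &enabled_locations=North%20Fence,South%20Gate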
@router.post("/{project_id}/generate-combined-from-preview")
async def generate_combined_from_preview(
    project_id: str,
    data: dict,
    db: Session = Depends(get_db),
):
    """Generate combined Excel report from wizard-edited spreadsheet data."""
    try:
        import openpyxl
        from openpyxl.chart import LineChart, Reference
        from openpyxl.styles import Font, Alignment, Border, Side, PatternFill
        from openpyxl.utils import get_column_letter
    except ImportError:
        raise HTTPException(status_code=500, detail="openpyxl is not installed. Run: pip install openpyxl")

    project = db.query(Project).filter_by(id=project_id).first()
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    report_title = data.get("report_title", "Background Noise Study")
    project_name = data.get("project_name", project.name)
    client_name = data.get("client_name", "")
    locations = data.get("locations", [])
    if not locations:
        raise HTTPException(status_code=400, detail="No location data provided")

    # Styles
    title_font = Font(name='Arial', bold=True, size=12)
    header_font = Font(name='Arial', bold=True, size=10)
    data_font = Font(name='Arial', size=10)
    thin_border = Border(
        left=Side(style='thin'), right=Side(style='thin'),
        top=Side(style='thin'), bottom=Side(style='thin')
    )
    header_fill = PatternFill(start_color="DAEEF3", end_color="DAEEF3", fill_type="solid")
    center_align = Alignment(horizontal='center', vertical='center')

    wb = openpyxl.Workbook()
    wb.remove(wb.active)
    all_location_summaries = []
    for loc_info in locations:
        loc_name = loc_info.get("location_name", "Unknown")
        rows = loc_info.get("spreadsheet_data", [])
        if not rows:
            continue

        safe_sheet_name = "".join(c for c in loc_name if c.isalnum() or c in (' ', '-', '_'))[:31]
        ws = wb.create_sheet(title=safe_sheet_name)

        # Title row
        final_title = f"{report_title} - {project_name}"
        ws['A1'] = final_title
        ws['A1'].font = title_font
        ws['A1'].alignment = center_align
        ws.merge_cells('A1:G1')
        ws.row_dimensions[1].height = 20

        # Client row (row 2) if provided
        if client_name:
            ws['A2'] = client_name
            ws['A2'].font = Font(name='Arial', italic=True, size=10)
            ws['A2'].alignment = center_align
            ws.merge_cells('A2:G2')

        # Location row
        ws['A3'] = loc_name
        ws['A3'].font = Font(name='Arial', bold=True, size=12)
        ws['A3'].alignment = center_align
        ws.merge_cells('A3:G3')
        ws.row_dimensions[3].height = 20

        # Column headers at row 7
        headers = ['Test Increment #', 'Date', 'Time', 'LAmax (dBA)', 'LA01 (dBA)', 'LA10 (dBA)', 'Comments']
        for col, header in enumerate(headers, 1):
            cell = ws.cell(row=7, column=col, value=header)
            cell.font = header_font
            cell.border = thin_border
            cell.fill = header_fill
            cell.alignment = center_align
        ws.row_dimensions[7].height = 16

        column_widths = [12, 11, 9, 11, 11, 11, 20]
        for i, width in enumerate(column_widths, 1):
            ws.column_dimensions[get_column_letter(i)].width = width

        # Data rows starting at row 8
        data_start_row = 8
        lmax_vals = []
        ln1_vals = []
        ln2_vals = []
        for row_idx, row in enumerate(rows):
            data_row = data_start_row + row_idx
            # row is [test#, date, time, lmax, ln1, ln2, comment]
            test_num = row[0] if len(row) > 0 else row_idx + 1
            date_val = row[1] if len(row) > 1 else ''
            time_val = row[2] if len(row) > 2 else ''
            lmax = row[3] if len(row) > 3 else ''
            ln1 = row[4] if len(row) > 4 else ''
            ln2 = row[5] if len(row) > 5 else ''
            comment = row[6] if len(row) > 6 else ''
            ws.cell(row=data_row, column=1, value=test_num).border = thin_border
            ws.cell(row=data_row, column=2, value=date_val).border = thin_border
            ws.cell(row=data_row, column=3, value=time_val).border = thin_border
            ws.cell(row=data_row, column=4, value=lmax if lmax != '' else None).border = thin_border
            ws.cell(row=data_row, column=5, value=ln1 if ln1 != '' else None).border = thin_border
            ws.cell(row=data_row, column=6, value=ln2 if ln2 != '' else None).border = thin_border
            ws.cell(row=data_row, column=7, value=comment).border = thin_border
            if isinstance(lmax, (int, float)):
                lmax_vals.append(lmax)
            if isinstance(ln1, (int, float)):
                ln1_vals.append(ln1)
            if isinstance(ln2, (int, float)):
                ln2_vals.append(ln2)

        data_end_row = data_start_row + len(rows) - 1

        # Line chart
        chart = LineChart()
        chart.title = loc_name
        chart.style = 10
        chart.y_axis.title = "Sound Level (dBA)"
        chart.x_axis.title = "Time"
        chart.height = 18
        chart.width = 22
        data_ref = Reference(ws, min_col=4, min_row=7, max_col=6, max_row=data_end_row)
        categories = Reference(ws, min_col=3, min_row=data_start_row, max_row=data_end_row)
        chart.add_data(data_ref, titles_from_data=True)
        chart.set_categories(categories)
        if len(chart.series) >= 3:
            chart.series[0].graphicalProperties.line.solidFill = "FF0000"
            chart.series[1].graphicalProperties.line.solidFill = "00B050"
            chart.series[2].graphicalProperties.line.solidFill = "0070C0"
        ws.add_chart(chart, "H3")

        from openpyxl.worksheet.properties import PageSetupProperties
        ws.sheet_properties.pageSetUpPr = PageSetupProperties(fitToPage=True)
        ws.page_setup.orientation = 'landscape'
        ws.page_setup.fitToWidth = 1
        ws.page_setup.fitToHeight = 0

        all_location_summaries.append({
            'location': loc_name,
            'samples': len(rows),
            'lmax_avg': round(sum(lmax_vals) / len(lmax_vals), 1) if lmax_vals else None,
            'ln1_avg': round(sum(ln1_vals) / len(ln1_vals), 1) if ln1_vals else None,
            'ln2_avg': round(sum(ln2_vals) / len(ln2_vals), 1) if ln2_vals else None,
        })
    # Summary sheet
    summary_ws = wb.create_sheet(title="Summary", index=0)
    summary_ws['A1'] = f"{report_title} - {project_name} - Summary"
    summary_ws['A1'].font = title_font
    summary_ws.merge_cells('A1:E1')
    summary_headers = ['Location', 'Samples', 'LAmax Avg', 'LA01 Avg', 'LA10 Avg']
    for col, header in enumerate(summary_headers, 1):
        cell = summary_ws.cell(row=3, column=col, value=header)
        cell.font = header_font
        cell.fill = header_fill
        cell.border = thin_border
    for i, width in enumerate([30, 10, 12, 12, 12], 1):
        summary_ws.column_dimensions[get_column_letter(i)].width = width
    for idx, loc_summary in enumerate(all_location_summaries, 4):
        summary_ws.cell(row=idx, column=1, value=loc_summary['location']).border = thin_border
        summary_ws.cell(row=idx, column=2, value=loc_summary['samples']).border = thin_border
        summary_ws.cell(row=idx, column=3, value=loc_summary['lmax_avg'] or '-').border = thin_border
        summary_ws.cell(row=idx, column=4, value=loc_summary['ln1_avg'] or '-').border = thin_border
        summary_ws.cell(row=idx, column=5, value=loc_summary['ln2_avg'] or '-').border = thin_border

    output = io.BytesIO()
    wb.save(output)
    output.seek(0)
    project_name_clean = "".join(c for c in project_name if c.isalnum() or c in ('_', '-', ' ')).strip()
    filename = f"{project_name_clean}_combined_report.xlsx".replace(' ', '_')
    return StreamingResponse(
        output,
        media_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        headers={"Content-Disposition": f'attachment; filename="{filename}"'}
    )
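# Expected POST body for generate-combined-from-preview (a sketch with illustrative
# values; the keys mirror the data.get(...) calls above):
#   {
#     "report_title": "Background Noise Study",
#     "project_name": "Riverside Plant",
#     "client_name": "Acme Corp",
#     "locations": [
#       {"location_name": "North Fence",
#        "spreadsheet_data": [[1, "2026-03-01", "19:00:00", 62.4, 58.1, 51.7, ""]]}
#     ]
#   }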
# ============================================================================
# Project-level bulk upload (entire date-folder structure)
# ============================================================================
@@ -3062,6 +3598,23 @@ async def upload_all_project_data(
        serial_number = rnh_meta.get("serial_number", "")
        index_number = rnh_meta.get("index_number", "")

        # Detect device model from first RND file in this group (in-memory)
        _bulk_device_model = None
        for _fname, _fbytes in file_list:
            if _fname.lower().endswith(".rnd"):
                try:
                    import csv as _csv_dm, io as _io_dm
                    _text = _fbytes.decode("utf-8", errors="replace")
                    _reader = _csv_dm.DictReader(_io_dm.StringIO(_text))
                    _first = next(_reader, None)
                    if _first and "LAeq" in _first:
                        _bulk_device_model = "NL-32"
                    # NL-43/NL-53 have no distinguishing marker vs each other
                    # at the format level; leave None for those.
                except Exception:
                    pass
                break

        session_id = str(uuid.uuid4())
        monitoring_session = MonitoringSession(
            id=session_id,
@@ -3073,6 +3626,7 @@ async def upload_all_project_data(
            stopped_at=stopped_at,
            duration_seconds=duration_seconds,
            status="completed",
            device_model=_bulk_device_model,
            session_metadata=json.dumps({
                "source": "bulk_upload",
                "group_path": group_path,