Add:
- DB cache dump on diagnostics request.
- Individual device logs (database and files).
- Device logs API endpoints and diagnostics UI.

Fix:
- SLMM standalone now uses the local time zone (was UTC-only before).
- Fixed measurement start time logic.
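As a quick illustration of the new logs endpoint (a hedged sketch: the base URL and unit id are assumptions; the route and query parameters come from the app/routers.py diff below, and the /api/nl43 prefix from the UI code):

```python
import requests

# Assumed local deployment; adjust host/port and unit id for your setup.
BASE = "http://localhost:8000/api/nl43"

resp = requests.get(f"{BASE}/NL43-001/logs", params={"limit": 50, "level": "ERROR"})
resp.raise_for_status()
for entry in resp.json()["logs"]:
    print(entry["timestamp"], entry["level"], entry["category"], entry["message"])
```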
@@ -15,7 +15,8 @@ from sqlalchemy.orm import Session
from app.database import SessionLocal
from app.models import NL43Config, NL43Status
from app.services import NL43Client, persist_snapshot
from app.services import NL43Client, persist_snapshot, sync_measurement_start_time_from_ftp
from app.device_logger import log_device_event, cleanup_old_logs

logger = logging.getLogger(__name__)

@@ -36,6 +37,7 @@ class BackgroundPoller:
        self._task: Optional[asyncio.Task] = None
        self._running = False
        self._logger = logger
        self._last_cleanup = None  # Track last log cleanup time

    async def start(self):
        """Start the background polling task."""
@@ -78,6 +80,15 @@ class BackgroundPoller:
            except Exception as e:
                self._logger.error(f"Error in poll loop: {e}", exc_info=True)

            # Run log cleanup once per hour
            try:
                now = datetime.utcnow()
                if self._last_cleanup is None or (now - self._last_cleanup).total_seconds() > 3600:
                    cleanup_old_logs()
                    self._last_cleanup = now
            except Exception as e:
                self._logger.warning(f"Log cleanup failed: {e}")

            # Calculate dynamic sleep interval
            sleep_time = self._calculate_sleep_interval()
            self._logger.debug(f"Sleeping for {sleep_time} seconds until next poll cycle")
@@ -205,6 +216,71 @@ class BackgroundPoller:
            db.commit()
            self._logger.info(f"✓ Successfully polled {unit_id}")

            # Log to device log
            log_device_event(
                unit_id, "INFO", "POLL",
                f"Poll success: state={snap.measurement_state}, Leq={snap.leq}, Lp={snap.lp}",
                db
            )

            # Check if device is measuring but has no start time recorded.
            # This happens if measurement was started before SLMM began polling
            # or after a service restart.
            status = db.query(NL43Status).filter_by(unit_id=unit_id).first()

            # Reset the sync flag when measurement stops (so the next measurement can sync)
            if status and status.measurement_state != "Start":
                if status.start_time_sync_attempted:
                    status.start_time_sync_attempted = False
                    db.commit()
                    self._logger.debug(f"Reset FTP sync flag for {unit_id} (measurement stopped)")
                    log_device_event(unit_id, "DEBUG", "STATE", "Measurement stopped, reset FTP sync flag", db)

            # Attempt FTP sync if:
            # - Device is measuring
            # - No start time recorded
            # - FTP sync not already attempted for this measurement
            # - FTP is configured
            if (status and
                    status.measurement_state == "Start" and
                    status.measurement_start_time is None and
                    not status.start_time_sync_attempted and
                    cfg.ftp_enabled and
                    cfg.ftp_username and
                    cfg.ftp_password):

                self._logger.info(
                    f"Device {unit_id} is measuring but has no start time - "
                    f"attempting FTP sync"
                )
                log_device_event(unit_id, "INFO", "SYNC", "Attempting FTP sync for measurement start time", db)

                # Mark that we attempted sync (prevents repeated attempts on failure)
                status.start_time_sync_attempted = True
                db.commit()

                try:
                    synced = await sync_measurement_start_time_from_ftp(
                        unit_id=unit_id,
                        host=cfg.host,
                        tcp_port=cfg.tcp_port,
                        ftp_port=cfg.ftp_port or 21,
                        ftp_username=cfg.ftp_username,
                        ftp_password=cfg.ftp_password,
                        db=db
                    )
                    if synced:
                        self._logger.info(f"✓ FTP sync succeeded for {unit_id}")
                        log_device_event(unit_id, "INFO", "SYNC", "FTP sync succeeded - measurement start time updated", db)
                    else:
                        self._logger.warning(f"FTP sync returned False for {unit_id}")
                        log_device_event(unit_id, "WARNING", "SYNC", "FTP sync returned False", db)
                except Exception as sync_err:
                    self._logger.warning(
                        f"FTP sync failed for {unit_id}: {sync_err}"
                    )
                    log_device_event(unit_id, "ERROR", "SYNC", f"FTP sync failed: {sync_err}", db)

        except Exception as e:
            # Failure - increment counter and potentially mark offline
            status.consecutive_failures += 1
@@ -217,11 +293,13 @@ class BackgroundPoller:
                self._logger.warning(
                    f"Device {unit_id} marked unreachable after {status.consecutive_failures} failures: {error_msg}"
                )
                log_device_event(unit_id, "ERROR", "POLL", f"Device marked UNREACHABLE after {status.consecutive_failures} failures: {error_msg}", db)
                status.is_reachable = False
            else:
                self._logger.warning(
                    f"Poll failed for {unit_id} (attempt {status.consecutive_failures}/3): {error_msg}"
                )
                log_device_event(unit_id, "WARNING", "POLL", f"Poll failed (attempt {status.consecutive_failures}/3): {error_msg}", db)

        db.commit()
app/device_logger.py (new file, +277)
@@ -0,0 +1,277 @@
"""
Per-device logging system.

Provides dual output: database entries for structured queries and file logs for backup.
Each device gets its own log file in data/logs/{unit_id}.log with rotation.
"""

import logging
import os
from datetime import datetime, timedelta
from logging.handlers import RotatingFileHandler
from pathlib import Path
from typing import Optional

from sqlalchemy.orm import Session

from app.database import SessionLocal
from app.models import DeviceLog

# Configure base logger
logger = logging.getLogger(__name__)

# Log directory (persisted in Docker volume)
LOG_DIR = Path(os.path.dirname(os.path.dirname(__file__))) / "data" / "logs"
LOG_DIR.mkdir(parents=True, exist_ok=True)

# Per-device file loggers (cached)
_device_file_loggers: dict = {}

# Log retention (days)
LOG_RETENTION_DAYS = int(os.getenv("LOG_RETENTION_DAYS", "7"))


def _get_file_logger(unit_id: str) -> logging.Logger:
    """Get or create a file logger for a specific device."""
    if unit_id in _device_file_loggers:
        return _device_file_loggers[unit_id]

    # Create device-specific logger
    device_logger = logging.getLogger(f"device.{unit_id}")
    device_logger.setLevel(logging.DEBUG)

    # Avoid duplicate handlers
    if not device_logger.handlers:
        # Create rotating file handler (5 MB max, keep 3 backups)
        log_file = LOG_DIR / f"{unit_id}.log"
        handler = RotatingFileHandler(
            log_file,
            maxBytes=5 * 1024 * 1024,  # 5 MB
            backupCount=3,
            encoding="utf-8"
        )
        handler.setLevel(logging.DEBUG)

        # Format: timestamp [LEVEL] [CATEGORY] message
        formatter = logging.Formatter(
            "%(asctime)s [%(levelname)s] [%(category)s] %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S"
        )
        handler.setFormatter(formatter)
        device_logger.addHandler(handler)

        # Don't propagate to root logger
        device_logger.propagate = False

    _device_file_loggers[unit_id] = device_logger
    return device_logger
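One subtlety here: `%(category)s` is not a standard LogRecord attribute, so every call through this logger must supply it via the `extra` mapping (as `log_device_event` does below). A minimal standalone sketch of the mechanism, independent of SLMM:

```python
import logging

demo = logging.getLogger("demo")
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] [%(category)s] %(message)s"))
demo.addHandler(handler)

# Keys in `extra` become attributes on the LogRecord, so the
# formatter's %(category)s placeholder resolves correctly.
demo.warning("FTP timeout", extra={"category": "FTP"})

# Omitting `extra` would print a "--- Logging error ---" to stderr,
# because the record would have no `category` attribute to format.
```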
def log_device_event(
    unit_id: str,
    level: str,
    category: str,
    message: str,
    db: Optional[Session] = None
):
    """
    Log an event for a specific device.

    Writes to both:
    1. Database (DeviceLog table) for structured queries
    2. File (data/logs/{unit_id}.log) for backup/debugging

    Args:
        unit_id: Device identifier
        level: Log level (DEBUG, INFO, WARNING, ERROR)
        category: Event category (TCP, FTP, POLL, COMMAND, STATE, SYNC)
        message: Log message
        db: Optional database session (creates one if not provided)
    """
    timestamp = datetime.utcnow()

    # Write to file log
    try:
        file_logger = _get_file_logger(unit_id)
        log_func = getattr(file_logger, level.lower(), file_logger.info)
        # Pass category as extra for formatter
        log_func(message, extra={"category": category})
    except Exception as e:
        logger.warning(f"Failed to write file log for {unit_id}: {e}")

    # Write to database
    close_db = False
    try:
        if db is None:
            db = SessionLocal()
            close_db = True

        log_entry = DeviceLog(
            unit_id=unit_id,
            timestamp=timestamp,
            level=level.upper(),
            category=category.upper(),
            message=message
        )
        db.add(log_entry)
        db.commit()

    except Exception as e:
        logger.warning(f"Failed to write DB log for {unit_id}: {e}")
        if db:
            db.rollback()
    finally:
        if close_db and db:
            db.close()
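Typical call sites pass the poller's open session so the log write goes through the same connection; standalone callers can omit `db` and let the helper open and close its own session. A hedged sketch (the unit id is a placeholder):

```python
from app.device_logger import log_device_event

# Standalone call: the helper opens and closes its own session.
log_device_event("NL43-001", "WARNING", "FTP", "FTP listing timed out")

# Inside the poller, an existing session is passed instead:
#   log_device_event(unit_id, "INFO", "POLL", "Poll success", db)
```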
def cleanup_old_logs(retention_days: Optional[int] = None, db: Optional[Session] = None):
    """
    Delete log entries older than retention period.

    Args:
        retention_days: Days to retain (default: LOG_RETENTION_DAYS env var or 7)
        db: Optional database session
    """
    if retention_days is None:
        retention_days = LOG_RETENTION_DAYS

    cutoff = datetime.utcnow() - timedelta(days=retention_days)

    close_db = False
    try:
        if db is None:
            db = SessionLocal()
            close_db = True

        deleted = db.query(DeviceLog).filter(DeviceLog.timestamp < cutoff).delete()
        db.commit()

        if deleted > 0:
            logger.info(f"Cleaned up {deleted} log entries older than {retention_days} days")

    except Exception as e:
        logger.error(f"Failed to cleanup old logs: {e}")
        if db:
            db.rollback()
    finally:
        if close_db and db:
            db.close()


def get_device_logs(
    unit_id: str,
    limit: int = 100,
    offset: int = 0,
    level: Optional[str] = None,
    category: Optional[str] = None,
    since: Optional[datetime] = None,
    db: Optional[Session] = None
) -> list:
    """
    Query log entries for a specific device.

    Args:
        unit_id: Device identifier
        limit: Max entries to return (default: 100)
        offset: Number of entries to skip (default: 0)
        level: Filter by level (DEBUG, INFO, WARNING, ERROR)
        category: Filter by category (TCP, FTP, POLL, COMMAND, STATE, SYNC)
        since: Filter entries after this timestamp
        db: Optional database session

    Returns:
        List of log entries as dicts
    """
    close_db = False
    try:
        if db is None:
            db = SessionLocal()
            close_db = True

        query = db.query(DeviceLog).filter(DeviceLog.unit_id == unit_id)

        if level:
            query = query.filter(DeviceLog.level == level.upper())
        if category:
            query = query.filter(DeviceLog.category == category.upper())
        if since:
            query = query.filter(DeviceLog.timestamp >= since)

        # Order by newest first
        query = query.order_by(DeviceLog.timestamp.desc())

        # Apply pagination
        entries = query.offset(offset).limit(limit).all()

        return [
            {
                "id": e.id,
                "timestamp": e.timestamp.isoformat() + "Z",
                "level": e.level,
                "category": e.category,
                "message": e.message
            }
            for e in entries
        ]

    finally:
        if close_db and db:
            db.close()


def get_log_stats(unit_id: str, db: Optional[Session] = None) -> dict:
    """
    Get log statistics for a device.

    Returns:
        Dict with counts by level and category
    """
    close_db = False
    try:
        if db is None:
            db = SessionLocal()
            close_db = True

        total = db.query(DeviceLog).filter(DeviceLog.unit_id == unit_id).count()

        # Count by level
        level_counts = {}
        for level in ["DEBUG", "INFO", "WARNING", "ERROR"]:
            count = db.query(DeviceLog).filter(
                DeviceLog.unit_id == unit_id,
                DeviceLog.level == level
            ).count()
            if count > 0:
                level_counts[level] = count

        # Count by category
        category_counts = {}
        for category in ["TCP", "FTP", "POLL", "COMMAND", "STATE", "SYNC", "GENERAL"]:
            count = db.query(DeviceLog).filter(
                DeviceLog.unit_id == unit_id,
                DeviceLog.category == category
            ).count()
            if count > 0:
                category_counts[category] = count

        # Get oldest and newest
        oldest = db.query(DeviceLog).filter(
            DeviceLog.unit_id == unit_id
        ).order_by(DeviceLog.timestamp.asc()).first()

        newest = db.query(DeviceLog).filter(
            DeviceLog.unit_id == unit_id
        ).order_by(DeviceLog.timestamp.desc()).first()

        return {
            "total": total,
            "by_level": level_counts,
            "by_category": category_counts,
            "oldest": oldest.timestamp.isoformat() + "Z" if oldest else None,
            "newest": newest.timestamp.isoformat() + "Z" if newest else None
        }

    finally:
        if close_db and db:
            db.close()
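A quick sketch of how the two query helpers compose (the unit id is a placeholder; the returned shapes follow the dicts built above):

```python
from app.device_logger import get_device_logs, get_log_stats

errors = get_device_logs("NL43-001", limit=20, level="ERROR", category="SYNC")
for e in errors:
    print(e["timestamp"], e["message"])

stats = get_log_stats("NL43-001")
# Shape, roughly: {"total": 412, "by_level": {"INFO": 390, "ERROR": 3},
#                  "by_category": {"POLL": 350, "SYNC": 12},
#                  "oldest": "...Z", "newest": "...Z"}
print(stats["by_level"].get("ERROR", 0), "errors on record")
```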
@@ -53,3 +53,22 @@ class NL43Status(Base):
    last_poll_attempt = Column(DateTime, nullable=True)  # Last time background poller attempted to poll
    last_success = Column(DateTime, nullable=True)  # Last successful poll timestamp
    last_error = Column(Text, nullable=True)  # Last error message (truncated to 500 chars)

    # FTP start time sync tracking
    start_time_sync_attempted = Column(Boolean, default=False)  # True if FTP sync was attempted for current measurement


class DeviceLog(Base):
    """
    Per-device log entries for debugging and audit trail.
    Stores events like commands, state changes, errors, and FTP operations.
    """

    __tablename__ = "device_logs"

    id = Column(Integer, primary_key=True, autoincrement=True)
    unit_id = Column(String, index=True, nullable=False)
    timestamp = Column(DateTime, default=func.now(), index=True)
    level = Column(String, default="INFO")  # DEBUG, INFO, WARNING, ERROR
    category = Column(String, default="GENERAL")  # TCP, FTP, POLL, COMMAND, STATE, SYNC
    message = Column(Text, nullable=False)
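The indexes on `unit_id` and `timestamp` are what keep the endpoint and cleanup queries cheap; for ad-hoc debugging the table can also be queried directly. A hedged sketch (unit id is a placeholder):

```python
from datetime import datetime, timedelta
from app.database import SessionLocal
from app.models import DeviceLog

db = SessionLocal()
try:
    # Last day's ERROR entries for one device, newest first,
    # filtered on the indexed unit_id and timestamp columns.
    rows = (db.query(DeviceLog)
              .filter(DeviceLog.unit_id == "NL43-001",
                      DeviceLog.level == "ERROR",
                      DeviceLog.timestamp >= datetime.utcnow() - timedelta(days=1))
              .order_by(DeviceLog.timestamp.desc())
              .limit(10)
              .all())
    for r in rows:
        print(r.timestamp, r.category, r.message)
finally:
    db.close()
```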
app/routers.py (+126)
@@ -3,6 +3,7 @@ from fastapi.responses import FileResponse
from sqlalchemy.orm import Session
from datetime import datetime
from pydantic import BaseModel, field_validator, Field
from typing import Optional
import logging
import ipaddress
import json
@@ -1842,9 +1843,134 @@ async def run_diagnostics(unit_id: str, db: Session = Depends(get_db)):

    # All tests passed
    diagnostics["overall_status"] = "pass"

    # Add database dump: config and status cache
    diagnostics["database_dump"] = {
        "config": {
            "unit_id": cfg.unit_id,
            "host": cfg.host,
            "tcp_port": cfg.tcp_port,
            "tcp_enabled": cfg.tcp_enabled,
            "ftp_enabled": cfg.ftp_enabled,
            "ftp_port": cfg.ftp_port,
            "ftp_username": cfg.ftp_username,
            "ftp_password": "***" if cfg.ftp_password else None,  # Mask password
            "web_enabled": cfg.web_enabled,
            "poll_interval_seconds": cfg.poll_interval_seconds,
            "poll_enabled": cfg.poll_enabled
        },
        "status_cache": None
    }

    # Get cached status if available
    status = db.query(NL43Status).filter_by(unit_id=unit_id).first()
    if status:
        # Helper to format datetime as ISO with Z suffix to indicate UTC
        def to_utc_iso(dt):
            return dt.isoformat() + 'Z' if dt else None

        diagnostics["database_dump"]["status_cache"] = {
            "unit_id": status.unit_id,
            "last_seen": to_utc_iso(status.last_seen),
            "measurement_state": status.measurement_state,
            "measurement_start_time": to_utc_iso(status.measurement_start_time),
            "counter": status.counter,
            "lp": status.lp,
            "leq": status.leq,
            "lmax": status.lmax,
            "lmin": status.lmin,
            "lpeak": status.lpeak,
            "battery_level": status.battery_level,
            "power_source": status.power_source,
            "sd_remaining_mb": status.sd_remaining_mb,
            "sd_free_ratio": status.sd_free_ratio,
            "is_reachable": status.is_reachable,
            "consecutive_failures": status.consecutive_failures,
            "last_poll_attempt": to_utc_iso(status.last_poll_attempt),
            "last_success": to_utc_iso(status.last_success),
            "last_error": status.last_error,
            "raw_payload": status.raw_payload
        }

    return diagnostics


# ============================================================================
# DEVICE LOGS ENDPOINTS
# ============================================================================

@router.get("/{unit_id}/logs")
def get_device_logs(
    unit_id: str,
    limit: int = 100,
    offset: int = 0,
    level: Optional[str] = None,
    category: Optional[str] = None,
    db: Session = Depends(get_db)
):
    """
    Get log entries for a specific device.

    Query parameters:
    - limit: Max entries to return (default: 100, max: 1000)
    - offset: Number of entries to skip (for pagination)
    - level: Filter by level (DEBUG, INFO, WARNING, ERROR)
    - category: Filter by category (TCP, FTP, POLL, COMMAND, STATE, SYNC)

    Returns newest entries first.
    """
    from app.device_logger import get_device_logs as fetch_logs, get_log_stats

    # Validate limit
    limit = min(limit, 1000)

    logs = fetch_logs(
        unit_id=unit_id,
        limit=limit,
        offset=offset,
        level=level,
        category=category,
        db=db
    )

    stats = get_log_stats(unit_id, db)

    return {
        "status": "ok",
        "unit_id": unit_id,
        "logs": logs,
        "count": len(logs),
        "stats": stats,
        "filters": {
            "level": level,
            "category": category
        },
        "pagination": {
            "limit": limit,
            "offset": offset
        }
    }


@router.delete("/{unit_id}/logs")
def clear_device_logs(unit_id: str, db: Session = Depends(get_db)):
    """
    Clear all log entries for a specific device.
    """
    from app.models import DeviceLog

    deleted = db.query(DeviceLog).filter(DeviceLog.unit_id == unit_id).delete()
    db.commit()

    logger.info(f"Cleared {deleted} log entries for device {unit_id}")

    return {
        "status": "ok",
        "message": f"Cleared {deleted} log entries for {unit_id}",
        "deleted_count": deleted
    }
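For completeness, clearing a device's logs from a script might look like this (hedged: base URL and unit id are assumptions, the route is from the diff above):

```python
import requests

resp = requests.delete("http://localhost:8000/api/nl43/NL43-001/logs")
resp.raise_for_status()
print(resp.json()["message"])  # e.g. "Cleared 42 log entries for NL43-001"
```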

# ============================================================================
# BACKGROUND POLLING CONFIGURATION ENDPOINTS
# ============================================================================
app/services.py (+115)
@@ -76,10 +76,22 @@ def persist_snapshot(s: NL43Snapshot, db: Session):
            # Measurement just started - record the start time
            row.measurement_start_time = datetime.utcnow()
            logger.info(f"✓ Measurement started on {s.unit_id} at {row.measurement_start_time}")
            # Log state change (lazy import to avoid circular dependency)
            try:
                from app.device_logger import log_device_event
                log_device_event(s.unit_id, "INFO", "STATE", f"Measurement STARTED at {row.measurement_start_time}", db)
            except Exception:
                pass
        elif was_measuring and not is_measuring:
            # Measurement stopped - clear the start time
            row.measurement_start_time = None
            logger.info(f"✓ Measurement stopped on {s.unit_id}")
            # Log state change
            try:
                from app.device_logger import log_device_event
                log_device_event(s.unit_id, "INFO", "STATE", "Measurement STOPPED", db)
            except Exception:
                pass

        row.measurement_state = new_state
        row.counter = s.counter
@@ -101,6 +113,109 @@ def persist_snapshot(s: NL43Snapshot, db: Session):
        raise


async def sync_measurement_start_time_from_ftp(
    unit_id: str,
    host: str,
    tcp_port: int,
    ftp_port: int,
    ftp_username: str,
    ftp_password: str,
    db: Session
) -> bool:
    """
    Sync measurement start time from the FTP folder timestamp.

    This is called when SLMM detects a device is already measuring but doesn't
    have a recorded start time (e.g., after service restart or if measurement
    was started before SLMM began polling).

    The workflow:
    1. Disable FTP (reset)
    2. Enable FTP
    3. List NL-43 folder to get measurement folder timestamps
    4. Use the most recent folder's timestamp as the start time
    5. Update the database

    Args:
        unit_id: Device identifier
        host: Device IP/hostname
        tcp_port: TCP control port
        ftp_port: FTP port (usually 21)
        ftp_username: FTP username (usually "USER")
        ftp_password: FTP password (usually "0000")
        db: Database session

    Returns:
        True if sync succeeded, False otherwise
    """
    logger.info(f"[FTP-SYNC] Attempting to sync measurement start time for {unit_id} via FTP")

    client = NL43Client(
        host, tcp_port,
        ftp_username=ftp_username,
        ftp_password=ftp_password,
        ftp_port=ftp_port
    )

    try:
        # Step 1: Disable FTP to reset it
        logger.info(f"[FTP-SYNC] Step 1: Disabling FTP on {unit_id}")
        await client.disable_ftp()
        await asyncio.sleep(1.5)  # Wait for device to process

        # Step 2: Enable FTP
        logger.info(f"[FTP-SYNC] Step 2: Enabling FTP on {unit_id}")
        await client.enable_ftp()
        await asyncio.sleep(2.0)  # Wait for FTP server to start

        # Step 3: List NL-43 folder
        logger.info(f"[FTP-SYNC] Step 3: Listing /NL-43 folder on {unit_id}")
        files = await client.list_ftp_files("/NL-43")

        # Filter for directories only (measurement folders)
        folders = [f for f in files if f.get('is_dir', False)]

        if not folders:
            logger.warning(f"[FTP-SYNC] No measurement folders found on {unit_id}")
            return False

        # Sort by modified timestamp (newest first)
        folders.sort(key=lambda f: f.get('modified_timestamp', ''), reverse=True)

        latest_folder = folders[0]
        folder_name = latest_folder['name']
        logger.info(f"[FTP-SYNC] Found latest measurement folder: {folder_name}")

        # Step 4: Parse timestamp
        if 'modified_timestamp' in latest_folder and latest_folder['modified_timestamp']:
            timestamp_str = latest_folder['modified_timestamp']
            # Parse ISO format timestamp (already in UTC from SLMM FTP listing)
            start_time = datetime.fromisoformat(timestamp_str.replace('Z', ''))

            # Step 5: Update database
            status = db.query(NL43Status).filter_by(unit_id=unit_id).first()
            if status:
                old_time = status.measurement_start_time
                status.measurement_start_time = start_time
                db.commit()

                logger.info(f"[FTP-SYNC] ✓ Successfully synced start time for {unit_id}")
                logger.info(f"[FTP-SYNC] Folder: {folder_name}")
                logger.info(f"[FTP-SYNC] Old start time: {old_time}")
                logger.info(f"[FTP-SYNC] New start time: {start_time}")
                return True
            else:
                logger.warning(f"[FTP-SYNC] Status record not found for {unit_id}")
                return False
        else:
            logger.warning(f"[FTP-SYNC] Could not parse timestamp from folder {folder_name}")
            return False

    except Exception as e:
        logger.error(f"[FTP-SYNC] Failed to sync start time for {unit_id}: {e}")
        return False


# Rate limiting: NL43 requires ≥1 second between commands
_last_command_time = {}
_rate_limit_lock = asyncio.Lock()
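The diff cuts off before showing how these two module-level objects are used. As a rough illustration only (not the commit's actual code), a per-device rate limiter built on them typically looks like this; the helper name is hypothetical:

```python
import asyncio
import time

# Hypothetical helper illustrating the pattern; SLMM's real
# implementation is not shown in this diff. Holding the lock
# through the sleep also serializes commands across devices,
# which is acceptable for a sketch.
async def _respect_rate_limit(unit_id: str, min_interval: float = 1.0):
    async with _rate_limit_lock:
        last = _last_command_time.get(unit_id, 0.0)
        wait = min_interval - (time.monotonic() - last)
        if wait > 0:
            await asyncio.sleep(wait)  # enforce ≥1 s between commands
        _last_command_time[unit_id] = time.monotonic()
```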
migrate_add_device_logs.py (new file, +73)
@@ -0,0 +1,73 @@
#!/usr/bin/env python3
"""
Database migration: Add device_logs table.

This table stores per-device log entries for debugging and audit trail.

Run this once to add the new table.
"""

import sqlite3
import os

# Path to the SLMM database
DB_PATH = os.path.join(os.path.dirname(__file__), "data", "slmm.db")


def migrate():
    print(f"Adding device_logs table to: {DB_PATH}")

    if not os.path.exists(DB_PATH):
        print("Database does not exist yet. Table will be created automatically on first run.")
        return

    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()

    try:
        # Check if table already exists
        cursor.execute("""
            SELECT name FROM sqlite_master
            WHERE type='table' AND name='device_logs'
        """)
        if cursor.fetchone():
            print("✓ device_logs table already exists, no migration needed")
            return

        # Create the table
        print("Creating device_logs table...")
        cursor.execute("""
            CREATE TABLE device_logs (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                unit_id VARCHAR NOT NULL,
                timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
                level VARCHAR DEFAULT 'INFO',
                category VARCHAR DEFAULT 'GENERAL',
                message TEXT NOT NULL
            )
        """)

        # Create indexes for efficient querying
        print("Creating indexes...")
        cursor.execute("CREATE INDEX ix_device_logs_unit_id ON device_logs (unit_id)")
        cursor.execute("CREATE INDEX ix_device_logs_timestamp ON device_logs (timestamp)")

        conn.commit()
        print("✓ Created device_logs table with indexes")

        # Verify
        cursor.execute("""
            SELECT name FROM sqlite_master
            WHERE type='table' AND name='device_logs'
        """)
        if not cursor.fetchone():
            raise Exception("device_logs table was not created successfully")

        print("✓ Migration completed successfully")

    finally:
        conn.close()


if __name__ == "__main__":
    migrate()
migrate_add_start_time_sync_flag.py (new file, +60)
@@ -0,0 +1,60 @@
#!/usr/bin/env python3
"""
Database migration: Add start_time_sync_attempted field to nl43_status table.

This field tracks whether FTP sync has been attempted for the current measurement,
preventing repeated sync attempts when FTP fails.

Run this once to add the new column.
"""

import sqlite3
import os

# Path to the SLMM database
DB_PATH = os.path.join(os.path.dirname(__file__), "data", "slmm.db")


def migrate():
    print(f"Adding start_time_sync_attempted field to: {DB_PATH}")

    if not os.path.exists(DB_PATH):
        print("Database does not exist yet. Column will be created automatically.")
        return

    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()

    try:
        # Check if column already exists
        cursor.execute("PRAGMA table_info(nl43_status)")
        columns = [col[1] for col in cursor.fetchall()]

        if 'start_time_sync_attempted' in columns:
            print("✓ start_time_sync_attempted column already exists, no migration needed")
            return

        # Add the column
        print("Adding start_time_sync_attempted column...")
        cursor.execute("""
            ALTER TABLE nl43_status
            ADD COLUMN start_time_sync_attempted BOOLEAN DEFAULT 0
        """)
        conn.commit()
        print("✓ Added start_time_sync_attempted column")

        # Verify
        cursor.execute("PRAGMA table_info(nl43_status)")
        columns = [col[1] for col in cursor.fetchall()]

        if 'start_time_sync_attempted' not in columns:
            raise Exception("start_time_sync_attempted column was not added successfully")

        print("✓ Migration completed successfully")

    finally:
        conn.close()


if __name__ == "__main__":
    migrate()
@@ -333,6 +333,134 @@

html += `<p style="margin-top: 12px; font-size: 0.9em; color: #666;">Last run: ${new Date(data.timestamp).toLocaleString()}</p>`;

// Add database dump section if available
if (data.database_dump) {
    html += `<div style="margin-top: 16px; border-top: 1px solid #d0d7de; padding-top: 12px;">`;
    html += `<h4 style="margin: 0 0 12px 0;">📦 Database Dump</h4>`;

    // Config section
    if (data.database_dump.config) {
        const cfg = data.database_dump.config;
        html += `<div style="background: #f0f4f8; padding: 12px; border-radius: 4px; margin-bottom: 12px;">`;
        html += `<strong>Configuration (nl43_config)</strong>`;
        html += `<table style="width: 100%; margin-top: 8px; font-size: 0.9em;">`;
        html += `<tr><td style="padding: 2px 8px; color: #666;">Host</td><td>${cfg.host}:${cfg.tcp_port}</td></tr>`;
        html += `<tr><td style="padding: 2px 8px; color: #666;">TCP Enabled</td><td>${cfg.tcp_enabled ? '✓' : '✗'}</td></tr>`;
        html += `<tr><td style="padding: 2px 8px; color: #666;">FTP Enabled</td><td>${cfg.ftp_enabled ? '✓' : '✗'}${cfg.ftp_enabled ? ` (port ${cfg.ftp_port}, user: ${cfg.ftp_username || 'none'})` : ''}</td></tr>`;
        html += `<tr><td style="padding: 2px 8px; color: #666;">Background Polling</td><td>${cfg.poll_enabled ? `✓ every ${cfg.poll_interval_seconds}s` : '✗ disabled'}</td></tr>`;
        html += `</table></div>`;
    }

    // Status cache section
    if (data.database_dump.status_cache) {
        const cache = data.database_dump.status_cache;
        html += `<div style="background: #f0f8f4; padding: 12px; border-radius: 4px; margin-bottom: 12px;">`;
        html += `<strong>Status Cache (nl43_status)</strong>`;
        html += `<table style="width: 100%; margin-top: 8px; font-size: 0.9em;">`;

        // Measurement state and timing
        html += `<tr><td style="padding: 2px 8px; color: #666;">Measurement State</td><td><strong>${cache.measurement_state || 'unknown'}</strong></td></tr>`;
        if (cache.measurement_start_time) {
            const startTime = new Date(cache.measurement_start_time);
            const elapsed = Math.floor((Date.now() - startTime) / 1000);
            const elapsedStr = elapsed > 3600 ? `${Math.floor(elapsed/3600)}h ${Math.floor((elapsed%3600)/60)}m` : elapsed > 60 ? `${Math.floor(elapsed/60)}m ${elapsed%60}s` : `${elapsed}s`;
            html += `<tr><td style="padding: 2px 8px; color: #666;">Measurement Started</td><td>${startTime.toLocaleString()} (${elapsedStr} ago)</td></tr>`;
        }
        html += `<tr><td style="padding: 2px 8px; color: #666;">Counter (d0)</td><td>${cache.counter || 'N/A'}</td></tr>`;

        // Sound levels
        html += `<tr><td colspan="2" style="padding: 8px 8px 2px 8px; font-weight: 600; border-top: 1px solid #d0d7de;">Sound Levels (dB)</td></tr>`;
        html += `<tr><td style="padding: 2px 8px; color: #666;">Lp (Instantaneous)</td><td>${cache.lp || 'N/A'}</td></tr>`;
        html += `<tr><td style="padding: 2px 8px; color: #666;">Leq (Equivalent)</td><td>${cache.leq || 'N/A'}</td></tr>`;
        html += `<tr><td style="padding: 2px 8px; color: #666;">Lmax / Lmin</td><td>${cache.lmax || 'N/A'} / ${cache.lmin || 'N/A'}</td></tr>`;
        html += `<tr><td style="padding: 2px 8px; color: #666;">Lpeak</td><td>${cache.lpeak || 'N/A'}</td></tr>`;

        // Device status
        html += `<tr><td colspan="2" style="padding: 8px 8px 2px 8px; font-weight: 600; border-top: 1px solid #d0d7de;">Device Status</td></tr>`;
        html += `<tr><td style="padding: 2px 8px; color: #666;">Battery</td><td>${cache.battery_level || 'N/A'}${cache.power_source ? ` (${cache.power_source})` : ''}</td></tr>`;
        html += `<tr><td style="padding: 2px 8px; color: #666;">SD Card</td><td>${cache.sd_remaining_mb ? `${cache.sd_remaining_mb} MB` : 'N/A'}${cache.sd_free_ratio ? ` (${cache.sd_free_ratio} free)` : ''}</td></tr>`;

        // Polling status
        html += `<tr><td colspan="2" style="padding: 8px 8px 2px 8px; font-weight: 600; border-top: 1px solid #d0d7de;">Polling Status</td></tr>`;
        html += `<tr><td style="padding: 2px 8px; color: #666;">Reachable</td><td>${cache.is_reachable ? '🟢 Yes' : '🔴 No'}</td></tr>`;
        if (cache.last_seen) {
            html += `<tr><td style="padding: 2px 8px; color: #666;">Last Seen</td><td>${new Date(cache.last_seen).toLocaleString()}</td></tr>`;
        }
        if (cache.last_success) {
            html += `<tr><td style="padding: 2px 8px; color: #666;">Last Success</td><td>${new Date(cache.last_success).toLocaleString()}</td></tr>`;
        }
        if (cache.last_poll_attempt) {
            html += `<tr><td style="padding: 2px 8px; color: #666;">Last Poll Attempt</td><td>${new Date(cache.last_poll_attempt).toLocaleString()}</td></tr>`;
        }
        html += `<tr><td style="padding: 2px 8px; color: #666;">Consecutive Failures</td><td>${cache.consecutive_failures || 0}</td></tr>`;
        if (cache.last_error) {
            html += `<tr><td style="padding: 2px 8px; color: #666;">Last Error</td><td style="color: #d00; font-size: 0.85em;">${cache.last_error}</td></tr>`;
        }

        html += `</table></div>`;

        // Raw payload (collapsible)
        if (cache.raw_payload) {
            html += `<details style="margin-top: 8px;"><summary style="cursor: pointer; color: #666; font-size: 0.9em;">📄 Raw Payload</summary>`;
            html += `<pre style="background: #f6f8fa; padding: 8px; border-radius: 4px; font-size: 0.8em; overflow-x: auto; margin-top: 8px;">${cache.raw_payload}</pre></details>`;
        }
    } else {
        html += `<p style="color: #888; font-style: italic;">No cached status available for this unit.</p>`;
    }

    html += `</div>`;
}

// Fetch and display device logs
try {
    const logsRes = await fetch(`/api/nl43/${unitId}/logs?limit=50`);
    if (logsRes.ok) {
        const logsData = await logsRes.json();
        if (logsData.logs && logsData.logs.length > 0) {
            html += `<div style="margin-top: 16px; border-top: 1px solid #d0d7de; padding-top: 12px;">`;
            html += `<h4 style="margin: 0 0 12px 0;">📋 Device Logs (${logsData.stats.total} total)</h4>`;

            // Stats summary
            if (logsData.stats.by_level) {
                html += `<div style="margin-bottom: 8px; font-size: 0.85em; color: #666;">`;
                const levels = logsData.stats.by_level;
                const parts = [];
                if (levels.ERROR) parts.push(`<span style="color: #d00;">${levels.ERROR} errors</span>`);
                if (levels.WARNING) parts.push(`<span style="color: #fa0;">${levels.WARNING} warnings</span>`);
                if (levels.INFO) parts.push(`${levels.INFO} info`);
                html += parts.join(' · ');
                html += `</div>`;
            }

            // Log entries (collapsible)
            html += `<details open><summary style="cursor: pointer; font-size: 0.9em; margin-bottom: 8px;">Recent entries (${logsData.logs.length})</summary>`;
            html += `<div style="max-height: 300px; overflow-y: auto; background: #f6f8fa; border: 1px solid #d0d7de; border-radius: 4px; padding: 8px; font-size: 0.8em; font-family: monospace;">`;

            logsData.logs.forEach(entry => {
                const levelColor = {
                    'ERROR': '#d00',
                    'WARNING': '#b86e00',
                    'INFO': '#0969da',
                    'DEBUG': '#888'
                }[entry.level] || '#666';

                const time = new Date(entry.timestamp).toLocaleString();
                html += `<div style="margin-bottom: 4px; border-bottom: 1px solid #eee; padding-bottom: 4px;">`;
                html += `<span style="color: #888;">${time}</span> `;
                html += `<span style="color: ${levelColor}; font-weight: 600;">[${entry.level}]</span> `;
                html += `<span style="color: #666;">[${entry.category}]</span> `;
                html += `${entry.message}`;
                html += `</div>`;
            });

            html += `</div></details>`;
            html += `</div>`;
        }
    }
} catch (logErr) {
    console.log('Could not fetch device logs:', logErr);
}

resultsEl.innerHTML = html;
log(`Diagnostics complete: ${data.overall_status}`);