Feat/Fix: Scheduler actions more strictly defined. Commands now working.

serversdwn
2026-01-22 20:25:19 +00:00
parent 65ea0920db
commit c771a86675
7 changed files with 360 additions and 38 deletions


@@ -23,6 +23,7 @@ from sqlalchemy import and_
 from backend.database import SessionLocal
 from backend.models import ScheduledAction, RecordingSession, MonitoringLocation, Project, RecurringSchedule
 from backend.services.device_controller import get_device_controller, DeviceControllerError
+from backend.services.alert_service import get_alert_service
 import uuid
 
 logger = logging.getLogger(__name__)
@@ -197,6 +198,21 @@ class SchedulerService:
print(f"✓ Action {action.id} completed successfully")
# Create success alert
try:
alert_service = get_alert_service(db)
alert_metadata = response.get("cycle_response", {}) if isinstance(response, dict) else {}
alert_service.create_schedule_completed_alert(
schedule_id=action.id,
action_type=action.action_type,
unit_id=unit_id,
project_id=action.project_id,
location_id=action.location_id,
metadata=alert_metadata,
)
except Exception as alert_err:
logger.warning(f"Failed to create success alert: {alert_err}")
except Exception as e:
# Mark action as failed
action.execution_status = "failed"
@@ -207,6 +223,20 @@ class SchedulerService:
print(f"✗ Action {action.id} failed: {e}")
# Create failure alert
try:
alert_service = get_alert_service(db)
alert_service.create_schedule_failed_alert(
schedule_id=action.id,
action_type=action.action_type,
unit_id=unit_id if 'unit_id' in dir() else action.unit_id,
error_message=str(e),
project_id=action.project_id,
location_id=action.location_id,
)
except Exception as alert_err:
logger.warning(f"Failed to create failure alert: {alert_err}")
return result
async def _execute_start(
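
Note: get_alert_service, create_schedule_completed_alert, and create_schedule_failed_alert come from backend/services/alert_service, which is not touched in this diff. A minimal sketch of the interface the call sites above assume (class name, parameter defaults, and behaviour are inferred from the arguments passed, not taken from the actual module):

# Sketch only - names and defaults inferred from the scheduler call sites, not from alert_service itself.
from typing import Any, Dict, Optional
from sqlalchemy.orm import Session

class AlertService:
    def __init__(self, db: Session):
        self.db = db

    def create_schedule_completed_alert(
        self,
        schedule_id: int,
        action_type: str,
        unit_id: str,
        project_id: Optional[int] = None,
        location_id: Optional[int] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> None:
        # Presumably persists an alert row for the completed scheduled action.
        ...

    def create_schedule_failed_alert(
        self,
        schedule_id: int,
        action_type: str,
        unit_id: str,
        error_message: str,
        project_id: Optional[int] = None,
        location_id: Optional[int] = None,
    ) -> None:
        # Presumably persists an alert row describing the failure.
        ...

def get_alert_service(db: Session) -> AlertService:
    return AlertService(db)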
@@ -215,35 +245,19 @@ class SchedulerService:
         unit_id: str,
         db: Session,
     ) -> Dict[str, Any]:
-        """Execute a 'start' action."""
-        # Parse action notes for automation settings
-        auto_increment_index = False
-        try:
-            if action.notes:
-                notes_data = json.loads(action.notes)
-                auto_increment_index = notes_data.get("auto_increment_index", False)
-        except json.JSONDecodeError:
-            pass # Notes is plain text, not JSON
+        """Execute a 'start' action using the start_cycle command.
 
-        # If auto_increment_index is enabled, increment the store index before starting
-        increment_response = None
-        if auto_increment_index and action.device_type == "slm":
-            try:
-                logger.info(f"Auto-incrementing store index for unit {unit_id}")
-                increment_response = await self.device_controller.increment_index(
-                    unit_id,
-                    action.device_type,
-                )
-                logger.info(f"Index incremented: {increment_response}")
-            except Exception as e:
-                logger.warning(f"Failed to increment index for {unit_id}: {e}")
-                # Continue with start anyway - don't fail the whole action
-
-        # Start recording via device controller
-        response = await self.device_controller.start_recording(
+        start_cycle handles:
+        1. Sync device clock to server time
+        2. Find next safe index (with overwrite protection)
+        3. Start measurement
+        """
+        # Execute the full start cycle via device controller
+        # SLMM handles clock sync, index increment, and start
+        cycle_response = await self.device_controller.start_cycle(
             unit_id,
             action.device_type,
-            config={},
+            sync_clock=True,
         )
 
         # Create recording session
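
Note: start_cycle itself lives on the device controller / SLMM side and is outside this diff. A rough sketch of the behaviour the scheduler now relies on, with hypothetical helper and return-key names, based only on the call site and docstring above:

# Sketch only - helper methods and return keys are illustrative, not SLMM's actual API.
from typing import Any, Dict

class StartCycleSketch:
    """Rough shape of what the scheduler expects start_cycle to do."""

    async def start_cycle(self, unit_id: str, device_type: str, sync_clock: bool = True) -> Dict[str, Any]:
        if sync_clock:
            await self._sync_clock(unit_id)             # 1. sync device clock to server time
        index = await self._next_safe_index(unit_id)    # 2. find next safe index (overwrite protection)
        await self._start_measurement(unit_id, index)   # 3. start measurement
        return {"synced": sync_clock, "store_index": index, "started": True}

    async def _sync_clock(self, unit_id: str) -> None: ...
    async def _next_safe_index(self, unit_id: str) -> int: return 1
    async def _start_measurement(self, unit_id: str, index: int) -> None: ...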
@@ -257,8 +271,7 @@ class SchedulerService:
status="recording",
session_metadata=json.dumps({
"scheduled_action_id": action.id,
"auto_increment_index": auto_increment_index,
"increment_response": increment_response,
"cycle_response": cycle_response,
}),
)
db.add(session)
@@ -266,9 +279,7 @@ class SchedulerService:
         return {
             "status": "started",
             "session_id": session.id,
-            "device_response": response,
-            "index_incremented": auto_increment_index,
-            "increment_response": increment_response,
+            "cycle_response": cycle_response,
         }
 
     async def _execute_stop(
@@ -277,11 +288,29 @@ class SchedulerService:
         unit_id: str,
         db: Session,
     ) -> Dict[str, Any]:
-        """Execute a 'stop' action."""
-        # Stop recording via device controller
-        response = await self.device_controller.stop_recording(
+        """Execute a 'stop' action using the stop_cycle command.
+
+        stop_cycle handles:
+        1. Stop measurement
+        2. Enable FTP
+        3. Download measurement folder
+        4. Verify download
+        """
+        # Parse notes for download preference
+        include_download = True
+        try:
+            if action.notes:
+                notes_data = json.loads(action.notes)
+                include_download = notes_data.get("include_download", True)
+        except json.JSONDecodeError:
+            pass # Notes is plain text, not JSON
+
+        # Execute the full stop cycle via device controller
+        # SLMM handles stop, FTP enable, and download
+        cycle_response = await self.device_controller.stop_cycle(
             unit_id,
             action.device_type,
+            download=include_download,
         )
 
         # Find and update the active recording session
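
Note: the only key _execute_stop reads from action.notes in this commit is include_download (defaulting to True), and plain-text notes are tolerated via the JSONDecodeError branch. A small illustration of building that payload (helper name is made up for the example):

import json

def stop_notes(include_download: bool = True) -> str:
    """Build the notes payload that _execute_stop understands (illustration only)."""
    return json.dumps({"include_download": include_download})

# e.g. schedule a stop that skips the post-stop FTP download:
# action.notes = stop_notes(include_download=False)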
@@ -299,11 +328,20 @@ class SchedulerService:
                 active_session.duration_seconds = int(
                     (active_session.stopped_at - active_session.started_at).total_seconds()
                 )
 
+            # Store download info in session metadata
+            if cycle_response.get("download_success"):
+                try:
+                    metadata = json.loads(active_session.session_metadata or "{}")
+                    metadata["downloaded_folder"] = cycle_response.get("downloaded_folder")
+                    metadata["local_path"] = cycle_response.get("local_path")
+                    active_session.session_metadata = json.dumps(metadata)
+                except json.JSONDecodeError:
+                    pass
+
         return {
             "status": "stopped",
             "session_id": active_session.id if active_session else None,
-            "device_response": response,
+            "cycle_response": cycle_response,
         }
 
     async def _execute_download(
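
Note: the metadata update above consumes only three keys from stop_cycle's response. The shape the scheduler depends on is roughly the following (values are made-up examples; the real response may carry additional fields):

# Keys read by the scheduler after stop_cycle; values are made-up examples.
cycle_response = {
    "download_success": True,                    # gates the session_metadata update
    "downloaded_folder": "MEAS_0007",            # hypothetical folder name on the device
    "local_path": "/srv/downloads/MEAS_0007",    # hypothetical path where the folder landed
}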