From 65ea0920db01e8fbf92036f843ef0e347296144c Mon Sep 17 00:00:00 2001 From: serversdwn Date: Wed, 21 Jan 2026 23:11:58 +0000 Subject: [PATCH] Feat: Scheduler implemented, WIP --- assets/terra-view-icon_large.png | Bin 0 -> 37300 bytes backend/main.py | 19 +- backend/migrate_add_auto_increment_index.py | 67 +++ backend/models.py | 90 +++ backend/routers/alerts.py | 327 +++++++++++ backend/routers/project_locations.py | 34 ++ backend/routers/projects.py | 1 + backend/routers/recurring_schedules.py | 458 +++++++++++++++ backend/services/alert_service.py | 407 +++++++++++++ backend/services/device_controller.py | 70 +++ backend/services/device_status_monitor.py | 184 ++++++ .../services/recurring_schedule_service.py | 550 ++++++++++++++++++ backend/services/scheduler.py | 143 ++++- backend/services/slmm_client.py | 185 ++++++ templates/partials/alerts/alert_dropdown.html | 87 +++ templates/partials/alerts/alert_list.html | 125 ++++ .../projects/recurring_schedule_list.html | 151 +++++ .../partials/projects/schedule_calendar.html | 231 ++++++++ .../partials/projects/schedule_interval.html | 158 +++++ templates/projects/detail.html | 410 ++++++++++++- 20 files changed, 3682 insertions(+), 15 deletions(-) create mode 100644 assets/terra-view-icon_large.png create mode 100644 backend/migrate_add_auto_increment_index.py create mode 100644 backend/routers/alerts.py create mode 100644 backend/routers/recurring_schedules.py create mode 100644 backend/services/alert_service.py create mode 100644 backend/services/device_status_monitor.py create mode 100644 backend/services/recurring_schedule_service.py create mode 100644 templates/partials/alerts/alert_dropdown.html create mode 100644 templates/partials/alerts/alert_list.html create mode 100644 templates/partials/projects/recurring_schedule_list.html create mode 100644 templates/partials/projects/schedule_calendar.html create mode 100644 templates/partials/projects/schedule_interval.html diff --git 
a/assets/terra-view-icon_large.png b/assets/terra-view-icon_large.png new file mode 100644 index 0000000000000000000000000000000000000000..51881a92851321d331e71ab351258175617e90d5 GIT binary patch literal 37300 zcmW(+15{*h8_%0L%9y{C|93hQ&b@QHz4v{d z=cgi6m1WS7iIJh8pwQ%GCDoyzpdsHvLm?tSz8E-{Swca@d&^0RYkFp#W_tu^F5Eqe zJ|rKl>I%wgiQ!5?dpiJ^y%jh(I<)6HIv&=XCqJL%FoA6l6x8GHvEtH=UBpt^iUNV#>#xOa z_8dzj1C7~6jjldYr5I#d`$KKv9L$z!E(NPkGVyONu~%a`MDaJE^OeZ#-xB=cq`8r7 zhJ>wyYzJ9ZS~G9=H`}U+*T0$%g&zV^s`7jLH?vtS*y7eu=r%7x^iPpkMS(Yw40E=^~*Fz)-#C`pXv9H?L7cxgLTHgWkg zaJzgqC3+f_FB*fJ6;rTMD^~d${}_Hs5eX_JSYbshp5HuUX@0!+GGee4h`c30aSo0d zIs9RI@A?`gQAUuj(|fGct6(ldt1#9fsqcOxMKC<5aT0I6;^s~Eala%e>#!8^ZYu;E zS$J$vD_wqC{>XaIi}3bF9x9BO zGLHBI6ywahWIPLs5xK<^_%`k9uV9c1H(XOLzVbu)ZMx0QZNcx^L4Z$);Cicguc}rO zXbOuJr+~^dCzqxTiR% zd0XhocvW)*=vf6)3uZf#_prlE1s+3rd(m>*U*n}3?>pOBf1YzMw`6Pt zZ`4xUren2*sp7%{2egfuakVUrTIxT!eP}bB3*@DudUOaVzV%YfJxd1O%d6$K)+!Lp zmD-Y|#&vMvckq-Lat0)X8{;;$;aWV75d-7zDoobl{SRrDH-+K6F3%daM+*|(_uG!v znTq!JXok%+`&h^J8QFe>A>A&jkBvdKcAQu_?bwUwLyy*3yL4_&{kHetg`URXC$PaC z7SN_OOTGJ1LtbJZb)YM~)sA<-uq0|zTz#082z{Go|3gPy#kFF?`Xdq9S0t2{Vg9ot zM)Gs#>21yJ^}1No1z&XK_tf<$*h7uspDv}jFO1}Q^wTFb&|d9}9qr%mGgkgJHZ&b@ zj1{VL3ab4h{FlghS*ykc)Zi%R&0o6*Sl9KYO7C?HCoXR+WNDj!0Y*imW~X?x zo`pXUKHHTwJB^G$sbiT9v?j^_n1(m|+orA^obe_87(WnqTa*nsk{dOd{Bydw_^XFY zPzQ>k)A5E09Q=As2otMa;<&MPEC9;m&23p+(hA01^q?o-B}YB|elrH+$fhHakhzvx z-bG<5JsgiC=&ww16mP;Ao8;g1iMmbq9$%{Lw|&J z)xK*x&*vIvxDIhpn9=6IJBs5?W2cUb=}64rLTSS@Vft`&#AbtKd#O!j6&2#R0W$!AVgxOGC}=5}oe~siD#6SkmzEIB zaO$oUVH(6xbHYv=N?XGm$k5~bUc|*OvHYf%T<@RVHGe60z~)W)tO+;OaEEKnOhNmA z|L2xz0&j=gkg_(_R>*7BBR{)4Ve53XIq!_)F5n3d|`Ss|NK;*h;-=qMjrGD3T8rQ8Y(kionbYtdH43UJPq zuv1k%CrI*w#}g^L7uOY|`WF(1PQ~`v!RTk$^E*J5Mb&_4vw;if?!W?_BAJ2Bz%}5Z zeX5Q7@biX#wi5+jksVErpAPg(-n~KB)~;5kX=v+bHXX$U$KN#o$hzs}bgrigNfWO~ z3OrG88tx@IVwT6Z=%nVFu%W7gbs9^i>;9hRvF2U=C+G~9YnXP@Uwmf@Tvo&|W(h?b z0;Qme+eA-YY&XoUwp!nvggpi`Mym?Ksr{2UhCY~&EKL09CO7?C^#~`pU6m}D%7PeA 
zBt)D_PDxgEPrD2qnei1FPXu_w@W?YAe9Dd~Ym3yI1YnWX<19u9PZ!Y%k&>Z)DXNEAQl%#~TrUh%!GY^g#ZoQ1MghOFHyfDhw2VS)A=5Ac zzKp^B9K!{}z~r*pZie-d7Fn#)qRy2Y597w9Bq1``ok`(Yx2^{|O4d5_3BQ;o&}-A_ zG|WjLil;A~Ygy5lO=%ma3hCwGEf~Jl_TQy68B8Ad){`6rWvMGs)X$|(lb}*A@yn(U zLdjOZ`Ba8-=*)Lgg1AheD$9R_hY(lz(@-9I-L6jZa|c`AAxGj<*M3Ez z3@W9<602K)MJdK#l{T>IH}-6F_xgIh@rJzfaN z1OvF^W?gqs01}A7)0mN;sM^cG6jCVpj8vKfQQ#2v3EK$p$okiXj zcC$=AjaUAlD1vy1oHC7Dv+o2tAwjm6wX}@Pkhj=Q6cG-trg?zEIOh7ph&^0Ch%?rX z|8$bl%}Vy+-s}wdhq=t8E=ar8fy3^+aJ(M4aw-9TN(Q;FS}g$X{BOxmJqkDsXhrpy z5>jfyWqODc8WAPh1xH$kKX@`qiKS5f&bFp^%La2KD5XUppcY0*)KQDAZ#R=PEg&(A>}mK%PJ>6Q2d%EsQAS?NKz4-&(5veO))J+5)Z$Vvc2W&^^?E! zqhnK%ASEf?bQ5YGAHJ=}uO!-~O%io6^!P@7R1*qXAA;u;XnvyrR+1z6kz@JqQyjzT zRVYQ6ygmAInOk^hS}ZaTsJ**nkwbjAhE);^qiT1~ngb|bgT56+c>3^NExTi1odIyr z(xge$=^Ve@Ll#U8Y`foiIB_$*u~_soeJ#c z6(OWc3W2+`OZa^OI%wg+{|O_X0f?ixN0=e_-bRSxJ%WGc^J_22pg$)TI?lLYeDz!e za7+-C)tB6o>h`+;mCr={ixCG?epGEsOI@MH7(ksEaUga`h}75w)o2sQt%4RwEQW>71-V7EHdTCmN^(@4prNoM$qCcgi)>ZtWGB;-`;8Z_~p~? zO{~n#JSFbJ;@uxON>yBp5-ciwT@~*aM(i50w6Eiq$P(~L+(P1&_%}Kek?^HyB(ba+PEJ0q2KW@ax>zs&TVWpq(2EF|R#T zpPnnk0h_3gls`8=L%^QZ6~OIA48$f@`t*uV`FV(Bs@c!qCR3hK0q&SsjgJ&Dx}?1F zyB`x*>@S#XS7~QFsv!VS23*JGHY@*K3}Sqdq=w{vaTyT>=ju@_-@rL^v3OI(oz!;b zSN}=2mXnG16+;)Ta-qZbQPN1Q89|PK+g?LA5%a5ks^`gAxD+C(C7ZGefgCYwPKlZ7 zaX9zo9R1%x)ZEl(zd`nTikzLF@|%D7@Xn+Lxv>SoWyuaYqB1L7x0rXV`S``lI?(GGIZOA&GeNkhCMJ~~T!y2|7)J29+*Vg>12P&Ll>rz#Hc)*A{kvJ91)S*M} z?@#czX@1acZ&|2$gopK>QMjedT=IFRm|z=a@rALtuWM9*1z%Vy?l?LnG`6|L=Ny)O zCP4ZhWCB#H zh5gA)v)Gz;Vt*pBlLKmPsAjB8%mZ;H^X0RB=*HR?ws9QK?LZzGnbyw!; z{8ama1es{MPNaAZ^!#R>AdGgBCN|qUv0=cS+f77$mAD$rzm2O0=)?Y7Kn67kQD3oa zA?=~qnQkF1co$m=`1l%xr=O7Js?WS9I6l4Rq6Wqc&(d6O@UhhWy3({G?^a3s$;eCf z@D~x|yjFy-JI$`zv5CyPvxJ(u@wbd*cKoP9Ffw`UP-SUUqXz%JU$NvN$-*BU$jEvL zgGMaEE3AD6Fn(9==A=ePpw=2xnxKlCKl2KL)F2pJ%*_2$$9o_ts+jyVuAG7*XNmgu z5gRzYzA!diS1AdWKCrNf@qL^GUYj|S`x{wgePNH!GmQ2Y8d zr7Z{M?X(l1It^Z&Xq9K$JV2#vXhVV@i&p>2o1Bq4 z50@_Y%88LUDOTpaJJW(g3rziM)=EZ=s)Q9)-p6h9`Xk(V-5m#^J)zhpG0V>i+Ys*` 
zN;$rS2_XW{!U8G<2Pq~x$LcUcWcKu5S_COzqbv$g!bA_B!uOV=MyKK-O{}!@9_=6b zj|f#$*-gF2TA30xKa{GMbaLWOUDGMqn7Y~hTN036}U-X@A zcf+p6j8qnG%>FF&amIYEh|FX?a*EbJ7tdBQ+rCr9^gT!hZ@wYj=NL%wU~ob+YoCX_ zYUFsFYsS3Jh~#yc)YrYfPE11bw6fGW>hN*FM;IDWQeNJq*+5g+20?=f3|G2pxY!YJ zL!{m?6n>9XeqEwp$PUR4&q53cHKp=9I9D}VVseP$ z%;Z+5X|OuigCxNc;=!0c2SS^Bd#ME18{QXjio}PlgD)3s2avy(78BJ(w-TV&oVxzg z%;qF?yLh-30z-V+#I*HVI$ltUl8Y*w5x_=Ahi{<%i+Sp7ioQP=FpzbV@1}iuD+uk- zA9Wf!Fle=Di!Y)Ewp{!nROw6wY!URMMx1N&zfoof9}!~+6`F=|%S!ZMg26j?KUB3A zo?P7hmjY;QL-T<0@|>5R!k)r{wJM0l`)_(HVxy$~E#ipc)bU$AVdOBBAVNh#(HxqX zEM;#%5V(x!;x|~H8{70eOCTvZBu*Xsm$cV$`BQy;J!JpRxCzm0$WZu3o3@@z>LzAa zAH|S?n>+8?4GsqAo!?hll8w3uDErRn(YXI)iCt1MbHymijBLYTqo~T!Wytk6765;& z2o+=KQ;kSAQvff`%HswQ<0vr!>;mPsfD`kHdd9QY>lXe}(!(#>;>7s$6lK^3WhVR7 z5ZK;Sa#XQd9jI;W{p%ZbL;-NJhrZ|bXpVsi1-cqENB8xQUbfye7@VL;2ePx1nmOHE zt7X+mN!GB%2T9AM_P8c}nyVix){Mi2ZAwpBjY-El0|ZxNm=#CoDkGFCzfFCPDzU&u zh1Y$&mTZ8-=Z98qzILwpulXWJWlNs9qEkEi`OkX6L)qZWztS9nASaZS zKO=UftBhi)N+^($#zIvt`7!JzOGHoWSmertG%E=dDcD}%UYwEvD*wplK4ydONpku* zX(67+;jh$WfQ<-mvo@9%gI>HYFMeeHlnnB~wommb5q{hwWmmy++`_ll89g6yAv8-baUZ z^e}g&)iUh%YdR_IHNmaVf^^D+7y%$%ZXVv5X`R@mligo7R^xb(N>ZYZuZ~#akU*v){QLtqvH&w`sJyDG$uLp*L2(SQ^}q#g z#0TlhBDvk&HQE)X9*TIIyXJdn1_<|KAZHkbWcM}gE=31n$6=DG_sP@0R{1A_Ryl6G+xP5M9ruiWT^J0ssfIrURvR65 zr~IF|xU9x0(-oZ`w`FQACy1qgkq>p}9g5;=B4v#XP;I^P%fPz(z6?k9q*A&naXewo(d8>2V@m;L2J8n`LZ58+kF za)>jaVzWCxj65SD2r45Z z)AO~4{&5>kw4kLty~q61&1@8#s*zDl0nq0}e{-MB|BYl}VL^7Rpn+u^X^3JfQAIeW zfU&-z(doLpw8ml#Uzi*NjLBWkef9IgKlZ}sU{#jkZ?h8(eYTIDyxR!8mvC`*PBGiT zBuO!F9dQ{{gP*4(RR#pJsoanuuHZ+xUQ;E9iMDl|cD1|R)BUSuv@EwiDuGv!7lyEB z+ywOe@5uE7+aWye8BT!ciqYz{e}^hzRKQ}A9WV-L4~~dmjzNwtam{UM^Qmidt+yWu z2UB8ter-JGvo{nWuPx!nji@N(33UFn=8h~t45mhed-d!v@)4nEXlQsn*$xwUSZiUR zu4dNny?pU4f3Td)7-^@^{k9)Xgf4;{3Sx(pBmTI58g+A}SeYu;i;->2eM1 zSZx`&Vy7?Use}cUtD)8K zG*y=Sak=CQO<82n_A=xCWcJUggT9N0n-!a=vemjzvGfz9m4 zB55jrDEi-(*Tiw`AbU1E~{+iHV6aaFFS8dWz!QzuGaj-6Rba1e3nOU+pcf3}|R* zh|m$=uo-AWO)nd&{F1R~WR?Y=_9rTgIx_b6=_=c=<>{0J5d$E7)MbJvcc-i{#&WSj 
z*XQhHladrYQ5HbR^Bp($dA`&dqp8@7OL8z*)wV%KKBDsE^=0#8u$Gy5%xt;Zl+=IC zDY(y+TZx7|%4ws;MU4lK$^-($nf2<^REO_xR;7h#X$^@&Bfvn&X)(e(vhTO0s~Wy- zPbvCo9J|06!DF2mfqPGURv-AWgz*fXL=uNryb;MP$Rl0Lp<)ntE-NJDaE80`B%R}C zV5-`Jph?+ZksVjlJl$89y$26mx_H?k4*%K2X*RJvf2*noVuul#S6FR+Mo% z%IYzsENvhBtEuPZjp==(7F{>(>nhuK4ZKvhaAo^)dd8A&5`A>y?b<*1rSA7 z%x5uE@)(1CkQuZ@$ijrtO8rJfaN^zT*@>f^x}gi72A8^GEOf0i2A4YD(d@g1IcP&1Gk#loK+_vwTL&`wKWA!^TPEE&ZhV<=ngZ9mRMn@YA{Z+&+u# zT66lULtAnf`Ew~*C=)H0)uh;*o8nwi$7e+VHWhZn^u~t$ht#S6?iGK&YIsvBi}?ar zs|?)HW8m2Hv?VK|rKL5Vy-M{ymHB`R!3CbKv3<67p*uSlE83`iTlAt&97a_HRB--} zyLPXapF~Y=QYN^0`08#a_*(WeRMfPaK8@8j-_|PdZw1r{-_2`4kS&a=hDKk_5a}0T zd>Z^Hr$^H;uA@9Ts7D_WF~@l|84k-+LhLKJ2+tr5WdV4@B`k^R?+an=Hae5aOKPWH|Orlfv4#P0G6Sj}WxQ!dBp3AFz1N5&RnjSxeY#g(nniXbkf z7E#~ia+~E+R#}c<@)nB5Z1gJliIklck3mPva3JircAd)st8@%$&wROtfXS`(qQi3B zcXcf-_p_A&DFVaZ)&`sM3UKei&0nK%Zu@c|<5+6dRzVc&e>|sF6&Nyif4184kmK)o z@%ckc%Wd#irWiXRfDjowN?OC0KtlDaQK*GpGzDMG=Iee6sy1NxxvK#2{q>-$cI&7# zVrW;G#~(V3)EQA-URhud7oC3#s?m16_iBi2sz@$TUFbDYWwLJFhv6Y}JWWSwj(q|y zk)EQryfi0aB_h}V-76HGBx3Wz1@J4Mn~$%?dGAw}`$~Cm5LJxmJ3qPSmYejRigA{&F`2EwK;c5mlYRJN}n zpK;K`h`7smS$OkhiGgN4qZKNjyU3aYUKs-=Z3*(PUEBPum}&7dzJ?C*Maw>#2k+2y zNJ`JV2NW>mDv$rYGfgTdXI3zyb|T9J!?18H&jZrSS4w8=-5B)M<#rL&Gs*`XGU)n3 z(RS|!si_7l|r{7_K!H${V zCoVsSv?P!n$aHwRr+*iTL`sSZ1R(nS3{}~B%&08-=87)tNX^5;6EibosaIlJ@SQP| z#!XXG)A4$Qe(&rV(peRm@oBa&*!|yOIzKutsDn+W^Te|pd#~2KFFt4a-k&DG7m?I&aWorY2!aklTN4{~=tm(BAuI8H4PkW_Ro@JF@1UkNAO-J|YzD_^jZYTkOa9c+P+0iEyC)DK+ruxwe0Us}IK z{^@T%Lkh=C4n;~$4L1+pB(El}%S-3S! 
z=;NMmFqT4wGyU6BB;M-|LD#pF6qbH$B=?K8kyyW1n$Cyy3z$a0*4+=?Ccn>IR^)r6 z+huy&ntVzS#T zGQ$L#$j^G+w;WV6Czj^=g@WeLEiSiuvOISpvV3Mo7=&{?caT>4OkaN@m=b1x*@+4& zSX3WE>9B{D<$!^fUG2t3VjH3$-INkX(L%#!%#~K?Q*eig<2L+*yK-?}X;>{m5OI4U zASNFX1Sl)3CyvmJztxC!GsTJHminXo1lQ7wU#giQkLU4usjDY}J$9Y$#hWN$wjzRAaeGp44Wk&zL1#KMom2(6{1F0LFX z7BH0SnMLt-7?zZp%Ju!$+P-Da(z}AT{eFcS0*$^%NePt1f9SNVPOi9}{H?RX`1uKg z4i|Z5dU=>@ikT7@+|7;}OdT%W8$3E{2Wvzv4b%*rI)G>J+E2G?e!U)pdb}+a3E7Nr zUTX&7RCT>07*)m6=XwS?_`m3Tc98pB{=DEY?Un4dw7tp6h!w?@ZP9a4@xz~e64V4h z-=-6E-|y-pZqpL$$}qP=OI-z3tMeyZ$qm#d1WRbl+!4niyw$F!_A)pN=m}}j&jd@z z&P3=$D4KU@$ne|ygJN_*{hE?{<#IBg@Zkk-#`Iw{{SxAg4W-o6I) zU|#82ub`0fw<*O-;u;Xp3%ka zkgaWhkt}4JbFqz%B;Ggrz=Gq`<<4vZBTtS!_?EqtOn#Gbot4$qcy;WkK|(%zm?$If zU$hEF2Z!b{Qfj!&W7L?aSEIkHig%dNMPD7!g{SGx358zJ*p3pFJ)@Y=&uOy^90x*?ceTN`h7+HJlbWh$$v_y_)72_XJ; z9m3`pQCrK&JSM+jv&X3_yQBm>E9>BvhPpQ;%AFd5U`0uDa~wRpJz0jQ6XTE33%?g3 z_tUiz$N+lW=j-e_oGFYL*g0$sO{Ob25MIqaHeJ-wOL)4;wLD*INh?Wr_O)McO@=IL zyy~>TuF8;F=INEo?(5f zhhssly~X;o-;@>B6!jn6)!LnWHTrR6!P<7J4pDt03-V3`Z}unZXbGe+BkFHnUDzD| zVaY7F5aYsYX{C$$i%3&PP~$Skv*P1dSEp9HbGF<2mc#t}u7`*LF%|VPH;W3M0nES2 zf+gnw^Hd-z;w(d6Ipb2LS6z?!Kxp#<=lnewGMG$}D z+S?=&W|_cy(YWO$<(fEJP0lh4A?YEm0-Up ze==|y*o&E>XtY}$OrTXjg8KD_Fj2 zU`AR({e{yamepL4(Lcc!?%aNa-L0-5%-h*-b~l z!t!GyCQza*JM_d$ZY~^brY`-x0PYP69;o~pk&^sUq##_wmuB_2Oe=xIilEsh6PYV3;Phs1-cgI4wgB@h}2qD7=!|&Ra9TaRyFZ(8tSG zLxwh_;YuU+?6YK^{mABpZ@gm9k-fZTs#-?bOgR19L1MV1A=G4%0cp55L!uj_mFsO! 
z*QuSufuEEmwX}{&h;>?TS^VBkjQ^t(nqRf3F?}w3KDoOZpdU5$wWr;+B|#b|q#$~X zKYq{;k@+)2c%tE)*CT|XW}YkKP!}6g5j1_%HRC<6|HF$VKFo=UO zgKhwXzgp(b-@d$6=(Hq+y+2j^=E)UpT!q4p)&+9j;nhv20LTvVbRl$CnUof$$ zQ+o2>#eMuPsn)cbA|I%|5gCc`v9-j$w2ctN+&RAth?l(6jW&h?j0|xxy@e>>P{laY z6|-5iaRV#8Da|qP$};3_Y?x9kI6N;+z`w!MuD==^*}d;iAq=qyy{0)BJ9a1=Rt83C zZkn5A0)Lg`3F#ob!a`2CHO8z|Mk!%tRIN@_`HHnJQn58;H^ztjyi z`r>*TJcW&@&(7wTmgeTy6e%&g5AH1BPM~DRsPUs#<3pFR!pMSI#8>oCz8BP)12}g}h_iLJ1%-Pw*@*BurHAP>YwPSqhAh%*g_^Ebh=s20X5+e(5?J%=*OG$U*Q2n3Z_vBim78<;Wt$gg zgwh_0vs?&cw-TItH=l+~vfXk2aw`cNv59n1%&{-mdZDD$a|zkB%DcJoq*$1yS-S55 zBah3d-EC1g*Gs6fKA7NM1G)dEM%R)3Rjnn?EJmjkO2bbQ05orLGO(@QWDdxv#qK#L2r||6!ud+`<%?E%ZWW9C+D8Um4 z5(|9shaQtxD(=cN(ohV)t28l)~iXn*+H!kb_Ot%kTjIxhe1&nB}+nfLW=eCIqZ21}|e=4yHaiM`{ zxIP`y!#+(ZG*X_JSlB`v3nn%wIkfYP@9QiloX2a6kn;BXw{zI+u>3UWngKUHAxQFG zONzG7K6h~>8XuWE*?IFWIG}cPrAg=kM#@qvpAk6{b6x-I=YVL&RyFOZCiQeVaw7j0 z%uzxj6e`yu#p!x0a=VEtnqY5e>mglhD$OE`yfUiqVTM6NLU3acT{BQU4}-t9n-^;F z*sncOezul@XG%>-=%R96D-w%l{ z0Y!C$8HxQj6|_!(-X*mv#)Fw@5ZQ!xA)Zw}~nuQ3Xm{3T}+PA(=+%fNyv z1Mj$54G0!?_h@RACvEb;a>sGJ+3~;U%C(#oPYfS0mn_86)RI!uWFGr>?tNZdUjEm~ zUpL(YR?rHzmd!AJ7?m%NhVR5b2B`aAZE zWzp^Y-nm>K1j+p8V4>#-r{_YnC%Ej@pf%S$UZeEg1iyExRbfVGDuDFtzZILR?LSQ$ zW_{0Sk3O9-eQJ@zwZ6-78=FR-pEEo1S}CXB)k@mFsobMP9@FEmimk_dim301EwH8( ze654QV?e{D?anhz-9l09=j;u)%@k9+)`r$>rOJ>42nKC;a;m~z;leti8p5C=CHsXF z)3vSA=G>YLvtGIp+z#0+UhdEGd7Yd!2^)hm&L7=ZSIH;(*@E-qX%I`b```CPW!v{M!q(f3~x$eY#ua{w{M8av1%n5i|Cig$H0tD*e9-n`L&R@oVYFy;i%h1ny z!8jFy8yy>qm(|l>g`CiU@sHnyC72RFYG!TCcGuwpk)NTt7(i-Y+EP4l)v}6}c(U}z zY9!NPCqizxuExj#mW{t@hK%* zb4x(({7Ogo*VftdDrEZD+taO)D^K`t(=Oc=Fk6neBl_L@d*dS;1icib5&8-3HDD+V zf-1a2bzTrg!OD^k^OP37<#N!VJj7)2FH?o(mC^T$7VcSMB1MsIsow}#bXadJG~|Q8 zVS1n8BrsFyLhDtfh9a7Rks8DiWmxpgg92E&)(#A}%Rs~m8#)rRvSzvoH&a5f|1rg; zO+9%^)lga%biqSP^eq1P>^AosNL(Wy#F$aSVfzSdv|AtD4kJ_MC#754h$#Wr*Ee#S z529}PfB2eR5!S_I3~a6R{09^lersDjju|=@M|f;_g-!W9b5*<_(qKpMr!WxlIO87V zc#vN}sApt>AoiEw+G}TJp*zi@Bxc=fx(SvEh;eFGLugc$ny4&-)U`PUhETnmeHK)5 
z;WQoQn3C^VZj0$~u;LpAtx1D}2<=8}HP+E`iRflK!4VJ{yXN*!leiub+zX*j|9soI43uf*VqWggov(*vi;)S!)i1jTWb@Dtrktpk-%fKRBgIT2jeFS z$wtHu3A|qR5ea+;w7H+}bUbV}(F#x-^s((>2Zb}1>O#0Kss)#%voxY?b#C4(`D z#5`1MF~)!1yvq;K^IM+wGt0`#wn&UW5J(oQb|H*=KNb>pkV`Cey#HGVa&oXKB$^?RBy zuD+cph9sq;Re`dcL{_u^`UcUWi3K8aA&2Id7BnDn zhY$jz?sbpNz6>54NEXNEMt?IeKR+;FY6@~-IM)BwywQHW*GlAuDCYA$AtcB$Fk-H< zb33Pepv|`tZlUM1OJZF5hegrGhD2S@!27J>b)WBJs6eX9`KUOR9YTLQ@BTP&cn)LQ zKlWf;KE3_7Mr|~qS%xG5LIl8ZsO!^!mL*#beVFJ13^D$(cP`Wjdv`iqtCAC9hypW@E1jF0nlA~y%uvy^g;OMU1CfzyEWDV4^M?oO~R@}7hZsa-_2^4 z_jwDm-fg6d*>pd5qR+X@Qoc2#@#}1A{Oq^KODxEKOHeD6=5SeLp6uF!>9u3g)W+yN zxye8JVYQQ56kXM*ExCj72yq$qKHi>}WXHCGP}uehqS!2Zg1jKC9WR=QE9IoBD@T@F z0uoT%9g3AvS$E8eYIf!b-U!?QK&4-)oQIW9wZxxbpJic(zypDL0iHf2Zf)d^eLtD3 z^1VYT|G>fEjD=$&Doc>4DfQ*R1I$)cHARVjXw>Yc*sU@RVls+ghAkPRTw zOt6F}<+TOK4qeD=6q~ zbOR**mjo;U*G3k_=asY~7%r)4B})qT=sR>sl~usZaWA)9pqLLvJC$_T(qM)3{u@bb zxocdVas4%V{k?aaH>FA^P-e~xB3`U{Ty(U*Ka())R0sSb;e#a8T!?Q2lGeL(h(}h( zHU@XEwjPq}>{fZ1$0W7fN>p+u{mGv&M!p46eF?>k9eQ$H2sfd^EZ8iGb0(Bsy(2n!;ZG*{tj-e6bGCZQ<6+&0W)VUvt&caxr3Z> zY)rTIoU^O0InuH24n2Tg0*4djyfEhYS$3B5iUHY_YbO49EJxdiT&KqH@=7_vc&Z&= zSi2+JQI;Sj%c`8~1(M8|?;k@UP*>B9i0AgaTUc6daru;-oGe+W#ylsX#Zwf$y&+Wg zoqR=4UjWj)A@WIAKcr__-JfnKX}RHP8@u5_QgtG$oAXJW#B|t5xK>SbpZ(q;{-1L% z5`DGJfpU)5J_sfdQ(f=A#_AMS+M)A~?n-A(M2jGz+GU9~x+rjIw}$0aS>73>%MjIx ziEI>`3UP6m$9hjl(~#i5ygo1ST^;|fsk z0d3|4CjoYlTn&e6$CET^tJC|Z{O&bkKJV+q2)*;$w;nCMUM=c-?f&As{Lx;Xv{PQ3 zV9~FjdsplmRRd;J?zlIE%_uPy)DF;12{9#vs1P;Q;{s_#2i5RJdQ8>U@?k+n0UHZf zZ3r6j3A4Qb8Y)c$-pFGav+0%g@K&;inZ>FdH@PH2oIJ>M{;|R!bqvbmt-GY ztb1%w5u(=Eum8x=y~Je`{y+s&vuB|QL!v#sEjoRbVp}tAaQ7n+#domA4|0;xB`&0; zg=`)`YzWePpzV6&ucl4E%!f_?T! 
zNs+#vU*PbZHZk&i*ZuEG`i-{vkOG0kL2BJjcq?B{tq_~SK-ShKkUE3N%^{MU|HX7^ zCb3r$zsp5|!Qa8Zf46+Rzj*py*+A$IyU#K^Jx64po7AA;ytQ7~gP6vdB-cgf3wiF_ zxRsjLujl6W^+Dk(8h_7B!Z-P_%?k| zAXe4>{i?(6q|n8`EBc(G+S#u%MeuSo>ECB!OcYUCVG<(X@>PkZgl|xB=oPoB{NGUi z3)Mjy?RK`u=5DgKF(eW8jPqY=uIq7+t-QQkPT^aIzgna+h5S#W7V>u7KFLa7LrPiM zpEW;djsFNl+kNHo|6?pQ&N&m3FuHP_GWAFYH!BZ`b9`<{(jrx6=N>@Egux7w9dphg zAu!Egxle~3p0?wxW^H)WN|!rd?#P0(e1K)CeKLx1X!!YQZe>=2 z8UKrxR_``DjZDnH?g$)^Z7o~1YSzu+g4O~=wKf$P@@HaXyn1Rs-RK}unv(@!Kf*$z z=eiSKNoMVhrE2Kv%IWJjYb3}Exu_C}RE0f5nEz&_*q)h|!~yG&aT_ksDE;{DL%? zw)_sLvXTl0fP$pxPodGyU_5-!=oYS76icBK`rp4+wOXiBEa;tkw;-sM(9pndESOqe zM*hr>V|*~}kFU!16XXjOhJ8*;30Z}*{M&k<;OBoTsW>=SWy#gB-H@JWZb%7R8SxI5FgT&;{pA)MX1 zcEjiVLBz=T7nEPQ>j<1%WN+TXj@v8x6dKz^{|;qF<&9$;VDVn4@yoNXikvYk8HLLB zi5HcIj?$4S-(>O2rZ05k7M@^{G1&tpeTb-~*3Y@a{nanUCz4ct z#u^|OyjF8AC9yxnYyX(j zC_A`!SNQiS|2_TEga(iNu$MR#I?B)jKnM%mvV{ND}#d!?bMj|wm6-!^1 zCU5v`QO(g7Rc$~Qy0$y5-GGu$e2*Oi8|yKnS|YHw&zzLdlVOzS-stU{{@Mn2V=^|m z#N^2@BiTTwQsaQV{A;Nqo$w)vb2n#`g33pSB{mTz@{ZQ>+^x}@We%A;N$woyWTh(f z(}$3+^Er;^P1zrCQk~AC&(<%a3Yrh#p>|XxX$Z{B)J5x!xeJW58iRz57LkABqY#%k zC`*_AN$%!Sc!wa%ZW(h@c3twF-C$qXLC5a?jPci`$UV&Gvn``I&ZGECzO!!Lsr`kQBEzMF2hvS}Z+Y%P zieFq|Ix77?jlaF-=Fr?ac{uu99|&g2Xbfz7-NeufQO}CmIxshr@_R*sZZNa;+w5vtjOKh|h0C zOtfMVO>u(4=#gPz57b*Q3Q#pO^9okCB!(x>i$~Pt;ZpB1=7dsu!itR=gaTw`-g#~KUF*lf*# zdftW@VxP^E5T!%|!#(|&PL}(bmaBvqI@B2_UmjqUYxcf(3S%fbCWtwoXFf!+Fbv>h z8`c-NVL-A|Ot#Cf$7vhlMhhfFUh67((1j*`w?vuP?{k8$*vbi)@!uMfV39@o6%G4i z)mxixI$7ypgsw5iXk3PwH`fhCT0aq=IlkEO7-c{5b5c#WyKz1)URkq6?og0Sc2DOi z{lBtfxqh1skKf-e{VBY(XXSsKxp=I zt=TPoB9`77S8Rv8=Z$t1jj&A=_03#41J=#EWJEjF-HSSR;>85^{;FWq#i1La8FOWX z62;k-nyEYE(BB1{vld8N@k>JYHs6%*g{Qa ziun)q)d!#bichE*4<#Wsuo0Iqtt`XpHr{JIc(+=$(a~k=an`PbZcwOM0iEiDo9{IZ z?Lh&1w6bYaN+-sV8~-|?Sw4^Sf83c3X$n`ze2&H+8Mj5Ns3FtUD%?)WUeQ&Al{Mpv zcoI>0evVC(42xP*+nQlGo{*?c{;VBuhTqdhs7{s=cOR;E8t+gw6ZkbL>}aqXXs@b< ziTEM+nu>*&H0_@Oxs!#GW1Wp)m(Eu-x2g><@&F25Om443liZx*##)&^=qvGcVW%mq z9_%KJm}9+cD$EwOZ(8mwjfx>!u#h7HiLE@d{aRMM^XS%;KavYOKg~OS!fQP2>5Tpr 
zPz{fm{;z|a=&Jx!l;X#7MI~he^7*26%@Ho|rK(@`>=!bG6n|ruwHXmF@TFu}k%!Vq zdFx_-KV%3iU9J(+^%!dRORtF$Qa6~p8;D9is&~dEz=*N=7~seyM(jNQYHKmLa(E}C zN)SFf)L<}(1M#&dtlC@XZZ{k9apisDZVbVSD!*Yd-;{>;odB@;1YlhofWSW{K_?G(_@b zE60)BKR-V`ghOJ@6NUaQpjw7dQq$fxYbQDR_AkQe#qrQs+H3hrKwTm`{vn7QepqLNpTGroq&pvHBPUCNmZPtCWRF?6xa()?z%++8v3Sv+l&Pv_p8ot(Dq_N-^kC*ZSCX%tE>w8W`u;i?i;AayRcW>55N&{3y*YH2>$P*Sj}(Ae7$xgvYTLjF`8V%^a&(FAMEuaC7uOTM#`B|i{dTB`dycdxi0qRf(WMV4dkF)D*T`H>af zz{9EnQkBf+(4vi7-{)ld6Q`jMN}J8NTP+KOhlP1@dVvl_o^Y&*p;ovoN&$M}Ht>B3 z5ujBc8%-l+#?8~TKd(3$>bz}(d50CBXMABWM5BXt+FepemlHO`|L_hm`HVGtY6=5i zNY|l;?EJd^*wUBN`cRCG;x=75onPD4+RfV%>F|nL2BHD+GS61e{%m`(nmM$kgo zOz><7{#sby+tUIUi05$YP?}fM`l(Z$Fc310(41~>J%Nt*p(`eVnnJ>nv_=BW)0_} zEneIQA&lI_vs!Un9%NeRT*2-)!U$4%M&llmhP-{xJcOY>%pA`&NsD=lyf8NGVeZbBIKB*6HS*3l{+6}=)9i7KyY~Bt zd}MW|O2#z6g@-NPy|c0NuX;>t(^g#DH}OoQWNSBbm?Oi7!^VeihUM0$Pd(fYVfr?K zb6-&rvIKt$>hin0DkD1NPvts1$hoUGQq(v*|IQI_L96b1wpTcjqvN}1;TQN9F3|EL z-#X>PJ0r9u<`tAL!-OrzaR0RSDXm>=xs01-dO7zh z`n1;hX|k{7^&!r>w#hNX>XdSI>cK6Ms;RT!HPPU;BF!^(_{Qo8QN-FRsRb$Wy^Zxd zeiJRWN2tb6_W5l}2Xt6oZFN+68J}ri#m&Ur@ukb`)+ke!xvm}F9TPXOen2l2%&uGd zzz(V11by3MJj*%m-Fi+^p5cjy*x=|s#rn&7+b!(Jkz8A<@8=fsk4KE7bG#?LFB-!= z>l#0^km66hlBDQ8Pw%;o8UL~4Uj4S4Z}J=+kr(wAgWA=Ue;mxifnG5tSkZ2NHpW`f z9Zo)RIbyy$@$BbS&Y@gao6J>Q7Y?gVLO4{Y)3s(vJ&|U}$FToYcCxH4lh-+e?|r{v z#1+RnU(3@A6+J`W!#n6|NAdgTxW|8(lv~7I!b+>@M56h+(aNHJB-hXRIiKl|!2rjc z%grd)iEh;dac4ZYz#h{YKedH*73ajg)<+6W-vb^dIL7IM{uz&t<#lj>II{#F75h8C z-9@aL7tvnFnG+4adO4djp0r%u)o;#L<$bVxgmdd}z04}hFUEFog<|47lKHaeF`gRp z%pYUp#`-0l+BZWTM>^0T{SSZe$@G^(S!e1(Pi7(p+AA0o1kHyWtWk-DbH2x)9wc-o zu19E4`vR;*t{pnVo_}v|nXoJ#_Yn*kcGC@J*IET$lcQU(u{(9@-&IO-uwf`VmFCrx zFSNljaXL8fE!5#_-G=16E2c*~-}3<^C_&wQC~x@W*? 
zq0cex@+RG5e%Cl#g`^P^>iWCQfY0ss=;jgXBf-_Th#2|%BTe>O+mn;qn=C9=yX)>W zjl#y(jQMZf(IT@4X7Svl1tJwldAzJTUwNF|9{Gt*mE{-~Ms^rge*U~M-gM4Z54JpK zr{ST16pPf7ebhQS;DMmovdxyDhziS7rCV4{`~XoLEa@;bk!t-Enl|-A&NC8%)&fu^>eocDA(~)lFy9}(1hWg-sk1CmEQHf_(H8cGiI$oWVr-ZCr zZ{>oAQ}!esA#)zqGInQMDMIp?Ylf!hZ~+IMdQ@q7wi6FYR#=9fKcaA6+XP+1j++8w zNqh2_soH^LTY=TKxMp`!zgI0^W+ue`HLqz{Ca_w0T>9|7n*B75bN2q1wGmlyz|K$c ztTXuZjGK-=XcRx0y{20HSbe*4O&=ghi0j)ee6-vkQ!qE9#Kg&*V1~)0j8tccj%Xf` zK4(HBRG^^vu<3*wa-=SNvW}e?Cm7LQX-b9*6$n3s-+qMfFM!asmm&>J~xJ${*%gC4c zS1D^53bu#++RRZ1quwhVgO8QDEqsP^_-&(vI;Ac7Q?T@t&fAfk7gkBqU)-6F^XoDN zzNF)ou)zzzryD)R2#I!o9ZZG9^MRa~$9O6K;KtP{KLHO?f zP#*u^53LDNz~Qw+Ah*-v!~Vaoc>lk?R+sA1!&KT6OsBC#J6S@&>gfh#e1yoxDzHj^ z5i;={yYp0Mj}I+b?S~IHv$f6r_49;fuaCQ{-@&SZ(&P?d z&*zEzVeMXu6&c3m(|`T-b>H)J@jil&zlO462r&sGCUT+~dVh5-sU{|klseC!b?}K& zkx#M*x)@iLIQt;W_45R}@H;WMnXqxsQC<>7et0pskzm!9ESDp@Vz_MA_nPfqUngQh zGye0>;XYj$3&>fz{y;!iO!*f}YF^o|dXmYOF8c_aSudE*%0DN%$P{wJ5o4dHcvy1P zH?-Ab#D8)a_GnfZF!s9;Qc=>GAm>4{`WhtbY23VHz|gW$vPl4eVA>`{gvY$D=ID_D*%#LUP?sPJr~X+rCemE2hd0~QLm_Q(d& z%HQL^{gq51QZ^qF;ZP;4@@3v%;tyIzo;V4jR7+%)ge8u=UoL41Um;3t87r*_MG46! 
zB0)M&&Q4mGA4_7QpTr{NJSW0#T=EI4mg^Kkf|ONVOG zOJdU>XQs*~a5_Jcg;F@JTqWgW$%NFA0ZTZC$Is9 z`{zo7wHl@f7^)?q>nvF=B&%L$C0IE-Rb=S|6Y%a2m^U9`qs^QXF}_^|pck5ZnVVaL zseC0ZC>0n!S@~{RusCN9^c|Kz|TbJtP~Z=$}H zC$>f3Qva@8bhz&>+=!`tx~i-%gcu~l()d4mk@S0%>VLX5AF78TWYabfV#ts+AN;x;Q;NEZF z2q#hNssG5Ks`f6Lb?MW;{ijlLHe)24_MZmFUZfUV_UfrP1{{ChnPs~SMHip!KPQYR zVqp)%u&xI2oPWEDwTALO_K{`*JqUmz22M0%DOI(Rtd{g9P4(xAA7B0~55Sc;S2${6 zIyy44vsX_n5{N0mh?qmmrhW>=j7;s<4~{hGyz7{rP1v#eEg4YG4>KPmj2lk3TT7ip z${$*DE++a1e*?vD*Ks)_QZ=iv--Rv54eBA;TWp$$uN?+fFEap?1;2I9v8S>yAg#K4 ze+bjj9hx*w)u{Wyd5xl7ZhX2Hm&<5`vR*C{5!8j1i1 z>xeXjiXb$jjDt|JVHmFVDq8AFwCGIk5no%#EcclL8ynyHodU;d*o%K*GG7)da=QSW zdjw=sf#{T_L7_GL<@D3eu+|x5$(K_K61glhM6b4>Ql!pfC3HjydD0|~ zU|R12W4TX~&WqfTyKow?^O+G^6(#;47qx@CdiSucbMXO|E4zD=6+JW*+d`as-Kcqw zAEF*o8T!N*a{h6=47vRmhFXPlTVIAtl$Nz_|3k!e02B{NS!6X;fpK;bqSXeT2<+F3 z7VGrUKwJ`&L}#am7V)cwc7Corb260hx>uiutEk%J&)4&fZN(yasK?!zx$>_BNkLXD zL|{W58n;Gn|8zkwN@45u6z0nJa_4^MNzzP$GF9zwMTH@!_TAFlboP(!@^n_4S0vi;L9$PP!m_19gQ2 zU9N5IXT#OI*Y-pfrmH)Yl@cv?Cx3~NDg?asY!%9R$j^CVjVd`RZ_x#` zS2C!oIU-Ns47N5mT#;XKL)+{PvB(PC?!dX+@61fk#>L1s{$S$`|EdNP^WCq)u;gw` zL^rRJRS)j?C<=4y=kx5z4i!n$uvAJ8c{#cX(evc}&yHa19vC<2Zm}kTTg-lvD1(PY zYvisCmTbkseYY3F>|k!lCRpA=i4D#+Pv1nHr|88+Rtzcs;jC|olpmhfQEf*zfRv|NW9(SPUAz0HRZ5W9keigBI+R zAK-v4tf-|GR&affb&FKrbTg{WYb7*P34G(ge}O}#Lk`n)dqf^1Nrt}PDDEhS!nte% ztsrm?Gc%*M?Da@YfpvZea3MH= z>Vk#wLrtniLi!w;qk(XAs9juw=$xaD$3JQs*`)GUX=Sr@jC(*9=r1X22`Vrr0^TOR z*QZPFpF{SxhD?$K@K%Dp<5Y$WK_1d(?W;lynKq`=)a$rtCK*$Y#)b;7S?;|p_ z+OrY9>7$E#Iqu=gEn^2OW56a+p=}5Ev8e`*KwB?%BFTi6r?W1M*d+wU*n}Wi>%H;! 
z#ei|>{<9z3H=OS=cNe9k;-E~4I<2Z}YGP!mBnhN=!!Xt_AC<7)0T~y<=DEv%!73`fu#u$?GU$e`hCy~d)N90sK$m2p%gVEPN>EA?C)Khgu(phkmgBKcP zZMys41As`_;2Z&50Yj!Fgqbr^tzBk~c=fyagfMlUjS$r)3PFXp&4 zTMTwGF#k3k8Uy)O)M*0x-O6&&e()~v7L^p_CE6eU2fO?*D){R0qxoDM_?D!pnpnCW%@zqqbr4`*E>0S?d;_a|sekO&C(oL*zug}aSE;CL^YZQ+c%^5MIxkl= z-=4UO<|ws3?UbDzrh2XXBSt7n|2UV!Ay?Ms|P*+9f%jA$wj#C^I~21v*8e6pa9{6&kK;mHZSj$Cv!{ON`aGCEDYy8 zJnVPN@9UjN%ur;>?Dcn>VGZELW;0@9wDA0|y}@vKd0z!?D13yGc(RWpbA)0mJ)Hf) z6V9^`YTp0i<$|#&kD#=xY^&{)q}fLYb>p?{$xJey^TG(p{rCUECN%co$?%3XmiF(C{5T52GEOpc=;&W-caB>BHN~Lc2zvvz4A<_wAs4V)a5;~D z+Z|2rlA_J%e|sS@H~SP9EkdjsC@EzbgwqGz^e8i&c|`#~tiMw&QV;WIk7n+f`>$VR+=JeU@)+&!6_vy#@M6o%#0CJW=iU zhBvBJ6gCQTP0oQzYv6pay+DpIncI+t&4dWBCSqM~l1(ABC@S+QT!0)giO2Cjwn)BT z|EADPpC7L2rFtCB{%6JB_D9`}7D3(hyBzCc84ee?#=gQ2v;8}1Wu?Rm865X6(RH#M zk#EJj{n+piM|iws)_vNvK?1_6blir-V#B+@fU{Y@L~?AutGpevl-%c)2~+Pge^Q$f zj=A!qzja_`$Z2j42i|Z3LIPL&0gPR6=-&62y5sEbb1+ic+Qdyx#|5RO89<5I`Y=;= z0iaA7Hd?7G1=W%?vA}|up3dtuuBbZ(jPUE5Z!+a6h2M*_j_Hk!Jfi}8mR>J{uU*Az zCEqPAa~c~%i;JZ<&$+H6B9FShwOpUynn}ZN>{MimgQ;03Ea=4xl#NH`J?av zsH9YDxge~ff@v}PXQyG$GRRm1C$yNtw~y32ZKcV%!u9e?Z&aqw+@VyFnkRxMlOg;b z*Ix%}AInIZ&+mUVHYT)+=XPuM`rsg!1`k*E) zrA1YW=kaJ&XFJ4Lr>LPJ1VV+nm-8<=T2};NILw=`UbkAs7UpO79v+veYwPLGtG1%1 zTB0-T@x_DEC(W1EPg|u;=)&vQ_jr6RETP2)1T-nd1>FmoFWa;XTD9SdET65c7!CU( zb1N%b1c~>NEeHx&e!<4lDgcsKGW{+U^?pjw z+@aWeihLSZK9W^0cl^x9Wk+~Ougo{ z?CD5M^$wpkLwSzDeTGqs43l?t>-9S z=V6(#vNS-~_XL#8qeuSKN*wV(O&hgADQ*M!I^{0s246~_e1NtK9W_`5KNh7+Z)LG2 z&0$jNX)i`X{brQ+%>*t<-vGcI_u%0f#s2!OL*(^|HuGipH_*xPVWROx6TYrRq zgD5dTx{N$;k|PKoYTL8`R@K4aGOi_)Sm`uE(5!aeRNPS>@V?O(6sW~NruPUmCyRn`-aRudei*`5}CHJyfs?f`t2CtHxFk|3ySE0 zOCgKSf4mhb5=POK)LQ;|`(Sq`7gQmPERZC>9x=95-GBQc+zuA~?nq+Zb2q->H11e% z=^?txm+bzo?4_{5t1-GFl6S6NABM5Wtwhwhomn$UH?N!~Y>cPyL3MXgn<40xxLGFmFB*TAV2GEQkc5%7-+_jYL3Lt~3T}SvR4W zf$TG+nhGb2{y!oz$tmbJCwWv)f7?QMnWK4JL$%>dp`cJ8Zjv$r&$ z`R2d^1PAFeq@^Z{`cqt+{ggm2eTWxi75iq2f^^vhkS5;Jdf50jHV%i!S9_aEj? 
zmgwc~w?DjYm)4vPaw#lDE&wQ)`=Xx`B8k@-we9ivbz`ChFkLdl$`}oRCoGxMnuPK1 zUm(Xs6+E&5K$bIP!8S=kHfly1gO$b_z`n%zuZm4p+kRxGuz0gxmXTpfbjks&?6QRn zq6|33OE*?TJjaToJQkmrTdr6;d@=)(bbHz57|hNugtaRFr(y<~8+ZaKAjbG_8NTY; za{!TRbIp|%F!-gE+JX@LdN73!=Rp?AM(o}&l0d@B3V-5;tGHMXu2njkNen_*PvdNQ z>I3)nw$v+jjE~%sliy-C6nq+;q?rG@A}@^kzn3#Dv-k1Rvo14pbN+F3#e=2VybI;} zBBMMGa#h#n^v7hBHhf66zdkwakH;y=DaEO#o*2`gygy#-sKFm5-wD=j%TUmEN0CWn zk}LAniGs^q3ofQB;)}kKQgmlvmouC$RN8M$Ky((L|M+C}SHJkb5{RW)+j;%qHv)^m z-8!@^<-n8z^ZJqfsL5*c@3Ti};237AUxxyC#bnunMagSOh!`Fd64W}w6r9kmdL2N- zYTi1%bPMU(0$ExBI$)J8JRNv@zZ1ULI6wo(P8nk42h5hxHp3tLSUevbdKHq|n@*dD za(cp~y0@C2Zc~A3&1~^^1C0a{J#Jds*gqM_zPqEiM zI@4H zm<~0L%o#Yeso7a^DvX}fTie-WQ;PiUVYX4Dg-Yl7Rx3hl293_nKqP=!O-dMW-Aj)u zJ*<7K&qTGomih2iM<9%Lm$8bmev;+|IT}h%<-7Thy;66a^O9HY%y4LtN6^wCFOc+P zecU%~{s6Y5C8OCitJ2(ek5jzlgp@Dw}@MYw}f~ugkjODLX z#f0eogMzLUHe@ut)ogL&IAzijS5u=V(ca7Ueu)Ks6ys^21pg@aL4<0~F&zN5O2=j2 zU-jVXcAk2_x&yK?pyEDV;0nS)9&f<^&UEGhQOgJyGWEam)}{_KrW;He*}Udvdi09P9O#k&`m@Vr1r?d@J!8T!arKB zzvN8Nc|RVmskiC#H%j#FU4Jw^AlL@6@ksBDsH{?E^E@H2%cHX~Xwbw{a z5OeJx%Z*~(+NuE~^7Kzl8JJP>M39v+tSrgZ)iwnB@8DKGB`nNEJEH|ihQZieYtW9& z12E!XbhgO2BM8=9gWqDD^uaRlg#MfQeF>I-KS_3j(+he7`AePcAN z&GIfZ)4>k7J^tA7uj6Qp3tdnCrx|jt4WwP{cItQ3la(Uhm%0$VZi8`;bZ%Iuf#g602~uXT(nY5w_|djJyH7@sM&OjDS{umznt-S5n#- z>VDdi`1-Ub_ym9hZxqPAttf%bJ;7J*w%6lRMK!HyXAx&?wUH)ttJEMXK9%%aZ5zQT zV%`KjGN0oA-l)mXdxB#YWi7oR*d6k`T^l>9GOe0GfaY9i!9CID~@pmLqXxeiwAnWFS z{Il?MZ0*eYT6F(b#~T(nWYE!VPIA4>+?3IMGWhRdlAWcmC*}uVlFNen#{Tact6lz}CXP_Iw|9-X3^fEV7U~s(y zTmH-1@b^zs>_v39m%~*0+w`l1u*Q^H^ZpF zBhecM={vhL7m>-+oh^~`5g1p?e`+Hc25fGG!3RG7e(x?}ko zEZFP|OrLfjoCbc-FUVba&40o%<@sXX9L%nl8F{6xByuXr>-&rOEyJmQI+!L|ilf9e;3zoY~ z7!VvBO3|P|K?+iehwFjR>20`%yrW}{y{P6o;?&z}0H|*OybbUl4NsLr@bU5a6zutN zAo&RaPV8_r!6)==c+_+^csRL~>C;WYac>RZm4WZ(k#_fL-Ucg+D*=O0ctMm&`N#|M zcH^^7CYg?O-Yr`MvR%T32Be$_pg8(@hCN~R8eZXL-K5lg8?`r+eX6#gWc064Xi&pG ztvaFZ=cWh8fSefS4~iYLcKa&}(O$(?eUZdq<$^QQP*wZNW-^=h&jilLzxX1sgskb`GO-ZL0TOxsq ztsxx9nJzBu#KZAo76<4GZ@E!AoG$aFgr)`2RbTf6CNN?>hx 
zO;$kAe7Cj63Wjy42Fnl2mK-{O`%l~G3I_TRT$?@ZuQ>YNwo#5l;iv!B7pVXXd@@g} zXid8=n>cZe7impR|7eS*Ql_Xvv8mS0h$26a0W0UI9hxnp$uPyDE@9N85mNM|V#DM3 zmg>;n`g7YLA=t){!zv6d?)$Vee-{*l-`sGyi7#5alOcBj_<#55>m5Us;5}1Hsh-he z?k#;ZJucvO9~bJNrK}}hM{XI&1ie=?OkZo9`n`!8mWte6$%;0cCwM4hV|cmZG*yLo z{V5NrivO>UQk;B!(;Y4m=brOd0{ZDORk}%?Y3oP+X>ql8LHexsS=r0pw|n>>D^q4` z@fu4_G9nV8%%p@nAlBM8)|$_E-5lf`!-F45>c8*#=BiA3Zz z|D7Opah#o+Qm}wfCsgt7V(!GSXYhEx@nHF>Daw~}vgV{Xx2BXtfzGFHIRr-bdI0|i zWh9+o)|wLNw)F?K;8>RM+`k5)G46G^3--O+BNg&AR6<{ts4740gH(5>qfDl|7NtJr){yC@buQO3<57aRMlorgmO8)&BsA?x)i z+@OlmvlFS7$E_U-caB9JwPQ)4Hr>6bwwAQaWtK#jPBH?yDkOLo#*ne!Q_ENzDs#{L zJuo+7sqLdtv-(sR-yum;VojO+yD^W+umgr5q_MQrW@hE=pRa#2{X6Jyz-e($%BQ`9 zf9Rz>*<8WBK8bQ>K5`5{E`ef@o!ShW%WU_*)ft;VuVMcbsH8@F@(pn~ulsb$%p&Z( zXzbbFd37bcv?!^!wZkzm_RMe$?hqZr!&r!ySUn+FwwIutu-e~eId?*=jvT;l-<7r0 z{^Zor?c^O=-Kd$vsj%7j(0u+t2)wyX5hN#~frCx8k=jkpRr+19?h~dhrF4jgbJmhO z4{GJLd7gRTj|uA0cR5nuLFn&No4(r88K=zgGl(qdq{xKuue z58*&G%7PrL!Pa*+-_|-fGe;ytg+Yr0+Q(4jmUZ!Kr}-_6qrU}T(QsNHKBkm%)ghhs zph_-5M}Ln}s{27%T(F5UKfqxmz3hNvmq#+BYe@PoAzybHgim19Om0y_i1+iOpyfhk z;B4(DO-(J8kJk-P>kXpQmbh_hSsrfqK4@P(`^&sNk^cVuTfgADd_n5@^s%k=9*2N{ zK;^peIPdXL+h(Yto01yT!aiBfC(RirMK+;RSf#O@9~Dl$WJuDOSs?vh9hft%JDJYT zZbt5Wd9In$*;=_>8)|3F%oOBzH7h;eSD2^8^Fb;$54$!t*715CLaM)H&NLaW7nE!0mlV((+_D>W+d$J zz{31T?P-zCq78HsU>k*EoVJiKYihRoUI{d2U4Pao(_}fH5Nuh>Dnk5$?h@A4I$PuV5>s7u`bTG z2zObWNA;gxqQ>5mb`WzxK{AI13|^4ct-L@Ha?W0+rZVMMc1lc^kKc4c<`AEHZ&Pu{ zs3H?D|7(DetR@c{tM=aSHFCboIc^RSu5dm89RwXutM`dIZU`W6dF!Y3k$9%XP)dNU ztqh>Wz~lyouiw153&w>SapB(1ko-sfvVwJC<7LlabfpsfXe&zQSk0=i$gZW z0bQzVeOg7{WPhA|w5sVXH3jx@y`|b?GML0~9^|0G@eU~i)Gl?me}yrFCR9402}71& z_@lX4O&FB6?3%;Q%~pE$q1IZj$@7uyf$=jtmvh>|fRPDCUet4=U-~H?H8J~0y7OAM zHpCqYJZgaG3l^BicZ=?k09vK(E&uC!^+>83J_naQ}D(hn>WdRIuv=>Z+koH8neYkIRTmO$XM^Z63UhCW(j4lwh?; zlJd7Y_5OJ++W+?%IMtUgk3%87IYm=;pbiZN z>sX%wYK%t1g}-X1VFjK@Fskv@{se=EEBP4TBLi)Q!>{!HuFA_j>R(cyek^y7lZ&WV zIWdx!X$)lv`KwoWygNQVwp?nAKw{z|jq3AP#Cj|0g3CaL5u(iN!UBfiP=KdSkEK6v 
zAhxii1X{22?p%Fx^Z7Or>}28$W)t&9OGNB)TX~pKZ!8S}8usdT&`e29H~mj%uQGvJ zCc*CJs0xp-9%rf&@1p_N+jtB$H4tx)wVhD_a)vRuW2dK8of|Ti_drYWt)tl=)1`zJ z2(AYY+@|astqeq@qvXM|V$xv;V1fYG3zviE>9_DIG**b10)nNcG2Hb-AR|jAn>+1ZflU-C1 zk{gv0Z<1!u2HE#U&PnC8*53+>s1EE(vl|=$44KDG)V)yFAy&50uBG2tycoXo1KgHr z87XZ(dl~K-fPV5efC_ZU%~$Fl>SUtN)fwV*_&tYZyYl`pYGk}a$0nFX`MDP|-&xa1o!ClJhZBhcz%IcmUlN~s0Swg0;3M+WvyXb-H%+6D5V z@=Q~Yxw1{dno7CM@Njg>diMmKp>Wf@VQ%@W1K>zK{8yT^ivA$x(4AH3^7W=#|J}hA zQ=MDl2n5%e+nn|MbJws3YPHG8#DyiMB$7 zhNBuN8OyzW?DW-@3VYK*>5Q0*4-txqEtgDiBras8X|@Cqsh_NOL30~UZ#W4WkBx@S zPU*eVBs%#~6J%M#4S{fnVvzAFJ0noCKn19`-h&E-v5-k#bvFp+Fz?o3AwDZk+RvVL$sChxw<-VRe6c zfdsS7LaNrNPnZu7?7)tHciTp)KU0k6y3vz(H`n%ZOjX4m(rbY4*u9DHYap@->TSN| zTaaeW?4Af}uRXDs8PfWhUUghH6oNlOp`(((7g0Ik<|0qQ(} z^S-(X%640zH=2qjpdMIU3}%^_Jv|CWoh|0{fVnYEPsgKefUlydDhEb>Qg314cd;mc zxxNUs%W%1%Phi#^@cW&qztt<}!gu2}yGk;UWHH75Q*=?q5wNUC!AuZUFc7L6v_N`_ z(F6eyW%vBy(E2Uo25+rie})a-p`&0V6`$7$8Nzwx8V&q=Vqh&(lIG)bg%picK|V$- z;L`Vik{*%Ex~RE@#r@&h$u=O=Z7u9&ycJrJd_aTx7bu_jrTSg}qA z*hIy_Pve(_KaaYkL;w4}SQ*Q^rxRtC)ZM9%%5|YA@VI|0FMUYzDYne<|2jMKZz%i! 
zk6(r?$z+S%h`1vrL%5N35|Oop$e!GytbRp3mp|@jP$0*Dg;=7B~HRCFuTHcGHZe<0dJPAa7QP zKCS}-^8!E19fm-LNrq-}9q6`$IW8>gbd+6ObyQ8;5h&GO>uEtd-Oy5 zszA^mOfLQ~Y*9b_eXKL1+#w5Opn@$$)Uk%dYp1hMA}$HJ_vD&_C)qM_)%azc1n6iT zL%ekNm)s|$`AA~^08U(u2MZdA`8}XEw!d8B(Rq<*UoGS`di*;b>U=MN+Klg1y*05N zYy>+Fi~L{R(XnK|v#Q0(9tAp`(O09rk7uk5kGG3udfE^M8!I4o&uVOo)HoHLlMS^aV;g)S{g6p&Yc?Gp96EODaV**3s?&qdG+J}cmDjZ^Q_H1dYtg|e&#{yhpRRF() z{ka~P5)sFOEk9YBL&_1Jo@s%#_H>dx0~VUqU*jN90pmjA;DgD=Fq}bUn58Ilf5<_5 zeO`-r^g#Bd|Ds6RE+~K^>4FR)u@3*VdqJl?&mG#bNR4L11cW+`4#JGoqn^dUqj@RGEn}@gr4y1{m7%%+ezIOW*(57r=?L_X_ z<9<9dGVM8+P+h-Kkh`97Vk8SOx|S7`@Ur1buYhIqkTkw|;JNzfP3J`lIB1hHTm1PK zJZeo#zXMWte8xXM0f(;LGdJ1vZ^f&A^zWL+47{m+MniIeQDvy$bHtAN7{#`vs4Oe4 zJ^hL-vMZm7|Do7C$U2J7v_pzMC207TevbCTWy)LBuR@8^qxU0Wc^%RwvN~T+EH7ozm=uv56!rRuj$~WBtVsBb-4<57O&M{( zTKT?17sgUQV*>G;X0UCxY{R@|k{0Lm4i%;G+Vw`7wUa*Tm2b)wdfoAgB*S(7+h?IDP2IQKs*RLv3n^CX>wdPg!xE@opS{LJ0C<<`60#8_X9h4&4%Zm>~9Ksq%Z5 zNW}^*x%DWK;@cFBu!&Vj=8Qap-(1G|d35sKK1otM)do;ul`cxak}HI!vnyyB!IZIf zqP+1#$GNxe$zjhqOgU*q5<+60!d+VOZ+3s#UEc{_&{%t2T&~ryIi={@DNtsNkWPIQ zXs6&}M@MO&$NWw@zGKyLiZLLxFu?|G^@byT_gJ1vg>Iuz>)jNL0i)phB-~Z=PIBs`QaNGsfPq(1~ z1`jJ1Dx-8rKxQAV&|WqdtxO@>$)XvOAk1ec9u-YHSc1xNnX@61B&?-gP>o54M&3vRWT?QiXNvX2D zQPq|^fV+9ZSUaXjG*+Le@a}o;yJ}p3=Z=n!Qxi?A8YahguX*!8Prc)_7WKzw7;{tX zXsGYfFA(1YcYsFb`kX)d!c?5d`#!QQ&^Y?&bImF}&ZQ_VvB1dJM8yxaCN*LwyU=Q! 
zWMBhS)$?cl`K_FRIna%BbDdQC0tVzFy6z5wgr*uZvp$0p@{V|>aJrS-*!Qsm^t_lxIgJG1LnB}%=-3*6nRKVxkoPoYzJ6NtdQ_1wMwKYISI9cq_4I@h)HhR>dc z+qccts(U@2bCV?YlHI?=xlIM-APixwwl&K1&6eo!x`i~lvUy{iBwaS#so|VoM0Hx@ z;$ZqXCiP+h6ev9qTha-C9`UelCc}~XDDL6jd7@d)_z--)YBUqcjQYu?#{t2FduHyA z#OFB1V4Os|M?M~!TA)8lXr+-ViKRwYyKBr$u1WW!|GoVFqhF(skw$F24Zj3%vy5%9 za5whZKe=+w;!;2C@D$DZSg8;~Tp3=slSnC;k2J)qcq~TPEw|Fzi__LjR=fVSs9Bo zC&Q$ni$9Y5oKXI@Wx+Sc)9m%Dg>!)@3sQ}ONh~B<82*jvpqLr0=M&@s zAYHqGfDa^u~@bmvt};@Xbp{C9hv=WBeR?>gYt|N@}mi`hS%tw(9>i7qDgX9aGmJ z@CvfTMr!xzhGxdD*!I4XNPS=<0wDGjeDBH#TjtNs{Ip!w`ev@_kodylm3FK0SHDX6 zv%znm4tDlJ7G=iE6BkX@D?GnxDeoF?*!TdQb0c8|>S7TY38Z-<*1^LnW$?t{Yor+Ra|#NavIfF4j20X&@fWZzsgAG_2RQzSA6p35%ai zJOrs~`6h1TYH??r`+3<|A}QoXcm>&N^tAtPK&8)_7t^t+LjD!31MDHR? zz#!eKRPrJRON;|1F}fq2-H1*Svl_o)mNSLW%v8`LhaJ_gbj6fHCr%(PK_M^0zx=ly zB^kfZ%>F=9Bs{NW@o(-`n@&i>>RVScf2_7K-}Z7{71`34P|m#&L;4$t`I^Os<^3xS z;MI5M2lOA9K@gnDHvrC8m$`uRsxmB+|L>Q2+fu-fdSY03yKby$y0P5{z~_dcg+YbB HYt;V$#~S<{ literal 0 HcmV?d00001 diff --git a/backend/main.py b/backend/main.py index 00991ac..3e08546 100644 --- a/backend/main.py +++ b/backend/main.py @@ -105,8 +105,17 @@ app.include_router(scheduler.router) from backend.routers import report_templates app.include_router(report_templates.router) -# Start scheduler service on application startup +# Alerts router +from backend.routers import alerts +app.include_router(alerts.router) + +# Recurring schedules router +from backend.routers import recurring_schedules +app.include_router(recurring_schedules.router) + +# Start scheduler service and device status monitor on application startup from backend.services.scheduler import start_scheduler, stop_scheduler +from backend.services.device_status_monitor import start_device_status_monitor, stop_device_status_monitor @app.on_event("startup") async def startup_event(): @@ -115,9 +124,17 @@ async def startup_event(): await start_scheduler() logger.info("Scheduler service started") + logger.info("Starting device status monitor...") + await 
start_device_status_monitor() + logger.info("Device status monitor started") + @app.on_event("shutdown") def shutdown_event(): """Clean up services on app shutdown""" + logger.info("Stopping device status monitor...") + stop_device_status_monitor() + logger.info("Device status monitor stopped") + logger.info("Stopping scheduler service...") stop_scheduler() logger.info("Scheduler service stopped") diff --git a/backend/migrate_add_auto_increment_index.py b/backend/migrate_add_auto_increment_index.py new file mode 100644 index 0000000..f91a3e2 --- /dev/null +++ b/backend/migrate_add_auto_increment_index.py @@ -0,0 +1,67 @@ +""" +Migration: Add auto_increment_index column to recurring_schedules table + +This migration adds the auto_increment_index column that controls whether +the scheduler should automatically find an unused store index before starting +a new measurement. + +Run this script once to update existing databases: + python -m backend.migrate_add_auto_increment_index +""" + +import sqlite3 +import os + +DB_PATH = "data/seismo_fleet.db" + + +def migrate(): + """Add auto_increment_index column to recurring_schedules table.""" + if not os.path.exists(DB_PATH): + print(f"Database not found at {DB_PATH}") + return False + + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + + try: + # Check if recurring_schedules table exists + cursor.execute(""" + SELECT name FROM sqlite_master + WHERE type='table' AND name='recurring_schedules' + """) + if not cursor.fetchone(): + print("recurring_schedules table does not exist yet. 
Will be created on app startup.") + conn.close() + return True + + # Check if auto_increment_index column already exists + cursor.execute("PRAGMA table_info(recurring_schedules)") + columns = [row[1] for row in cursor.fetchall()] + + if "auto_increment_index" in columns: + print("auto_increment_index column already exists in recurring_schedules table.") + conn.close() + return True + + # Add the column + print("Adding auto_increment_index column to recurring_schedules table...") + cursor.execute(""" + ALTER TABLE recurring_schedules + ADD COLUMN auto_increment_index BOOLEAN DEFAULT 1 + """) + conn.commit() + print("Successfully added auto_increment_index column.") + + conn.close() + return True + + except Exception as e: + print(f"Migration failed: {e}") + conn.close() + return False + + +if __name__ == "__main__": + success = migrate() + exit(0 if success else 1) diff --git a/backend/models.py b/backend/models.py index ad5b388..50f552d 100644 --- a/backend/models.py +++ b/backend/models.py @@ -300,3 +300,93 @@ class ReportTemplate(Base): created_at = Column(DateTime, default=datetime.utcnow) updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) + + +# ============================================================================ +# Sound Monitoring Scheduler +# ============================================================================ + +class RecurringSchedule(Base): + """ + Recurring schedule definitions for automated sound monitoring. 
+ + Supports two schedule types: + - "weekly_calendar": Select specific days with start/end times (e.g., Mon/Wed/Fri 7pm-7am) + - "simple_interval": For 24/7 monitoring with daily stop/download/restart cycles + """ + __tablename__ = "recurring_schedules" + + id = Column(String, primary_key=True, index=True) # UUID + project_id = Column(String, nullable=False, index=True) # FK to Project.id + location_id = Column(String, nullable=False, index=True) # FK to MonitoringLocation.id + unit_id = Column(String, nullable=True, index=True) # FK to RosterUnit.id (optional, can use assignment) + + name = Column(String, nullable=False) # "Weeknight Monitoring", "24/7 Continuous" + schedule_type = Column(String, nullable=False) # "weekly_calendar" | "simple_interval" + device_type = Column(String, nullable=False) # "slm" | "seismograph" + + # Weekly Calendar fields (schedule_type = "weekly_calendar") + # JSON format: { + # "monday": {"enabled": true, "start": "19:00", "end": "07:00"}, + # "tuesday": {"enabled": false}, + # ... + # } + weekly_pattern = Column(Text, nullable=True) + + # Simple Interval fields (schedule_type = "simple_interval") + interval_type = Column(String, nullable=True) # "daily" | "hourly" + cycle_time = Column(String, nullable=True) # "00:00" - time to run stop/download/restart + include_download = Column(Boolean, default=True) # Download data before restart + + # Automation options (applies to both schedule types) + auto_increment_index = Column(Boolean, default=True) # Auto-increment store/index number before start + # When True: prevents "overwrite data?" 
prompts by using a new index each time + + # Shared configuration + enabled = Column(Boolean, default=True) + timezone = Column(String, default="America/New_York") + + # Tracking + last_generated_at = Column(DateTime, nullable=True) # When actions were last generated + next_occurrence = Column(DateTime, nullable=True) # Computed next action time + + created_at = Column(DateTime, default=datetime.utcnow) + updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) + + +class Alert(Base): + """ + In-app alerts for device status changes and system events. + + Designed for future expansion to email/webhook notifications. + Currently supports: + - device_offline: Device became unreachable + - device_online: Device came back online + - schedule_failed: Scheduled action failed to execute + """ + __tablename__ = "alerts" + + id = Column(String, primary_key=True, index=True) # UUID + + # Alert classification + alert_type = Column(String, nullable=False) # "device_offline" | "device_online" | "schedule_failed" + severity = Column(String, default="warning") # "info" | "warning" | "critical" + + # Related entities (nullable - may not all apply) + project_id = Column(String, nullable=True, index=True) + location_id = Column(String, nullable=True, index=True) + unit_id = Column(String, nullable=True, index=True) + schedule_id = Column(String, nullable=True) # RecurringSchedule or ScheduledAction id + + # Alert content + title = Column(String, nullable=False) # "NRL-001 Device Offline" + message = Column(Text, nullable=True) # Detailed description + alert_metadata = Column(Text, nullable=True) # JSON: additional context data + + # Status tracking + status = Column(String, default="active") # "active" | "acknowledged" | "resolved" | "dismissed" + acknowledged_at = Column(DateTime, nullable=True) + resolved_at = Column(DateTime, nullable=True) + + created_at = Column(DateTime, default=datetime.utcnow) + expires_at = Column(DateTime, nullable=True) # 
Auto-dismiss after this time diff --git a/backend/routers/alerts.py b/backend/routers/alerts.py new file mode 100644 index 0000000..42a8353 --- /dev/null +++ b/backend/routers/alerts.py @@ -0,0 +1,327 @@ +""" +Alerts Router + +API endpoints for managing in-app alerts. +""" + +from fastapi import APIRouter, Request, Depends, HTTPException, Query +from fastapi.templating import Jinja2Templates +from fastapi.responses import HTMLResponse, JSONResponse +from sqlalchemy.orm import Session +from typing import Optional +from datetime import datetime, timedelta + +from backend.database import get_db +from backend.models import Alert, RosterUnit +from backend.services.alert_service import get_alert_service + +router = APIRouter(prefix="/api/alerts", tags=["alerts"]) +templates = Jinja2Templates(directory="templates") + + +# ============================================================================ +# Alert List and Count +# ============================================================================ + +@router.get("/") +async def list_alerts( + db: Session = Depends(get_db), + status: Optional[str] = Query(None, description="Filter by status: active, acknowledged, resolved, dismissed"), + project_id: Optional[str] = Query(None), + unit_id: Optional[str] = Query(None), + alert_type: Optional[str] = Query(None, description="Filter by type: device_offline, device_online, schedule_failed"), + limit: int = Query(50, le=100), + offset: int = Query(0, ge=0), +): + """ + List alerts with optional filters. 
+ """ + alert_service = get_alert_service(db) + + alerts = alert_service.get_all_alerts( + status=status, + project_id=project_id, + unit_id=unit_id, + alert_type=alert_type, + limit=limit, + offset=offset, + ) + + return { + "alerts": [ + { + "id": a.id, + "alert_type": a.alert_type, + "severity": a.severity, + "title": a.title, + "message": a.message, + "status": a.status, + "unit_id": a.unit_id, + "project_id": a.project_id, + "location_id": a.location_id, + "created_at": a.created_at.isoformat() if a.created_at else None, + "acknowledged_at": a.acknowledged_at.isoformat() if a.acknowledged_at else None, + "resolved_at": a.resolved_at.isoformat() if a.resolved_at else None, + } + for a in alerts + ], + "count": len(alerts), + "limit": limit, + "offset": offset, + } + + +@router.get("/active") +async def list_active_alerts( + db: Session = Depends(get_db), + project_id: Optional[str] = Query(None), + unit_id: Optional[str] = Query(None), + alert_type: Optional[str] = Query(None), + min_severity: Optional[str] = Query(None, description="Minimum severity: info, warning, critical"), + limit: int = Query(50, le=100), +): + """ + List only active alerts. + """ + alert_service = get_alert_service(db) + + alerts = alert_service.get_active_alerts( + project_id=project_id, + unit_id=unit_id, + alert_type=alert_type, + min_severity=min_severity, + limit=limit, + ) + + return { + "alerts": [ + { + "id": a.id, + "alert_type": a.alert_type, + "severity": a.severity, + "title": a.title, + "message": a.message, + "unit_id": a.unit_id, + "project_id": a.project_id, + "created_at": a.created_at.isoformat() if a.created_at else None, + } + for a in alerts + ], + "count": len(alerts), + } + + +@router.get("/active/count") +async def get_active_alert_count(db: Session = Depends(get_db)): + """ + Get count of active alerts (for navbar badge). 
+ """ + alert_service = get_alert_service(db) + count = alert_service.get_active_alert_count() + return {"count": count} + + +# ============================================================================ +# Single Alert Operations +# ============================================================================ + +@router.get("/{alert_id}") +async def get_alert( + alert_id: str, + db: Session = Depends(get_db), +): + """ + Get a specific alert. + """ + alert = db.query(Alert).filter_by(id=alert_id).first() + if not alert: + raise HTTPException(status_code=404, detail="Alert not found") + + # Get related unit info + unit = None + if alert.unit_id: + unit = db.query(RosterUnit).filter_by(id=alert.unit_id).first() + + return { + "id": alert.id, + "alert_type": alert.alert_type, + "severity": alert.severity, + "title": alert.title, + "message": alert.message, + "metadata": alert.alert_metadata, + "status": alert.status, + "unit_id": alert.unit_id, + "unit_name": unit.id if unit else None, + "project_id": alert.project_id, + "location_id": alert.location_id, + "schedule_id": alert.schedule_id, + "created_at": alert.created_at.isoformat() if alert.created_at else None, + "acknowledged_at": alert.acknowledged_at.isoformat() if alert.acknowledged_at else None, + "resolved_at": alert.resolved_at.isoformat() if alert.resolved_at else None, + "expires_at": alert.expires_at.isoformat() if alert.expires_at else None, + } + + +@router.post("/{alert_id}/acknowledge") +async def acknowledge_alert( + alert_id: str, + db: Session = Depends(get_db), +): + """ + Mark alert as acknowledged. 
+ """ + alert_service = get_alert_service(db) + alert = alert_service.acknowledge_alert(alert_id) + + if not alert: + raise HTTPException(status_code=404, detail="Alert not found") + + return { + "success": True, + "alert_id": alert.id, + "status": alert.status, + } + + +@router.post("/{alert_id}/dismiss") +async def dismiss_alert( + alert_id: str, + db: Session = Depends(get_db), +): + """ + Dismiss alert. + """ + alert_service = get_alert_service(db) + alert = alert_service.dismiss_alert(alert_id) + + if not alert: + raise HTTPException(status_code=404, detail="Alert not found") + + return { + "success": True, + "alert_id": alert.id, + "status": alert.status, + } + + +@router.post("/{alert_id}/resolve") +async def resolve_alert( + alert_id: str, + db: Session = Depends(get_db), +): + """ + Manually resolve an alert. + """ + alert_service = get_alert_service(db) + alert = alert_service.resolve_alert(alert_id) + + if not alert: + raise HTTPException(status_code=404, detail="Alert not found") + + return { + "success": True, + "alert_id": alert.id, + "status": alert.status, + } + + +# ============================================================================ +# HTML Partials for HTMX +# ============================================================================ + +@router.get("/partials/dropdown", response_class=HTMLResponse) +async def get_alert_dropdown( + request: Request, + db: Session = Depends(get_db), +): + """ + Return HTML partial for alert dropdown in navbar. 
+ """ + alert_service = get_alert_service(db) + alerts = alert_service.get_active_alerts(limit=10) + + # Calculate relative time for each alert + now = datetime.utcnow() + alerts_data = [] + for alert in alerts: + delta = now - alert.created_at + if delta.days > 0: + time_ago = f"{delta.days}d ago" + elif delta.seconds >= 3600: + time_ago = f"{delta.seconds // 3600}h ago" + elif delta.seconds >= 60: + time_ago = f"{delta.seconds // 60}m ago" + else: + time_ago = "just now" + + alerts_data.append({ + "alert": alert, + "time_ago": time_ago, + }) + + return templates.TemplateResponse("partials/alerts/alert_dropdown.html", { + "request": request, + "alerts": alerts_data, + "total_count": alert_service.get_active_alert_count(), + }) + + +@router.get("/partials/list", response_class=HTMLResponse) +async def get_alert_list( + request: Request, + db: Session = Depends(get_db), + status: Optional[str] = Query(None), + limit: int = Query(20), +): + """ + Return HTML partial for alert list page. + """ + alert_service = get_alert_service(db) + + if status: + alerts = alert_service.get_all_alerts(status=status, limit=limit) + else: + alerts = alert_service.get_all_alerts(limit=limit) + + # Calculate relative time for each alert + now = datetime.utcnow() + alerts_data = [] + for alert in alerts: + delta = now - alert.created_at + if delta.days > 0: + time_ago = f"{delta.days}d ago" + elif delta.seconds >= 3600: + time_ago = f"{delta.seconds // 3600}h ago" + elif delta.seconds >= 60: + time_ago = f"{delta.seconds // 60}m ago" + else: + time_ago = "just now" + + alerts_data.append({ + "alert": alert, + "time_ago": time_ago, + }) + + return templates.TemplateResponse("partials/alerts/alert_list.html", { + "request": request, + "alerts": alerts_data, + "status_filter": status, + }) + + +# ============================================================================ +# Cleanup +# ============================================================================ + 
+@router.post("/cleanup-expired") +async def cleanup_expired_alerts(db: Session = Depends(get_db)): + """ + Cleanup expired alerts (admin/maintenance endpoint). + """ + alert_service = get_alert_service(db) + count = alert_service.cleanup_expired_alerts() + + return { + "success": True, + "cleaned_up": count, + } diff --git a/backend/routers/project_locations.py b/backend/routers/project_locations.py index 40f3d5d..936ec47 100644 --- a/backend/routers/project_locations.py +++ b/backend/routers/project_locations.py @@ -90,6 +90,40 @@ async def get_project_locations( }) +@router.get("/locations-json") +async def get_project_locations_json( + project_id: str, + db: Session = Depends(get_db), + location_type: Optional[str] = Query(None), +): + """ + Get all monitoring locations for a project as JSON. + Used by the schedule modal to populate location dropdown. + """ + project = db.query(Project).filter_by(id=project_id).first() + if not project: + raise HTTPException(status_code=404, detail="Project not found") + + query = db.query(MonitoringLocation).filter_by(project_id=project_id) + + if location_type: + query = query.filter_by(location_type=location_type) + + locations = query.order_by(MonitoringLocation.name).all() + + return [ + { + "id": loc.id, + "name": loc.name, + "location_type": loc.location_type, + "description": loc.description, + "address": loc.address, + "coordinates": loc.coordinates, + } + for loc in locations + ] + + @router.post("/locations/create") async def create_location( project_id: str, diff --git a/backend/routers/projects.py b/backend/routers/projects.py index 1df0ee0..2ef5f7e 100644 --- a/backend/routers/projects.py +++ b/backend/routers/projects.py @@ -28,6 +28,7 @@ from backend.models import ( UnitAssignment, RecordingSession, ScheduledAction, + RecurringSchedule, RosterUnit, ) diff --git a/backend/routers/recurring_schedules.py b/backend/routers/recurring_schedules.py new file mode 100644 index 0000000..3168edb --- /dev/null +++ 
b/backend/routers/recurring_schedules.py @@ -0,0 +1,458 @@ +""" +Recurring Schedules Router + +API endpoints for managing recurring monitoring schedules. +""" + +from fastapi import APIRouter, Request, Depends, HTTPException, Query +from fastapi.templating import Jinja2Templates +from fastapi.responses import HTMLResponse, JSONResponse +from sqlalchemy.orm import Session +from typing import Optional +from datetime import datetime +import json + +from backend.database import get_db +from backend.models import RecurringSchedule, MonitoringLocation, Project, RosterUnit +from backend.services.recurring_schedule_service import get_recurring_schedule_service + +router = APIRouter(prefix="/api/projects/{project_id}/recurring-schedules", tags=["recurring-schedules"]) +templates = Jinja2Templates(directory="templates") + + +# ============================================================================ +# List and Get +# ============================================================================ + +@router.get("/") +async def list_recurring_schedules( + project_id: str, + db: Session = Depends(get_db), + enabled_only: bool = Query(False), +): + """ + List all recurring schedules for a project. 
+ """ + project = db.query(Project).filter_by(id=project_id).first() + if not project: + raise HTTPException(status_code=404, detail="Project not found") + + query = db.query(RecurringSchedule).filter_by(project_id=project_id) + if enabled_only: + query = query.filter_by(enabled=True) + + schedules = query.order_by(RecurringSchedule.created_at.desc()).all() + + return { + "schedules": [ + { + "id": s.id, + "name": s.name, + "schedule_type": s.schedule_type, + "device_type": s.device_type, + "location_id": s.location_id, + "unit_id": s.unit_id, + "enabled": s.enabled, + "weekly_pattern": json.loads(s.weekly_pattern) if s.weekly_pattern else None, + "interval_type": s.interval_type, + "cycle_time": s.cycle_time, + "include_download": s.include_download, + "timezone": s.timezone, + "next_occurrence": s.next_occurrence.isoformat() if s.next_occurrence else None, + "last_generated_at": s.last_generated_at.isoformat() if s.last_generated_at else None, + "created_at": s.created_at.isoformat() if s.created_at else None, + } + for s in schedules + ], + "count": len(schedules), + } + + +@router.get("/{schedule_id}") +async def get_recurring_schedule( + project_id: str, + schedule_id: str, + db: Session = Depends(get_db), +): + """ + Get a specific recurring schedule. 
+ """ + schedule = db.query(RecurringSchedule).filter_by( + id=schedule_id, + project_id=project_id, + ).first() + + if not schedule: + raise HTTPException(status_code=404, detail="Schedule not found") + + # Get related location and unit info + location = db.query(MonitoringLocation).filter_by(id=schedule.location_id).first() + unit = None + if schedule.unit_id: + unit = db.query(RosterUnit).filter_by(id=schedule.unit_id).first() + + return { + "id": schedule.id, + "name": schedule.name, + "schedule_type": schedule.schedule_type, + "device_type": schedule.device_type, + "location_id": schedule.location_id, + "location_name": location.name if location else None, + "unit_id": schedule.unit_id, + "unit_name": unit.id if unit else None, + "enabled": schedule.enabled, + "weekly_pattern": json.loads(schedule.weekly_pattern) if schedule.weekly_pattern else None, + "interval_type": schedule.interval_type, + "cycle_time": schedule.cycle_time, + "include_download": schedule.include_download, + "timezone": schedule.timezone, + "next_occurrence": schedule.next_occurrence.isoformat() if schedule.next_occurrence else None, + "last_generated_at": schedule.last_generated_at.isoformat() if schedule.last_generated_at else None, + "created_at": schedule.created_at.isoformat() if schedule.created_at else None, + "updated_at": schedule.updated_at.isoformat() if schedule.updated_at else None, + } + + +# ============================================================================ +# Create +# ============================================================================ + +@router.post("/") +async def create_recurring_schedule( + project_id: str, + request: Request, + db: Session = Depends(get_db), +): + """ + Create recurring schedules for one or more locations. 
+ + Body for weekly_calendar (supports multiple locations): + { + "name": "Weeknight Monitoring", + "schedule_type": "weekly_calendar", + "location_ids": ["uuid1", "uuid2"], // Array of location IDs + "weekly_pattern": { + "monday": {"enabled": true, "start": "19:00", "end": "07:00"}, + "tuesday": {"enabled": false}, + ... + }, + "include_download": true, + "auto_increment_index": true, + "timezone": "America/New_York" + } + + Body for simple_interval (supports multiple locations): + { + "name": "24/7 Continuous", + "schedule_type": "simple_interval", + "location_ids": ["uuid1", "uuid2"], // Array of location IDs + "interval_type": "daily", + "cycle_time": "00:00", + "include_download": true, + "auto_increment_index": true, + "timezone": "America/New_York" + } + + Legacy single location support (backwards compatible): + { + "name": "...", + "location_id": "uuid", // Single location ID + ... + } + """ + project = db.query(Project).filter_by(id=project_id).first() + if not project: + raise HTTPException(status_code=404, detail="Project not found") + + data = await request.json() + + # Support both location_ids (array) and location_id (single) for backwards compatibility + location_ids = data.get("location_ids", []) + if not location_ids and data.get("location_id"): + location_ids = [data.get("location_id")] + + if not location_ids: + raise HTTPException(status_code=400, detail="At least one location is required") + + # Validate all locations exist + locations = db.query(MonitoringLocation).filter( + MonitoringLocation.id.in_(location_ids), + MonitoringLocation.project_id == project_id, + ).all() + + if len(locations) != len(location_ids): + raise HTTPException(status_code=404, detail="One or more locations not found") + + service = get_recurring_schedule_service(db) + created_schedules = [] + base_name = data.get("name", "Unnamed Schedule") + + # Create a schedule for each location + for location in locations: + # Determine device type from location + device_type = 
"slm" if location.location_type == "sound" else "seismograph" + + # Append location name if multiple locations + schedule_name = f"{base_name} - {location.name}" if len(locations) > 1 else base_name + + schedule = service.create_schedule( + project_id=project_id, + location_id=location.id, + name=schedule_name, + schedule_type=data.get("schedule_type", "weekly_calendar"), + device_type=device_type, + unit_id=data.get("unit_id"), + weekly_pattern=data.get("weekly_pattern"), + interval_type=data.get("interval_type"), + cycle_time=data.get("cycle_time"), + include_download=data.get("include_download", True), + auto_increment_index=data.get("auto_increment_index", True), + timezone=data.get("timezone", "America/New_York"), + ) + created_schedules.append({ + "schedule_id": schedule.id, + "location_id": location.id, + "location_name": location.name, + }) + + return JSONResponse({ + "success": True, + "schedules": created_schedules, + "count": len(created_schedules), + "message": f"Created {len(created_schedules)} recurring schedule(s)", + }) + + +# ============================================================================ +# Update +# ============================================================================ + +@router.put("/{schedule_id}") +async def update_recurring_schedule( + project_id: str, + schedule_id: str, + request: Request, + db: Session = Depends(get_db), +): + """ + Update a recurring schedule. 
+ """ + schedule = db.query(RecurringSchedule).filter_by( + id=schedule_id, + project_id=project_id, + ).first() + + if not schedule: + raise HTTPException(status_code=404, detail="Schedule not found") + + data = await request.json() + service = get_recurring_schedule_service(db) + + # Build update kwargs + update_kwargs = {} + for field in ["name", "weekly_pattern", "interval_type", "cycle_time", + "include_download", "auto_increment_index", "timezone", "unit_id"]: + if field in data: + update_kwargs[field] = data[field] + + updated = service.update_schedule(schedule_id, **update_kwargs) + + return { + "success": True, + "schedule_id": updated.id, + "message": "Schedule updated successfully", + } + + +# ============================================================================ +# Delete +# ============================================================================ + +@router.delete("/{schedule_id}") +async def delete_recurring_schedule( + project_id: str, + schedule_id: str, + db: Session = Depends(get_db), +): + """ + Delete a recurring schedule. + """ + service = get_recurring_schedule_service(db) + deleted = service.delete_schedule(schedule_id) + + if not deleted: + raise HTTPException(status_code=404, detail="Schedule not found") + + return { + "success": True, + "message": "Schedule deleted successfully", + } + + +# ============================================================================ +# Enable/Disable +# ============================================================================ + +@router.post("/{schedule_id}/enable") +async def enable_schedule( + project_id: str, + schedule_id: str, + db: Session = Depends(get_db), +): + """ + Enable a disabled schedule. 
+ """ + service = get_recurring_schedule_service(db) + schedule = service.enable_schedule(schedule_id) + + if not schedule: + raise HTTPException(status_code=404, detail="Schedule not found") + + return { + "success": True, + "schedule_id": schedule.id, + "enabled": schedule.enabled, + "message": "Schedule enabled", + } + + +@router.post("/{schedule_id}/disable") +async def disable_schedule( + project_id: str, + schedule_id: str, + db: Session = Depends(get_db), +): + """ + Disable a schedule. + """ + service = get_recurring_schedule_service(db) + schedule = service.disable_schedule(schedule_id) + + if not schedule: + raise HTTPException(status_code=404, detail="Schedule not found") + + return { + "success": True, + "schedule_id": schedule.id, + "enabled": schedule.enabled, + "message": "Schedule disabled", + } + + +# ============================================================================ +# Preview Generated Actions +# ============================================================================ + +@router.post("/{schedule_id}/generate-preview") +async def preview_generated_actions( + project_id: str, + schedule_id: str, + db: Session = Depends(get_db), + days: int = Query(7, ge=1, le=30), +): + """ + Preview what actions would be generated without saving them. 
+ """ + schedule = db.query(RecurringSchedule).filter_by( + id=schedule_id, + project_id=project_id, + ).first() + + if not schedule: + raise HTTPException(status_code=404, detail="Schedule not found") + + service = get_recurring_schedule_service(db) + actions = service.generate_actions_for_schedule( + schedule, + horizon_days=days, + preview_only=True, + ) + + return { + "schedule_id": schedule_id, + "schedule_name": schedule.name, + "preview_days": days, + "actions": [ + { + "action_type": a.action_type, + "scheduled_time": a.scheduled_time.isoformat(), + "notes": a.notes, + } + for a in actions + ], + "action_count": len(actions), + } + + +# ============================================================================ +# Manual Generation Trigger +# ============================================================================ + +@router.post("/{schedule_id}/generate") +async def generate_actions_now( + project_id: str, + schedule_id: str, + db: Session = Depends(get_db), + days: int = Query(7, ge=1, le=30), +): + """ + Manually trigger action generation for a schedule. 
+ """ + schedule = db.query(RecurringSchedule).filter_by( + id=schedule_id, + project_id=project_id, + ).first() + + if not schedule: + raise HTTPException(status_code=404, detail="Schedule not found") + + if not schedule.enabled: + raise HTTPException(status_code=400, detail="Schedule is disabled") + + service = get_recurring_schedule_service(db) + actions = service.generate_actions_for_schedule( + schedule, + horizon_days=days, + preview_only=False, + ) + + return { + "success": True, + "schedule_id": schedule_id, + "generated_count": len(actions), + "message": f"Generated {len(actions)} scheduled actions", + } + + +# ============================================================================ +# HTML Partials +# ============================================================================ + +@router.get("/partials/list", response_class=HTMLResponse) +async def get_schedule_list_partial( + project_id: str, + request: Request, + db: Session = Depends(get_db), +): + """ + Return HTML partial for schedule list. + """ + schedules = db.query(RecurringSchedule).filter_by( + project_id=project_id + ).order_by(RecurringSchedule.created_at.desc()).all() + + # Enrich with location info + schedule_data = [] + for s in schedules: + location = db.query(MonitoringLocation).filter_by(id=s.location_id).first() + schedule_data.append({ + "schedule": s, + "location": location, + "pattern": json.loads(s.weekly_pattern) if s.weekly_pattern else None, + }) + + return templates.TemplateResponse("partials/projects/recurring_schedule_list.html", { + "request": request, + "project_id": project_id, + "schedules": schedule_data, + }) diff --git a/backend/services/alert_service.py b/backend/services/alert_service.py new file mode 100644 index 0000000..e460799 --- /dev/null +++ b/backend/services/alert_service.py @@ -0,0 +1,407 @@ +""" +Alert Service + +Manages in-app alerts for device status changes and system events. +Provides foundation for future notification channels (email, webhook). 
+""" + +import json +import uuid +import logging +from datetime import datetime, timedelta +from typing import Optional, List, Dict, Any + +from sqlalchemy.orm import Session +from sqlalchemy import and_, or_ + +from backend.models import Alert, RosterUnit + +logger = logging.getLogger(__name__) + + +class AlertService: + """ + Service for managing alerts. + + Handles alert lifecycle: + - Create alerts from various triggers + - Query active alerts + - Acknowledge/resolve/dismiss alerts + - (Future) Dispatch to notification channels + """ + + def __init__(self, db: Session): + self.db = db + + def create_alert( + self, + alert_type: str, + title: str, + message: str = None, + severity: str = "warning", + unit_id: str = None, + project_id: str = None, + location_id: str = None, + schedule_id: str = None, + metadata: dict = None, + expires_hours: int = 24, + ) -> Alert: + """ + Create a new alert. + + Args: + alert_type: Type of alert (device_offline, device_online, schedule_failed) + title: Short alert title + message: Detailed description + severity: info, warning, or critical + unit_id: Related unit ID (optional) + project_id: Related project ID (optional) + location_id: Related location ID (optional) + schedule_id: Related schedule ID (optional) + metadata: Additional JSON data + expires_hours: Hours until auto-expiry (default 24) + + Returns: + Created Alert instance + """ + alert = Alert( + id=str(uuid.uuid4()), + alert_type=alert_type, + title=title, + message=message, + severity=severity, + unit_id=unit_id, + project_id=project_id, + location_id=location_id, + schedule_id=schedule_id, + alert_metadata=json.dumps(metadata) if metadata else None, + status="active", + expires_at=datetime.utcnow() + timedelta(hours=expires_hours), + ) + + self.db.add(alert) + self.db.commit() + self.db.refresh(alert) + + logger.info(f"Created alert: {alert.title} ({alert.alert_type})") + return alert + + def create_device_offline_alert( + self, + unit_id: str, + 
consecutive_failures: int = 0, + last_error: str = None, + ) -> Optional[Alert]: + """ + Create alert when device becomes unreachable. + + Only creates if no active offline alert exists for this device. + + Args: + unit_id: The unit that went offline + consecutive_failures: Number of consecutive poll failures + last_error: Last error message from polling + + Returns: + Created Alert or None if alert already exists + """ + # Check if active offline alert already exists + existing = self.db.query(Alert).filter( + and_( + Alert.unit_id == unit_id, + Alert.alert_type == "device_offline", + Alert.status == "active", + ) + ).first() + + if existing: + logger.debug(f"Offline alert already exists for {unit_id}") + return None + + # Get unit info for title + unit = self.db.query(RosterUnit).filter_by(id=unit_id).first() + unit_name = unit.id if unit else unit_id + + # Determine severity based on failure count + severity = "critical" if consecutive_failures >= 5 else "warning" + + return self.create_alert( + alert_type="device_offline", + title=f"{unit_name} is offline", + message=f"Device has been unreachable after {consecutive_failures} failed connection attempts." + + (f" Last error: {last_error}" if last_error else ""), + severity=severity, + unit_id=unit_id, + metadata={ + "consecutive_failures": consecutive_failures, + "last_error": last_error, + }, + expires_hours=48, # Offline alerts stay longer + ) + + def resolve_device_offline_alert(self, unit_id: str) -> Optional[Alert]: + """ + Auto-resolve offline alert when device comes back online. + + Also creates an "device_online" info alert to notify user. 
+ + Args: + unit_id: The unit that came back online + + Returns: + The resolved Alert or None if no alert existed + """ + # Find active offline alert + alert = self.db.query(Alert).filter( + and_( + Alert.unit_id == unit_id, + Alert.alert_type == "device_offline", + Alert.status == "active", + ) + ).first() + + if not alert: + return None + + # Resolve the offline alert + alert.status = "resolved" + alert.resolved_at = datetime.utcnow() + self.db.commit() + + logger.info(f"Resolved offline alert for {unit_id}") + + # Create online notification + unit = self.db.query(RosterUnit).filter_by(id=unit_id).first() + unit_name = unit.id if unit else unit_id + + self.create_alert( + alert_type="device_online", + title=f"{unit_name} is back online", + message="Device connection has been restored.", + severity="info", + unit_id=unit_id, + expires_hours=6, # Info alerts expire quickly + ) + + return alert + + def create_schedule_failed_alert( + self, + schedule_id: str, + action_type: str, + unit_id: str = None, + error_message: str = None, + project_id: str = None, + location_id: str = None, + ) -> Alert: + """ + Create alert when a scheduled action fails. 
+ + Args: + schedule_id: The ScheduledAction or RecurringSchedule ID + action_type: start, stop, download + unit_id: Related unit + error_message: Error from execution + project_id: Related project + location_id: Related location + + Returns: + Created Alert + """ + return self.create_alert( + alert_type="schedule_failed", + title=f"Scheduled {action_type} failed", + message=error_message or f"The scheduled {action_type} action did not complete successfully.", + severity="warning", + unit_id=unit_id, + project_id=project_id, + location_id=location_id, + schedule_id=schedule_id, + metadata={"action_type": action_type}, + expires_hours=24, + ) + + def get_active_alerts( + self, + project_id: str = None, + unit_id: str = None, + alert_type: str = None, + min_severity: str = None, + limit: int = 50, + ) -> List[Alert]: + """ + Query active alerts with optional filters. + + Args: + project_id: Filter by project + unit_id: Filter by unit + alert_type: Filter by alert type + min_severity: Minimum severity (info, warning, critical) + limit: Maximum results + + Returns: + List of matching alerts + """ + query = self.db.query(Alert).filter(Alert.status == "active") + + if project_id: + query = query.filter(Alert.project_id == project_id) + + if unit_id: + query = query.filter(Alert.unit_id == unit_id) + + if alert_type: + query = query.filter(Alert.alert_type == alert_type) + + if min_severity: + # Map severity to numeric for comparison + severity_levels = {"info": 1, "warning": 2, "critical": 3} + min_level = severity_levels.get(min_severity, 1) + + if min_level == 2: + query = query.filter(Alert.severity.in_(["warning", "critical"])) + elif min_level == 3: + query = query.filter(Alert.severity == "critical") + + return query.order_by(Alert.created_at.desc()).limit(limit).all() + + def get_all_alerts( + self, + status: str = None, + project_id: str = None, + unit_id: str = None, + alert_type: str = None, + limit: int = 50, + offset: int = 0, + ) -> List[Alert]: + """ + 
Query all alerts with optional filters (includes non-active). + + Args: + status: Filter by status (active, acknowledged, resolved, dismissed) + project_id: Filter by project + unit_id: Filter by unit + alert_type: Filter by alert type + limit: Maximum results + offset: Pagination offset + + Returns: + List of matching alerts + """ + query = self.db.query(Alert) + + if status: + query = query.filter(Alert.status == status) + + if project_id: + query = query.filter(Alert.project_id == project_id) + + if unit_id: + query = query.filter(Alert.unit_id == unit_id) + + if alert_type: + query = query.filter(Alert.alert_type == alert_type) + + return ( + query.order_by(Alert.created_at.desc()) + .offset(offset) + .limit(limit) + .all() + ) + + def get_active_alert_count(self) -> int: + """Get count of active alerts for badge display.""" + return self.db.query(Alert).filter(Alert.status == "active").count() + + def acknowledge_alert(self, alert_id: str) -> Optional[Alert]: + """ + Mark alert as acknowledged. + + Args: + alert_id: Alert to acknowledge + + Returns: + Updated Alert or None if not found + """ + alert = self.db.query(Alert).filter_by(id=alert_id).first() + if not alert: + return None + + alert.status = "acknowledged" + alert.acknowledged_at = datetime.utcnow() + self.db.commit() + + logger.info(f"Acknowledged alert: {alert.title}") + return alert + + def dismiss_alert(self, alert_id: str) -> Optional[Alert]: + """ + Dismiss alert (user chose to ignore). + + Args: + alert_id: Alert to dismiss + + Returns: + Updated Alert or None if not found + """ + alert = self.db.query(Alert).filter_by(id=alert_id).first() + if not alert: + return None + + alert.status = "dismissed" + self.db.commit() + + logger.info(f"Dismissed alert: {alert.title}") + return alert + + def resolve_alert(self, alert_id: str) -> Optional[Alert]: + """ + Manually resolve an alert. 
+ + Args: + alert_id: Alert to resolve + + Returns: + Updated Alert or None if not found + """ + alert = self.db.query(Alert).filter_by(id=alert_id).first() + if not alert: + return None + + alert.status = "resolved" + alert.resolved_at = datetime.utcnow() + self.db.commit() + + logger.info(f"Resolved alert: {alert.title}") + return alert + + def cleanup_expired_alerts(self) -> int: + """ + Remove alerts past their expiration time. + + Returns: + Number of alerts cleaned up + """ + now = datetime.utcnow() + expired = self.db.query(Alert).filter( + and_( + Alert.expires_at.isnot(None), + Alert.expires_at < now, + Alert.status == "active", + ) + ).all() + + count = len(expired) + for alert in expired: + alert.status = "dismissed" + + if count > 0: + self.db.commit() + logger.info(f"Cleaned up {count} expired alerts") + + return count + + +def get_alert_service(db: Session) -> AlertService: + """Get an AlertService instance with the given database session.""" + return AlertService(db) diff --git a/backend/services/device_controller.py b/backend/services/device_controller.py index bb995e6..2024ba6 100644 --- a/backend/services/device_controller.py +++ b/backend/services/device_controller.py @@ -333,6 +333,76 @@ class DeviceController: else: raise UnsupportedDeviceTypeError(f"Unsupported device type: {device_type}") + # ======================================================================== + # Store/Index Management + # ======================================================================== + + async def increment_index( + self, + unit_id: str, + device_type: str, + ) -> Dict[str, Any]: + """ + Increment the store/index number on a device. + + For SLMs, this increments the store name to prevent "overwrite data?" prompts. + Should be called before starting a new measurement if auto_increment_index is enabled. 
+ + Args: + unit_id: Unit identifier + device_type: "slm" | "seismograph" + + Returns: + Response dict with old_index and new_index + """ + if device_type == "slm": + try: + return await self.slmm_client.increment_index(unit_id) + except SLMMClientError as e: + raise DeviceControllerError(f"SLMM error: {str(e)}") + + elif device_type == "seismograph": + # Seismographs may not have the same concept of store index + return { + "status": "not_applicable", + "message": "Index increment not applicable for seismographs", + "unit_id": unit_id, + } + + else: + raise UnsupportedDeviceTypeError(f"Unsupported device type: {device_type}") + + async def get_index_number( + self, + unit_id: str, + device_type: str, + ) -> Dict[str, Any]: + """ + Get current store/index number from device. + + Args: + unit_id: Unit identifier + device_type: "slm" | "seismograph" + + Returns: + Response dict with current index_number + """ + if device_type == "slm": + try: + return await self.slmm_client.get_index_number(unit_id) + except SLMMClientError as e: + raise DeviceControllerError(f"SLMM error: {str(e)}") + + elif device_type == "seismograph": + return { + "status": "not_applicable", + "message": "Index number not applicable for seismographs", + "unit_id": unit_id, + } + + else: + raise UnsupportedDeviceTypeError(f"Unsupported device type: {device_type}") + # ======================================================================== # Health Check # ======================================================================== diff --git a/backend/services/device_status_monitor.py b/backend/services/device_status_monitor.py new file mode 100644 index 0000000..7cf2772 --- /dev/null +++ b/backend/services/device_status_monitor.py @@ -0,0 +1,184 @@ +""" +Device Status Monitor + +Background task that monitors device reachability via SLMM polling status +and triggers alerts when devices go offline or come back online. + +This service bridges SLMM's device polling with Terra-View's alert system. 
+""" + +import asyncio +import logging +from datetime import datetime +from typing import Optional, Dict + +from backend.database import SessionLocal +from backend.services.slmm_client import get_slmm_client, SLMMClientError +from backend.services.alert_service import get_alert_service + +logger = logging.getLogger(__name__) + + +class DeviceStatusMonitor: + """ + Monitors device reachability via SLMM's polling status endpoint. + + Detects state transitions (online→offline, offline→online) and + triggers AlertService to create/resolve alerts. + + Usage: + monitor = DeviceStatusMonitor() + await monitor.start() # Start background monitoring + monitor.stop() # Stop monitoring + """ + + def __init__(self, check_interval: int = 60): + """ + Initialize the monitor. + + Args: + check_interval: Seconds between status checks (default: 60) + """ + self.check_interval = check_interval + self.running = False + self.task: Optional[asyncio.Task] = None + self.slmm_client = get_slmm_client() + + # Track previous device states to detect transitions + self._device_states: Dict[str, bool] = {} + + async def start(self): + """Start the monitoring background task.""" + if self.running: + logger.warning("DeviceStatusMonitor is already running") + return + + self.running = True + self.task = asyncio.create_task(self._monitor_loop()) + logger.info(f"DeviceStatusMonitor started (checking every {self.check_interval}s)") + + def stop(self): + """Stop the monitoring background task.""" + self.running = False + if self.task: + self.task.cancel() + logger.info("DeviceStatusMonitor stopped") + + async def _monitor_loop(self): + """Main monitoring loop.""" + while self.running: + try: + await self._check_all_devices() + except Exception as e: + logger.error(f"Error in device status monitor: {e}", exc_info=True) + + # Sleep in small intervals for graceful shutdown + for _ in range(self.check_interval): + if not self.running: + break + await asyncio.sleep(1) + + logger.info("DeviceStatusMonitor 
loop exited") + + async def _check_all_devices(self): + """ + Fetch polling status from SLMM and detect state transitions. + + Uses GET /api/slmm/_polling/status (proxied to SLMM) + """ + try: + # Get status from SLMM + status_response = await self.slmm_client.get_polling_status() + devices = status_response.get("devices", []) + + if not devices: + logger.debug("No devices in polling status response") + return + + db = SessionLocal() + try: + alert_service = get_alert_service(db) + + for device in devices: + unit_id = device.get("unit_id") + if not unit_id: + continue + + is_reachable = device.get("is_reachable", True) + previous_reachable = self._device_states.get(unit_id) + + # Skip if this is the first check (no previous state) + if previous_reachable is None: + self._device_states[unit_id] = is_reachable + logger.debug(f"Initial state for {unit_id}: reachable={is_reachable}") + continue + + # Detect offline transition (was online, now offline) + if previous_reachable and not is_reachable: + logger.warning(f"Device {unit_id} went OFFLINE") + alert_service.create_device_offline_alert( + unit_id=unit_id, + consecutive_failures=device.get("consecutive_failures", 0), + last_error=device.get("last_error"), + ) + + # Detect online transition (was offline, now online) + elif not previous_reachable and is_reachable: + logger.info(f"Device {unit_id} came back ONLINE") + alert_service.resolve_device_offline_alert(unit_id) + + # Update tracked state + self._device_states[unit_id] = is_reachable + + # Cleanup expired alerts while we're here + alert_service.cleanup_expired_alerts() + + finally: + db.close() + + except SLMMClientError as e: + logger.warning(f"Could not reach SLMM for status check: {e}") + except Exception as e: + logger.error(f"Error checking device status: {e}", exc_info=True) + + def get_tracked_devices(self) -> Dict[str, bool]: + """ + Get the current tracked device states. 
+ + Returns: + Dict mapping unit_id to is_reachable status + """ + return dict(self._device_states) + + def clear_tracked_devices(self): + """Clear all tracked device states (useful for testing).""" + self._device_states.clear() + + +# Singleton instance +_monitor_instance: Optional[DeviceStatusMonitor] = None + + +def get_device_status_monitor() -> DeviceStatusMonitor: + """ + Get the device status monitor singleton instance. + + Returns: + DeviceStatusMonitor instance + """ + global _monitor_instance + if _monitor_instance is None: + _monitor_instance = DeviceStatusMonitor() + return _monitor_instance + + +async def start_device_status_monitor(): + """Start the global device status monitor.""" + monitor = get_device_status_monitor() + await monitor.start() + + +def stop_device_status_monitor(): + """Stop the global device status monitor.""" + monitor = get_device_status_monitor() + monitor.stop() diff --git a/backend/services/recurring_schedule_service.py b/backend/services/recurring_schedule_service.py new file mode 100644 index 0000000..b606329 --- /dev/null +++ b/backend/services/recurring_schedule_service.py @@ -0,0 +1,550 @@ +""" +Recurring Schedule Service + +Manages recurring schedule definitions and generates ScheduledAction +instances based on patterns (weekly calendar, simple interval). +""" + +import json +import uuid +import logging +from datetime import datetime, timedelta, date, time +from typing import Optional, List, Dict, Any, Tuple +from zoneinfo import ZoneInfo + +from sqlalchemy.orm import Session +from sqlalchemy import and_ + +from backend.models import RecurringSchedule, ScheduledAction, MonitoringLocation, UnitAssignment + +logger = logging.getLogger(__name__) + +# Day name mapping +DAY_NAMES = ["monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"] + + +class RecurringScheduleService: + """ + Service for managing recurring schedules and generating ScheduledActions. 
class RecurringScheduleService:
    """
    Service for managing recurring schedules and generating ScheduledActions.

    Supports two schedule types:
    - weekly_calendar: Specific days with start/end times
    - simple_interval: Daily stop/download/restart cycles for 24/7 monitoring
    """

    def __init__(self, db: Session):
        self.db = db

    def create_schedule(
        self,
        project_id: str,
        location_id: str,
        name: str,
        schedule_type: str,
        device_type: str = "slm",
        unit_id: Optional[str] = None,
        weekly_pattern: Optional[dict] = None,
        interval_type: Optional[str] = None,
        cycle_time: Optional[str] = None,
        include_download: bool = True,
        auto_increment_index: bool = True,
        timezone: str = "America/New_York",
    ) -> RecurringSchedule:
        """
        Create a new recurring schedule.

        Args:
            project_id: Project ID
            location_id: Monitoring location ID
            name: Schedule name
            schedule_type: "weekly_calendar" or "simple_interval"
            device_type: "slm" or "seismograph"
            unit_id: Specific unit (optional, can use assignment)
            weekly_pattern: Dict of day patterns for weekly_calendar
            interval_type: "daily" or "hourly" for simple_interval
            cycle_time: Time string "HH:MM" for cycle
            include_download: Whether to download data on cycle
            auto_increment_index: Whether to auto-increment store index before start
            timezone: Timezone for schedule times

        Returns:
            Created RecurringSchedule
        """
        schedule = RecurringSchedule(
            id=str(uuid.uuid4()),
            project_id=project_id,
            location_id=location_id,
            unit_id=unit_id,
            name=name,
            schedule_type=schedule_type,
            device_type=device_type,
            weekly_pattern=json.dumps(weekly_pattern) if weekly_pattern else None,
            interval_type=interval_type,
            cycle_time=cycle_time,
            include_download=include_download,
            auto_increment_index=auto_increment_index,
            enabled=True,
            timezone=timezone,
        )

        # Calculate next occurrence
        schedule.next_occurrence = self._calculate_next_occurrence(schedule)

        self.db.add(schedule)
        self.db.commit()
        self.db.refresh(schedule)

        logger.info(f"Created recurring schedule: {name} ({schedule_type})")
        return schedule

    def update_schedule(
        self,
        schedule_id: str,
        **kwargs,
    ) -> Optional[RecurringSchedule]:
        """
        Update a recurring schedule.

        Args:
            schedule_id: Schedule to update
            **kwargs: Fields to update

        Returns:
            Updated schedule or None
        """
        schedule = self.db.query(RecurringSchedule).filter_by(id=schedule_id).first()
        if not schedule:
            return None

        for key, value in kwargs.items():
            if hasattr(schedule, key):
                if key == "weekly_pattern" and isinstance(value, dict):
                    value = json.dumps(value)
                setattr(schedule, key, value)

        # Recalculate next occurrence
        schedule.next_occurrence = self._calculate_next_occurrence(schedule)

        self.db.commit()
        self.db.refresh(schedule)

        logger.info(f"Updated recurring schedule: {schedule.name}")
        return schedule

    def delete_schedule(self, schedule_id: str) -> bool:
        """
        Delete a recurring schedule.

        Note: previously-generated pending ScheduledActions are NOT removed,
        because ScheduledAction has no recurring_schedule_id column yet to
        identify them by. They will execute (or be cleaned up) normally.

        Args:
            schedule_id: Schedule to delete

        Returns:
            True if deleted, False if not found
        """
        schedule = self.db.query(RecurringSchedule).filter_by(id=schedule_id).first()
        if not schedule:
            return False

        self.db.delete(schedule)
        self.db.commit()

        logger.info(f"Deleted recurring schedule: {schedule.name}")
        return True

    def enable_schedule(self, schedule_id: str) -> Optional[RecurringSchedule]:
        """Enable a disabled schedule."""
        return self.update_schedule(schedule_id, enabled=True)

    def disable_schedule(self, schedule_id: str) -> Optional[RecurringSchedule]:
        """Disable a schedule."""
        return self.update_schedule(schedule_id, enabled=False)

    def generate_actions_for_schedule(
        self,
        schedule: RecurringSchedule,
        horizon_days: int = 7,
        preview_only: bool = False,
    ) -> List[ScheduledAction]:
        """
        Generate ScheduledAction entries for the next N days based on pattern.

        Args:
            schedule: The recurring schedule
            horizon_days: Days ahead to generate
            preview_only: If True, don't save to DB (for preview)

        Returns:
            List of generated ScheduledAction instances
        """
        if not schedule.enabled:
            return []

        if schedule.schedule_type == "weekly_calendar":
            actions = self._generate_weekly_calendar_actions(schedule, horizon_days)
        elif schedule.schedule_type == "simple_interval":
            actions = self._generate_interval_actions(schedule, horizon_days)
        else:
            logger.warning(f"Unknown schedule type: {schedule.schedule_type}")
            return []

        if not preview_only and actions:
            for action in actions:
                self.db.add(action)

            schedule.last_generated_at = datetime.utcnow()
            schedule.next_occurrence = self._calculate_next_occurrence(schedule)

            self.db.commit()
            logger.info(f"Generated {len(actions)} actions for schedule: {schedule.name}")

        return actions

    def _generate_weekly_calendar_actions(
        self,
        schedule: RecurringSchedule,
        horizon_days: int,
    ) -> List[ScheduledAction]:
        """
        Generate actions from weekly calendar pattern.

        Pattern format:
            {
                "monday": {"enabled": true, "start": "19:00", "end": "07:00"},
                "tuesday": {"enabled": false},
                ...
            }
        """
        if not schedule.weekly_pattern:
            return []

        try:
            pattern = json.loads(schedule.weekly_pattern)
        except json.JSONDecodeError:
            logger.error(f"Invalid weekly_pattern JSON for schedule {schedule.id}")
            return []

        actions = []
        tz = ZoneInfo(schedule.timezone)
        now_utc = datetime.utcnow()
        now_local = now_utc.replace(tzinfo=ZoneInfo("UTC")).astimezone(tz)

        # Get unit_id (from schedule or assignment)
        unit_id = self._resolve_unit_id(schedule)

        for day_offset in range(horizon_days):
            check_date = now_local.date() + timedelta(days=day_offset)
            day_name = DAY_NAMES[check_date.weekday()]
            day_config = pattern.get(day_name, {})

            if not day_config.get("enabled", False):
                continue

            start_time_str = day_config.get("start")
            end_time_str = day_config.get("end")

            if not start_time_str or not end_time_str:
                continue

            # Parse times
            start_time = self._parse_time(start_time_str)
            end_time = self._parse_time(end_time_str)

            if not start_time or not end_time:
                continue

            # Create start datetime in local timezone
            start_local = datetime.combine(check_date, start_time, tzinfo=tz)
            start_utc = start_local.astimezone(ZoneInfo("UTC")).replace(tzinfo=None)

            # Handle overnight schedules (end time is next day)
            if end_time <= start_time:
                end_date = check_date + timedelta(days=1)
            else:
                end_date = check_date

            end_local = datetime.combine(end_date, end_time, tzinfo=tz)
            end_utc = end_local.astimezone(ZoneInfo("UTC")).replace(tzinfo=None)

            # Skip if start time has already passed
            if start_utc <= now_utc:
                continue

            # Check if action already exists
            if self._action_exists(schedule.project_id, schedule.location_id, "start", start_utc):
                continue

            # Build notes with automation metadata
            start_notes = json.dumps({
                "schedule_name": schedule.name,
                "schedule_id": schedule.id,
                "auto_increment_index": schedule.auto_increment_index,
            })

            # Create START action
            start_action = ScheduledAction(
                id=str(uuid.uuid4()),
                project_id=schedule.project_id,
                location_id=schedule.location_id,
                unit_id=unit_id,
                action_type="start",
                device_type=schedule.device_type,
                scheduled_time=start_utc,
                execution_status="pending",
                notes=start_notes,
            )
            actions.append(start_action)

            # Create STOP action
            stop_notes = json.dumps({
                "schedule_name": schedule.name,
                "schedule_id": schedule.id,
            })
            stop_action = ScheduledAction(
                id=str(uuid.uuid4()),
                project_id=schedule.project_id,
                location_id=schedule.location_id,
                unit_id=unit_id,
                action_type="stop",
                device_type=schedule.device_type,
                scheduled_time=end_utc,
                execution_status="pending",
                notes=stop_notes,
            )
            actions.append(stop_action)

            # Create DOWNLOAD action if enabled (1 minute after stop)
            if schedule.include_download:
                download_time = end_utc + timedelta(minutes=1)
                download_notes = json.dumps({
                    "schedule_name": schedule.name,
                    "schedule_id": schedule.id,
                    "schedule_type": "weekly_calendar",
                })
                download_action = ScheduledAction(
                    id=str(uuid.uuid4()),
                    project_id=schedule.project_id,
                    location_id=schedule.location_id,
                    unit_id=unit_id,
                    action_type="download",
                    device_type=schedule.device_type,
                    scheduled_time=download_time,
                    execution_status="pending",
                    notes=download_notes,
                )
                actions.append(download_action)

        return actions

    def _generate_interval_actions(
        self,
        schedule: RecurringSchedule,
        horizon_days: int,
    ) -> List[ScheduledAction]:
        """
        Generate actions from simple interval pattern.

        For daily cycles: stop, download (optional), start at cycle_time each day.
        """
        if not schedule.cycle_time:
            return []

        cycle_time = self._parse_time(schedule.cycle_time)
        if not cycle_time:
            return []

        actions = []
        tz = ZoneInfo(schedule.timezone)
        now_utc = datetime.utcnow()
        now_local = now_utc.replace(tzinfo=ZoneInfo("UTC")).astimezone(tz)

        # Get unit_id
        unit_id = self._resolve_unit_id(schedule)

        for day_offset in range(horizon_days):
            check_date = now_local.date() + timedelta(days=day_offset)

            # Create cycle datetime in local timezone
            cycle_local = datetime.combine(check_date, cycle_time, tzinfo=tz)
            cycle_utc = cycle_local.astimezone(ZoneInfo("UTC")).replace(tzinfo=None)

            # Skip if time has passed
            if cycle_utc <= now_utc:
                continue

            # Check if action already exists
            if self._action_exists(schedule.project_id, schedule.location_id, "stop", cycle_utc):
                continue

            # Build notes with metadata
            stop_notes = json.dumps({
                "schedule_name": schedule.name,
                "schedule_id": schedule.id,
                "cycle_type": "daily",
            })

            # Create STOP action
            stop_action = ScheduledAction(
                id=str(uuid.uuid4()),
                project_id=schedule.project_id,
                location_id=schedule.location_id,
                unit_id=unit_id,
                action_type="stop",
                device_type=schedule.device_type,
                scheduled_time=cycle_utc,
                execution_status="pending",
                notes=stop_notes,
            )
            actions.append(stop_action)

            # Create DOWNLOAD action if enabled (1 minute after stop)
            if schedule.include_download:
                download_time = cycle_utc + timedelta(minutes=1)
                download_notes = json.dumps({
                    "schedule_name": schedule.name,
                    "schedule_id": schedule.id,
                    "cycle_type": "daily",
                })
                download_action = ScheduledAction(
                    id=str(uuid.uuid4()),
                    project_id=schedule.project_id,
                    location_id=schedule.location_id,
                    unit_id=unit_id,
                    action_type="download",
                    device_type=schedule.device_type,
                    scheduled_time=download_time,
                    execution_status="pending",
                    notes=download_notes,
                )
                actions.append(download_action)

            # Create START action (2 minutes after stop, or 1 minute after download)
            start_offset = 2 if schedule.include_download else 1
            start_time = cycle_utc + timedelta(minutes=start_offset)
            start_notes = json.dumps({
                "schedule_name": schedule.name,
                "schedule_id": schedule.id,
                "cycle_type": "daily",
                "auto_increment_index": schedule.auto_increment_index,
            })
            start_action = ScheduledAction(
                id=str(uuid.uuid4()),
                project_id=schedule.project_id,
                location_id=schedule.location_id,
                unit_id=unit_id,
                action_type="start",
                device_type=schedule.device_type,
                scheduled_time=start_time,
                execution_status="pending",
                notes=start_notes,
            )
            actions.append(start_action)

        return actions

    def _calculate_next_occurrence(self, schedule: RecurringSchedule) -> Optional[datetime]:
        """Calculate when the next action should occur."""
        if not schedule.enabled:
            return None

        tz = ZoneInfo(schedule.timezone)
        now_utc = datetime.utcnow()
        now_local = now_utc.replace(tzinfo=ZoneInfo("UTC")).astimezone(tz)

        if schedule.schedule_type == "weekly_calendar" and schedule.weekly_pattern:
            try:
                pattern = json.loads(schedule.weekly_pattern)
            except (TypeError, json.JSONDecodeError):
                # Narrowed from a bare except: only malformed/missing JSON is expected here.
                return None

            # Find next enabled day
            for day_offset in range(8):  # Check up to a week ahead
                check_date = now_local.date() + timedelta(days=day_offset)
                day_name = DAY_NAMES[check_date.weekday()]
                day_config = pattern.get(day_name, {})

                if day_config.get("enabled") and day_config.get("start"):
                    start_time = self._parse_time(day_config["start"])
                    if start_time:
                        start_local = datetime.combine(check_date, start_time, tzinfo=tz)
                        start_utc = start_local.astimezone(ZoneInfo("UTC")).replace(tzinfo=None)
                        if start_utc > now_utc:
                            return start_utc

        elif schedule.schedule_type == "simple_interval" and schedule.cycle_time:
            cycle_time = self._parse_time(schedule.cycle_time)
            if cycle_time:
                # Find next cycle time
                for day_offset in range(2):
                    check_date = now_local.date() + timedelta(days=day_offset)
                    cycle_local = datetime.combine(check_date, cycle_time, tzinfo=tz)
                    cycle_utc = cycle_local.astimezone(ZoneInfo("UTC")).replace(tzinfo=None)
                    if cycle_utc > now_utc:
                        return cycle_utc

        return None

    def _resolve_unit_id(self, schedule: RecurringSchedule) -> Optional[str]:
        """Get unit_id from schedule or active assignment."""
        if schedule.unit_id:
            return schedule.unit_id

        # Try to get from active assignment
        assignment = self.db.query(UnitAssignment).filter(
            and_(
                UnitAssignment.location_id == schedule.location_id,
                UnitAssignment.status == "active",
            )
        ).first()

        return assignment.unit_id if assignment else None

    def _action_exists(
        self,
        project_id: str,
        location_id: str,
        action_type: str,
        scheduled_time: datetime,
    ) -> bool:
        """Check if an action already exists for this time slot."""
        # Allow 5-minute window for duplicate detection
        time_window_start = scheduled_time - timedelta(minutes=5)
        time_window_end = scheduled_time + timedelta(minutes=5)

        exists = self.db.query(ScheduledAction).filter(
            and_(
                ScheduledAction.project_id == project_id,
                ScheduledAction.location_id == location_id,
                ScheduledAction.action_type == action_type,
                ScheduledAction.scheduled_time >= time_window_start,
                ScheduledAction.scheduled_time <= time_window_end,
                ScheduledAction.execution_status == "pending",
            )
        ).first()

        return exists is not None

    @staticmethod
    def _parse_time(time_str: str) -> Optional[time]:
        """Parse time string "HH:MM" to time object."""
        try:
            parts = time_str.split(":")
            return time(int(parts[0]), int(parts[1]))
        except (ValueError, IndexError):
            return None

    def get_schedules_for_project(self, project_id: str) -> List[RecurringSchedule]:
        """Get all recurring schedules for a project."""
        return self.db.query(RecurringSchedule).filter_by(project_id=project_id).all()

    def get_enabled_schedules(self) -> List[RecurringSchedule]:
        """Get all enabled recurring schedules."""
        return self.db.query(RecurringSchedule).filter_by(enabled=True).all()
self.db.query(RecurringSchedule).filter_by(enabled=True).all() + + +def get_recurring_schedule_service(db: Session) -> RecurringScheduleService: + """Get a RecurringScheduleService instance.""" + return RecurringScheduleService(db) diff --git a/backend/services/scheduler.py b/backend/services/scheduler.py index 3bcde91..d0852bd 100644 --- a/backend/services/scheduler.py +++ b/backend/services/scheduler.py @@ -4,22 +4,29 @@ Scheduler Service Executes scheduled actions for Projects system. Monitors pending scheduled actions and executes them by calling device modules (SLMM/SFM). +Extended to support recurring schedules: +- Generates ScheduledActions from RecurringSchedule patterns +- Cleans up old completed/failed actions + This service runs as a background task in FastAPI, checking for pending actions every minute and executing them when their scheduled time arrives. """ import asyncio import json +import logging from datetime import datetime, timedelta from typing import Optional, List, Dict, Any from sqlalchemy.orm import Session from sqlalchemy import and_ from backend.database import SessionLocal -from backend.models import ScheduledAction, RecordingSession, MonitoringLocation, Project +from backend.models import ScheduledAction, RecordingSession, MonitoringLocation, Project, RecurringSchedule from backend.services.device_controller import get_device_controller, DeviceControllerError import uuid +logger = logging.getLogger(__name__) + class SchedulerService: """ @@ -62,11 +69,26 @@ class SchedulerService: async def _run_loop(self): """Main scheduler loop.""" + # Track when we last generated recurring actions (do this once per hour) + last_generation_check = datetime.utcnow() - timedelta(hours=1) + while self.running: try: + # Execute pending actions await self.execute_pending_actions() + + # Generate actions from recurring schedules (every hour) + now = datetime.utcnow() + if (now - last_generation_check).total_seconds() >= 3600: + await 
self.generate_recurring_actions() + last_generation_check = now + + # Cleanup old actions (also every hour, during generation cycle) + if (now - last_generation_check).total_seconds() < 60: + await self.cleanup_old_actions() + except Exception as e: - print(f"Scheduler error: {e}") + logger.error(f"Scheduler error: {e}", exc_info=True) # Continue running even if there's an error await asyncio.sleep(self.check_interval) @@ -194,11 +216,34 @@ class SchedulerService: db: Session, ) -> Dict[str, Any]: """Execute a 'start' action.""" + # Parse action notes for automation settings + auto_increment_index = False + try: + if action.notes: + notes_data = json.loads(action.notes) + auto_increment_index = notes_data.get("auto_increment_index", False) + except json.JSONDecodeError: + pass # Notes is plain text, not JSON + + # If auto_increment_index is enabled, increment the store index before starting + increment_response = None + if auto_increment_index and action.device_type == "slm": + try: + logger.info(f"Auto-incrementing store index for unit {unit_id}") + increment_response = await self.device_controller.increment_index( + unit_id, + action.device_type, + ) + logger.info(f"Index incremented: {increment_response}") + except Exception as e: + logger.warning(f"Failed to increment index for {unit_id}: {e}") + # Continue with start anyway - don't fail the whole action + # Start recording via device controller response = await self.device_controller.start_recording( unit_id, action.device_type, - config={}, # TODO: Load config from action.notes or metadata + config={}, ) # Create recording session @@ -210,7 +255,11 @@ class SchedulerService: session_type="sound" if action.device_type == "slm" else "vibration", started_at=datetime.utcnow(), status="recording", - session_metadata=json.dumps({"scheduled_action_id": action.id}), + session_metadata=json.dumps({ + "scheduled_action_id": action.id, + "auto_increment_index": auto_increment_index, + "increment_response": 
increment_response, + }), ) db.add(session) @@ -218,6 +267,8 @@ class SchedulerService: "status": "started", "session_id": session.id, "device_response": response, + "index_incremented": auto_increment_index, + "increment_response": increment_response, } async def _execute_stop( @@ -295,6 +346,90 @@ class SchedulerService: "device_response": response, } + # ======================================================================== + # Recurring Schedule Generation + # ======================================================================== + + async def generate_recurring_actions(self) -> int: + """ + Generate ScheduledActions from all enabled recurring schedules. + + Runs once per hour to generate actions for the next 7 days. + + Returns: + Total number of actions generated + """ + db = SessionLocal() + total_generated = 0 + + try: + from backend.services.recurring_schedule_service import get_recurring_schedule_service + + service = get_recurring_schedule_service(db) + schedules = service.get_enabled_schedules() + + if not schedules: + logger.debug("No enabled recurring schedules found") + return 0 + + logger.info(f"Generating actions for {len(schedules)} recurring schedule(s)") + + for schedule in schedules: + try: + actions = service.generate_actions_for_schedule(schedule, horizon_days=7) + total_generated += len(actions) + except Exception as e: + logger.error(f"Error generating actions for schedule {schedule.id}: {e}") + + if total_generated > 0: + logger.info(f"Generated {total_generated} scheduled actions from recurring schedules") + + except Exception as e: + logger.error(f"Error in generate_recurring_actions: {e}", exc_info=True) + finally: + db.close() + + return total_generated + + async def cleanup_old_actions(self, retention_days: int = 30) -> int: + """ + Remove old completed/failed actions to prevent database bloat. 
+ + Args: + retention_days: Keep actions newer than this many days + + Returns: + Number of actions cleaned up + """ + db = SessionLocal() + cleaned = 0 + + try: + cutoff = datetime.utcnow() - timedelta(days=retention_days) + + old_actions = db.query(ScheduledAction).filter( + and_( + ScheduledAction.execution_status.in_(["completed", "failed", "cancelled"]), + ScheduledAction.executed_at < cutoff, + ) + ).all() + + cleaned = len(old_actions) + for action in old_actions: + db.delete(action) + + if cleaned > 0: + db.commit() + logger.info(f"Cleaned up {cleaned} old scheduled actions (>{retention_days} days)") + + except Exception as e: + logger.error(f"Error cleaning up old actions: {e}") + db.rollback() + finally: + db.close() + + return cleaned + # ======================================================================== # Manual Execution (for testing/debugging) # ======================================================================== diff --git a/backend/services/slmm_client.py b/backend/services/slmm_client.py index f04badf..ce3c5d5 100644 --- a/backend/services/slmm_client.py +++ b/backend/services/slmm_client.py @@ -276,6 +276,124 @@ class SLMMClient: """ return await self._request("POST", f"/{unit_id}/reset") + # ======================================================================== + # Store/Index Management + # ======================================================================== + + async def get_index_number(self, unit_id: str) -> Dict[str, Any]: + """ + Get current store/index number from device. + + Args: + unit_id: Unit identifier + + Returns: + Dict with current index_number (store name) + """ + return await self._request("GET", f"/{unit_id}/index-number") + + async def set_index_number( + self, + unit_id: str, + index_number: int, + ) -> Dict[str, Any]: + """ + Set store/index number on device. 
+ + Args: + unit_id: Unit identifier + index_number: New index number to set + + Returns: + Confirmation response + """ + return await self._request( + "PUT", + f"/{unit_id}/index-number", + data={"index_number": index_number}, + ) + + async def check_overwrite_status(self, unit_id: str) -> Dict[str, Any]: + """ + Check if data exists at the current store index. + + Args: + unit_id: Unit identifier + + Returns: + Dict with: + - overwrite_status: "None" (safe) or "Exist" (would overwrite) + - will_overwrite: bool + - safe_to_store: bool + """ + return await self._request("GET", f"/{unit_id}/overwrite-check") + + async def increment_index(self, unit_id: str, max_attempts: int = 100) -> Dict[str, Any]: + """ + Find and set the next available (unused) store/index number. + + Checks the current index - if it would overwrite existing data, + increments until finding an unused index number. + + Args: + unit_id: Unit identifier + max_attempts: Maximum number of indices to try before giving up + + Returns: + Dict with old_index, new_index, and attempts_made + """ + # Get current index + current = await self.get_index_number(unit_id) + old_index = current.get("index_number", 0) + + # Check if current index is safe + overwrite_check = await self.check_overwrite_status(unit_id) + if overwrite_check.get("safe_to_store", False): + # Current index is safe, no need to increment + return { + "success": True, + "old_index": old_index, + "new_index": old_index, + "unit_id": unit_id, + "already_safe": True, + "attempts_made": 0, + } + + # Need to find an unused index + attempts = 0 + test_index = old_index + 1 + + while attempts < max_attempts: + # Set the new index + await self.set_index_number(unit_id, test_index) + + # Check if this index is safe + overwrite_check = await self.check_overwrite_status(unit_id) + attempts += 1 + + if overwrite_check.get("safe_to_store", False): + return { + "success": True, + "old_index": old_index, + "new_index": test_index, + "unit_id": unit_id, + 
"already_safe": False, + "attempts_made": attempts, + } + + # Try next index (wrap around at 9999) + test_index = (test_index + 1) % 10000 + + # Avoid infinite loops if we've wrapped around + if test_index == old_index: + break + + # Could not find a safe index + raise SLMMDeviceError( + f"Could not find unused store index for {unit_id} after {attempts} attempts. " + f"Consider downloading and clearing data from the device." + ) + # ======================================================================== # Device Settings # ======================================================================== @@ -387,6 +505,73 @@ class SLMMClient: } return await self._request("POST", f"/{unit_id}/ftp/download", data=data) + # ======================================================================== + # Polling Status (for device monitoring/alerts) + # ======================================================================== + + async def get_polling_status(self) -> Dict[str, Any]: + """ + Get global polling status from SLMM. + + Returns device reachability information for all polled devices. + Used by DeviceStatusMonitor to detect offline/online transitions. + + Returns: + Dict with devices list containing: + - unit_id + - is_reachable + - consecutive_failures + - last_poll_attempt + - last_success + - last_error + """ + try: + async with httpx.AsyncClient(timeout=self.timeout) as client: + response = await client.get(f"{self.base_url}/api/nl43/_polling/status") + response.raise_for_status() + return response.json() + except httpx.ConnectError: + raise SLMMConnectionError("Cannot connect to SLMM for polling status") + except Exception as e: + raise SLMMClientError(f"Failed to get polling status: {str(e)}") + + async def get_device_polling_config(self, unit_id: str) -> Dict[str, Any]: + """ + Get polling configuration for a specific device. 
+ + Args: + unit_id: Unit identifier + + Returns: + Dict with poll_enabled and poll_interval_seconds + """ + return await self._request("GET", f"/{unit_id}/polling/config") + + async def update_device_polling_config( + self, + unit_id: str, + poll_enabled: Optional[bool] = None, + poll_interval_seconds: Optional[int] = None, + ) -> Dict[str, Any]: + """ + Update polling configuration for a device. + + Args: + unit_id: Unit identifier + poll_enabled: Enable/disable polling + poll_interval_seconds: Polling interval (10-3600) + + Returns: + Updated config + """ + config = {} + if poll_enabled is not None: + config["poll_enabled"] = poll_enabled + if poll_interval_seconds is not None: + config["poll_interval_seconds"] = poll_interval_seconds + + return await self._request("PUT", f"/{unit_id}/polling/config", data=config) + # ======================================================================== # Health Check # ======================================================================== diff --git a/templates/partials/alerts/alert_dropdown.html b/templates/partials/alerts/alert_dropdown.html new file mode 100644 index 0000000..ea1985e --- /dev/null +++ b/templates/partials/alerts/alert_dropdown.html @@ -0,0 +1,87 @@ + + + +
+ {% if alerts %} + {% for item in alerts %} +
+
+ + {% if item.alert.severity == 'critical' %} + + + + + + {% elif item.alert.severity == 'warning' %} + + + + + + {% else %} + + + + + + {% endif %} + +
+

+ {{ item.alert.title }} +

+ {% if item.alert.message %} +

+ {{ item.alert.message }} +

+ {% endif %} +

+ {{ item.time_ago }} +

+
+ + +
+ + +
+
+
+ {% endfor %} + {% else %} +
+ + + +

No active alerts

+

All systems operational

+
+ {% endif %} +
+ + +{% if total_count > 0 %} + +{% endif %} diff --git a/templates/partials/alerts/alert_list.html b/templates/partials/alerts/alert_list.html new file mode 100644 index 0000000..2dca6a4 --- /dev/null +++ b/templates/partials/alerts/alert_list.html @@ -0,0 +1,125 @@ + + + +
+ {% if alerts %} + {% for item in alerts %} +
+
+ +
+ {% if item.alert.severity == 'critical' %} +
+ + + +
+ {% elif item.alert.severity == 'warning' %} +
+ + + +
+ {% else %} +
+ + + +
+ {% endif %} +
+ + +
+
+

+ {{ item.alert.title }} +

+ + {% if item.alert.status == 'active' %} + + Active + + {% elif item.alert.status == 'acknowledged' %} + + Acknowledged + + {% elif item.alert.status == 'resolved' %} + + Resolved + + {% elif item.alert.status == 'dismissed' %} + + Dismissed + + {% endif %} +
+ + {% if item.alert.message %} +

+ {{ item.alert.message }} +

+ {% endif %} + +
+ {{ item.time_ago }} + {% if item.alert.unit_id %} + + + + + {{ item.alert.unit_id }} + + {% endif %} + {{ item.alert.alert_type | replace('_', ' ') }} +
+
+ + + {% if item.alert.status == 'active' %} +
+ + + +
+ {% endif %} +
+
+ {% endfor %} + {% else %} +
+ + + +

No alerts

+

+ {% if status_filter %} + No {{ status_filter }} alerts found. + {% else %} + All systems are operating normally. + {% endif %} +

+
+ {% endif %} +
diff --git a/templates/partials/projects/recurring_schedule_list.html b/templates/partials/projects/recurring_schedule_list.html new file mode 100644 index 0000000..2a46984 --- /dev/null +++ b/templates/partials/projects/recurring_schedule_list.html @@ -0,0 +1,151 @@ + + + +
+ {% if schedules %} + {% for item in schedules %} +
+
+
+
+

+ {{ item.schedule.name }} +

+ + + {% if item.schedule.schedule_type == 'weekly_calendar' %} + + Weekly + + {% else %} + + 24/7 Cycle + + {% endif %} + + + {% if item.schedule.enabled %} + + Active + + {% else %} + + Disabled + + {% endif %} +
+ + + {% if item.location %} + + {% endif %} + + +
+ {% if item.schedule.schedule_type == 'weekly_calendar' and item.pattern %} +
+ {% set days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'] %} + {% set day_abbr = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] %} + {% for day in days %} + {% if item.pattern.get(day, {}).get('enabled') %} + + {{ day_abbr[loop.index0] }} + {{ item.pattern[day].get('start', '') }}-{{ item.pattern[day].get('end', '') }} + + {% endif %} + {% endfor %} +
+ {% elif item.schedule.schedule_type == 'simple_interval' %} +
+ Cycle at {{ item.schedule.cycle_time or '00:00' }} daily + {% if item.schedule.include_download %} + (with download) + {% endif %} +
+ {% endif %} + + {% if item.schedule.next_occurrence %} +
+ Next: + {{ item.schedule.next_occurrence.strftime('%Y-%m-%d %H:%M') }} {{ item.schedule.timezone }} +
+ {% endif %} +
+
+ + +
+ {% if item.schedule.enabled %} + + {% else %} + + {% endif %} + + + + +
+
+
+ {% endfor %} + {% else %} +
+ + + +

No recurring schedules

+

+ Create a schedule to automate monitoring start/stop times. +

+ +
+ {% endif %} +
+ + diff --git a/templates/partials/projects/schedule_calendar.html b/templates/partials/projects/schedule_calendar.html new file mode 100644 index 0000000..9d2b9dc --- /dev/null +++ b/templates/partials/projects/schedule_calendar.html @@ -0,0 +1,231 @@ + + + +
+
+

Weekly Schedule

+

+ Select which days to monitor and set start/end times for each day. + For overnight monitoring (e.g., 7pm to 7am), the end time will be on the following day. +

+
+ + +
+ {% set days = [ + ('monday', 'Monday'), + ('tuesday', 'Tuesday'), + ('wednesday', 'Wednesday'), + ('thursday', 'Thursday'), + ('friday', 'Friday'), + ('saturday', 'Saturday'), + ('sunday', 'Sunday') + ] %} + + {% for day_key, day_name in days %} +
+ + + + +
+ + + + to + + + + + +
+
+ {% endfor %} +
+ + +
+ Quick select: + + + + +
+ + +
+
Automation Options
+ +
+ +
+ +
+ + +
+ +
+
+
+
+ + diff --git a/templates/partials/projects/schedule_interval.html b/templates/partials/projects/schedule_interval.html new file mode 100644 index 0000000..eea72fa --- /dev/null +++ b/templates/partials/projects/schedule_interval.html @@ -0,0 +1,158 @@ + + + +
+
+

Continuous Monitoring (24/7)

+

+ For uninterrupted monitoring. The device will automatically stop, download data, + and restart at the configured cycle time each day. +

+
+ + +
+
+ + + +
+

How it works:

+
    +
  1. At the cycle time, the measurement will stop
  2. +
  3. If enabled, data will be downloaded via FTP
  4. +
  5. The measurement will restart automatically
  6. +
+
+
+
+ + +
+ +
+ + + Time when stop/download/restart cycle runs + +
+

+ Recommended: midnight (00:00) to minimize disruption to data collection +

+
+ + +
+ +
+ + +
+ +
+ + + + + +
+
Cycle Sequence Preview
+
+
+ 1 + Stop +
+ + + +
+ 2 + Download +
+ + + +
+ 3 + Start +
+
+

+ At 00:00: Stop → Download (1 min) → Start (2 min) +

+
+
+ + diff --git a/templates/projects/detail.html b/templates/projects/detail.html index 5fd4432..f1e71ba 100644 --- a/templates/projects/detail.html +++ b/templates/projects/detail.html @@ -132,23 +132,55 @@ + + +