From 45ded5353049c070eb95b6e79e7cfcc2ae4bf39e Mon Sep 17 00:00:00 2001 From: Matt Date: Fri, 13 Feb 2026 22:45:18 -0500 Subject: [PATCH] Add in forecasting, lifecycle phases, associated component and script changes --- .gitignore | 5 + .../scripts/calculate-metrics-new.js | 8 + .../forecast_engine.cpython-314.pyc | Bin 0 -> 67163 bytes .../scripts/forecast/forecast_engine.py | 1612 +++++++++++++++++ .../scripts/forecast/requirements.txt | 5 + .../scripts/forecast/run_forecast.js | 128 ++ .../scripts/forecast/sql/create_tables.sql | 51 + inventory-server/scripts/import-from-prod.js | 2 +- inventory-server/scripts/import/orders.js | 10 +- inventory-server/scripts/import/products.js | 7 +- .../scripts/import/purchase-orders.js | 14 +- inventory-server/scripts/import/utils.js | 31 + .../metrics-new/update_daily_snapshots.sql | 70 +- .../update_lifecycle_forecasts.sql | 131 ++ inventory-server/src/routes/dashboard.js | 764 ++++++-- inventory-server/src/routes/products.js | 45 + .../src/components/overview/BestSellers.tsx | 2 +- .../components/overview/ForecastAccuracy.tsx | 294 +++ .../components/overview/ForecastMetrics.tsx | 223 ++- .../components/overview/OverstockMetrics.tsx | 53 +- .../components/overview/PurchaseMetrics.tsx | 52 +- .../overview/ReplenishmentMetrics.tsx | 52 +- .../src/components/overview/SalesMetrics.tsx | 140 +- .../src/components/overview/StockMetrics.tsx | 192 +- .../overview/TopReplenishProducts.tsx | 2 +- .../src/components/products/ProductDetail.tsx | 103 +- inventory/src/pages/Overview.tsx | 6 +- inventory/src/utils/lifecyclePhases.ts | 15 + inventory/tsconfig.tsbuildinfo | 2 +- 29 files changed, 3643 insertions(+), 376 deletions(-) create mode 100644 inventory-server/scripts/forecast/__pycache__/forecast_engine.cpython-314.pyc create mode 100644 inventory-server/scripts/forecast/forecast_engine.py create mode 100644 inventory-server/scripts/forecast/requirements.txt create mode 100644 inventory-server/scripts/forecast/run_forecast.js create mode 100644 inventory-server/scripts/forecast/sql/create_tables.sql create mode 100644 inventory-server/scripts/metrics-new/update_lifecycle_forecasts.sql create mode 100644 inventory/src/components/overview/ForecastAccuracy.tsx create mode 100644 inventory/src/utils/lifecyclePhases.ts diff --git a/.gitignore b/.gitignore index 2c98b09..df5a263 100644 --- a/.gitignore +++ b/.gitignore @@ -80,3 +80,8 @@ chat-migration*/ **/chat-migration*/ chat-migration*/** **/chat-migration*/** + +venv/ +venv/** +**/venv/* +**/venv/** \ No newline at end of file diff --git a/inventory-server/scripts/calculate-metrics-new.js b/inventory-server/scripts/calculate-metrics-new.js index c0dd718..4920586 100644 --- a/inventory-server/scripts/calculate-metrics-new.js +++ b/inventory-server/scripts/calculate-metrics-new.js @@ -11,6 +11,7 @@ const RUN_PERIODIC_METRICS = true; const RUN_BRAND_METRICS = true; const RUN_VENDOR_METRICS = true; const RUN_CATEGORY_METRICS = true; +const RUN_LIFECYCLE_FORECASTS = true; // Maximum execution time for the entire sequence (e.g., 90 minutes) const MAX_EXECUTION_TIME_TOTAL = 90 * 60 * 1000; @@ -592,6 +593,13 @@ async function runAllCalculations() { historyType: 'product_metrics', statusModule: 'product_metrics' }, + { + run: RUN_LIFECYCLE_FORECASTS, + name: 'Lifecycle Forecast Update', + sqlFile: 'metrics-new/update_lifecycle_forecasts.sql', + historyType: 'lifecycle_forecasts', + statusModule: 'lifecycle_forecasts' + }, { run: RUN_PERIODIC_METRICS, name: 'Periodic Metrics Update', diff --git 
a/inventory-server/scripts/forecast/__pycache__/forecast_engine.cpython-314.pyc b/inventory-server/scripts/forecast/__pycache__/forecast_engine.cpython-314.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..419abe2817c1943570c0311014fcbf0a344e1a0c
GIT binary patch
literal 67163
[base85 binary payload elided: compiled CPython 3.14 bytecode, 67163 bytes, not human-readable]

diff --git a/inventory-server/scripts/forecast/forecast_engine.py b/inventory-server/scripts/forecast/forecast_engine.py
[the opening of this diff (file mode, imports, configuration constants, DB helpers, and the start of the stale-run cleanup helper) did not survive extraction; the readable hunk resumes mid-function]
+    if cleaned > 0:
+        log.info(f"Cleaned up {cleaned} stale 'running' forecast run(s)")
+
+
+# ---------------------------------------------------------------------------
+# Decay curve model: sales(t) = A * exp(-λt) + C
+# ---------------------------------------------------------------------------
+def decay_model(t, amplitude, decay_rate, baseline):
+    """Parametric exponential decay with baseline."""
+    return amplitude * np.exp(-decay_rate * t) + baseline
+
+
+def fit_decay_curve(weekly_medians):
+    """
+    Fit the decay model to median weekly sales data.
+ + Args: + weekly_medians: array of median sales per week (index = week number) + + Returns: + (amplitude, decay_rate, baseline, r_squared) or None if fit fails + """ + weeks = np.arange(len(weekly_medians), dtype=float) + y = np.array(weekly_medians, dtype=float) + + # Skip if all zeros or too few points + if len(y) < 3 or np.max(y) == 0: + return None + + # Initial guesses + a0 = float(np.max(y)) + c0 = float(np.min(y[len(y)//2:])) # baseline from second half + lam0 = 0.3 # moderate decay + + try: + popt, _ = curve_fit( + decay_model, weeks, y, + p0=[a0, lam0, c0], + bounds=([0, 0.01, 0], [a0 * 5, 5.0, a0]), + maxfev=5000, + ) + amplitude, decay_rate, baseline = popt + + # R-squared + y_pred = decay_model(weeks, *popt) + ss_res = np.sum((y - y_pred) ** 2) + ss_tot = np.sum((y - np.mean(y)) ** 2) + r_sq = 1 - (ss_res / ss_tot) if ss_tot > 0 else 0.0 + + return float(amplitude), float(decay_rate), float(baseline), float(r_sq) + except (RuntimeError, ValueError) as e: + log.debug(f"Curve fit failed: {e}") + return None + + +# --------------------------------------------------------------------------- +# Day-of-week indices +# --------------------------------------------------------------------------- +def compute_dow_indices(conn): + """ + Compute day-of-week revenue indices from recent order history. + + Returns a dict mapping ISO weekday (1=Mon ... 7=Sun) to a multiplier + normalized so they sum to 7.0 (average = 1.0). This means applying them + preserves the weekly total while reshaping the daily distribution. + """ + sql = """ + SELECT + EXTRACT(ISODOW FROM o.date)::int AS dow, + SUM(o.price * o.quantity) AS revenue + FROM orders o + WHERE o.canceled IS DISTINCT FROM TRUE + AND o.date >= CURRENT_DATE - INTERVAL '1 day' * %s + GROUP BY 1 + ORDER BY 1 + """ + + df = execute_query(conn, sql, [DOW_LOOKBACK_DAYS]) + + if df.empty or len(df) < 7: + log.warning("Insufficient order data for DOW indices, using flat distribution") + return {d: 1.0 for d in range(1, 8)} + + total = df['revenue'].sum() + avg = total / 7.0 + + indices = {} + for _, row in df.iterrows(): + dow = int(row['dow']) + idx = float(row['revenue']) / avg if avg > 0 else 1.0 + indices[dow] = round(idx, 4) + + # Fill any missing days + for d in range(1, 8): + if d not in indices: + indices[d] = 1.0 + + log.info(f"DOW indices: Mon={indices[1]:.3f} Tue={indices[2]:.3f} Wed={indices[3]:.3f} " + f"Thu={indices[4]:.3f} Fri={indices[5]:.3f} Sat={indices[6]:.3f} Sun={indices[7]:.3f}") + return indices + + +# --------------------------------------------------------------------------- +# Monthly seasonal indices +# --------------------------------------------------------------------------- +def compute_monthly_seasonal_indices(conn): + """ + Compute monthly seasonal indices from recent order revenue. + + Returns a dict mapping month number (1-12) to a multiplier normalized + so they average 1.0. Months with above-average revenue get >1, below get <1. 
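+
+    Worked example (hypothetical revenue): if December revenue over the
+    lookback window is 1.8x the average month's, indices[12] ≈ 1.8, and
+    every December forecast day is later scaled by that factor in
+    generate_all_forecasts.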
+ """ + sql = """ + SELECT + EXTRACT(MONTH FROM o.date)::int AS month, + SUM(o.price * o.quantity) AS revenue + FROM orders o + WHERE o.canceled IS DISTINCT FROM TRUE + AND o.date >= CURRENT_DATE - INTERVAL '1 day' * %s + GROUP BY 1 + ORDER BY 1 + """ + + df = execute_query(conn, sql, [SEASONAL_LOOKBACK_DAYS]) + + if df.empty or len(df) < 6: + log.warning("Insufficient data for seasonal indices, using flat distribution") + return {m: 1.0 for m in range(1, 13)} + + total = df['revenue'].sum() + n_months = len(df) + avg = total / n_months + + indices = {} + for _, row in df.iterrows(): + month = int(row['month']) + idx = float(row['revenue']) / avg if avg > 0 else 1.0 + indices[month] = round(idx, 4) + + # Fill any missing months with 1.0 + for m in range(1, 13): + if m not in indices: + indices[m] = 1.0 + + present = [f"{m}={indices[m]:.3f}" for m in range(1, 13)] + log.info(f"Monthly seasonal indices: {', '.join(present)}") + return indices + + +# --------------------------------------------------------------------------- +# Phase 1: Build brand-category reference curves +# --------------------------------------------------------------------------- +DEAL_CATEGORIES = frozenset([ + 'Deals', 'Black Friday', 'Week 1', 'Week 2', 'Week 3', + '28 Off', '5 Dollar Deals', '10 Dollar Deals', 'Fall Sale', +]) + + +def build_reference_curves(conn): + """ + Build decay curves for each brand (and brand x category at every hierarchy level). + + For category curves, we load each product's full set of category assignments + (across all hierarchy levels), then fit brand×cat_id curves wherever we have + enough products. This gives granular curves like "49 and Market × 12x12 Paper Pads" + alongside coarser fallbacks like "49 and Market × Paper". + + Returns DataFrame of curves written to brand_lifecycle_curves. 
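+
+    Each stored curve row carries the fitted (amplitude, decay_rate,
+    baseline), the fit's R², the product sample size, and the brand's
+    median first-week and pre-order statistics that compute_scale_factor
+    later uses to scale the curve per product.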
+ """ + log.info("Building reference curves from historical launches...") + + # Get daily sales aligned by days-since-first-received for recent launches + # (no category join here — we attach categories separately) + sales_sql = """ + WITH recent_launches AS ( + SELECT pm.pid, p.brand + FROM product_metrics pm + JOIN products p ON p.pid = pm.pid + WHERE p.visible = true + AND p.brand IS NOT NULL + AND pm.date_first_received >= NOW() - INTERVAL '1 day' * %s + AND pm.date_first_received < NOW() - INTERVAL '14 days' + ), + daily_sales AS ( + SELECT + rl.pid, rl.brand, + dps.snapshot_date, + COALESCE(dps.units_sold, 0) AS units_sold, + (dps.snapshot_date - pm.date_first_received::date) AS day_offset + FROM recent_launches rl + JOIN product_metrics pm ON pm.pid = rl.pid + JOIN daily_product_snapshots dps ON dps.pid = rl.pid + WHERE dps.snapshot_date >= pm.date_first_received::date + AND dps.snapshot_date < pm.date_first_received::date + INTERVAL '1 week' * %s + ) + SELECT brand, pid, + FLOOR(day_offset / 7)::int AS week_num, + SUM(units_sold) AS weekly_sales + FROM daily_sales + WHERE day_offset >= 0 + GROUP BY brand, pid, week_num + ORDER BY brand, pid, week_num + """ + + df = execute_query(conn, sales_sql, [CURVE_HISTORY_DAYS, CURVE_WINDOW_WEEKS]) + if df.empty: + log.warning("No launch data found for reference curves") + return pd.DataFrame() + + log.info(f"Loaded {len(df)} weekly sales records from {df['pid'].nunique()} products across {df['brand'].nunique()} brands") + + # Load all category assignments for these products (every hierarchy level) + launch_pids = df['pid'].unique().tolist() + cat_sql = """ + SELECT pc.pid, ch.cat_id, ch.name AS cat_name, ch.level AS cat_level + FROM product_categories pc + JOIN category_hierarchy ch ON ch.cat_id = pc.cat_id + WHERE pc.pid = ANY(%s) + AND ch.name NOT IN %s + ORDER BY pc.pid, ch.level DESC + """ + cat_df = execute_query(conn, cat_sql, [launch_pids, tuple(DEAL_CATEGORIES)]) + # Build pid -> list of (cat_id, cat_name, cat_level) + pid_cats = {} + for _, row in cat_df.iterrows(): + pid = int(row['pid']) + if pid not in pid_cats: + pid_cats[pid] = [] + pid_cats[pid].append((int(row['cat_id']), row['cat_name'], int(row['cat_level']))) + + # Also get pre-order stats per brand (median pre-order sales AND accumulation window). + # Uses de-facto preorders: any product that had orders before date_first_received, + # regardless of the preorder_count flag. This gives us 6000+ completed cycles vs ~19 + # from the explicit flag alone. 
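+    # Note: PERCENTILE_CONT(0.5) in the query below is PostgreSQL's
+    # continuous median; with an even row count it interpolates between the
+    # two middle values, so the result need not be an observed value.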
+ preorder_sql = """ + WITH preorder_stats AS ( + SELECT p.pid, p.brand, + COALESCE((SELECT SUM(o.quantity) FROM orders o + WHERE o.pid = p.pid AND o.canceled IS DISTINCT FROM TRUE + AND o.date < pm.date_first_received), 0) AS preorder_units, + GREATEST(EXTRACT(DAY FROM pm.date_first_received - MIN(o.date)), 1) AS preorder_days + FROM products p + JOIN product_metrics pm ON pm.pid = p.pid + LEFT JOIN orders o ON o.pid = p.pid AND o.canceled IS DISTINCT FROM TRUE + AND o.date < pm.date_first_received + WHERE p.visible = true AND p.brand IS NOT NULL + AND pm.date_first_received IS NOT NULL + AND pm.date_first_received >= NOW() - INTERVAL '1 day' * %s + GROUP BY p.pid, p.brand, pm.date_first_received + ) + SELECT brand, + PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY preorder_units) AS median_preorder_sales, + PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY preorder_days) AS median_preorder_days + FROM preorder_stats + WHERE preorder_units > 0 + GROUP BY brand + HAVING COUNT(*) >= 3 + """ + + preorder_df = execute_query(conn, preorder_sql, [CURVE_HISTORY_DAYS]) + preorder_map = dict(zip(preorder_df['brand'], preorder_df['median_preorder_sales'])) if not preorder_df.empty else {} + preorder_days_map = dict(zip(preorder_df['brand'], preorder_df['median_preorder_days'])) if not preorder_df.empty else {} + + curves = [] + + def _fit_and_append(group_df, brand, cat_id=None, cat_name=None, cat_level=None): + """Helper: fit a decay curve for a group and append to curves list.""" + product_count = group_df['pid'].nunique() + min_products = MIN_PRODUCTS_FOR_CURVE if cat_id is None else MIN_PRODUCTS_FOR_BRAND_CAT + + if product_count < min_products: + return False + + weekly = group_df.groupby('week_num')['weekly_sales'].median() + if len(weekly) < 4: + return False + + full_weeks = weekly.reindex(range(CURVE_WINDOW_WEEKS), fill_value=0.0) + weekly_arr = full_weeks.values[:CURVE_WINDOW_WEEKS] + + result = fit_decay_curve(weekly_arr) + if result is None: + return False + + amplitude, decay_rate, baseline, r_sq = result + + # Quality gate: only store curves above the reliability threshold + if r_sq < MIN_R_SQUARED: + return False + + first_week = group_df[group_df['week_num'] == 0].groupby('pid')['weekly_sales'].sum() + median_fw = float(first_week.median()) if len(first_week) > 0 else 0.0 + + curves.append({ + 'brand': brand, + 'root_category': cat_name, # kept for readability; cat_id is the real key + 'cat_id': cat_id, + 'category_level': cat_level, + 'amplitude': amplitude, + 'decay_rate': decay_rate, + 'baseline': baseline, + 'r_squared': r_sq, + 'sample_size': product_count, + 'median_first_week_sales': median_fw, + 'median_preorder_sales': preorder_map.get(brand), + 'median_preorder_days': preorder_days_map.get(brand), + }) + return True + + # 1. Fit brand-level curves (aggregate across all categories) + for brand, brand_df in df.groupby('brand'): + _fit_and_append(brand_df, brand) + + # 2. 
Fit brand × category curves at every hierarchy level + # Build a mapping of (brand, cat_id) -> list of pids + brand_cat_pids = {} + for pid, cats in pid_cats.items(): + brand_rows = df[df['pid'] == pid] + if brand_rows.empty: + continue + brand = brand_rows.iloc[0]['brand'] + for cat_id, cat_name, cat_level in cats: + key = (brand, cat_id) + if key not in brand_cat_pids: + brand_cat_pids[key] = {'cat_name': cat_name, 'cat_level': cat_level, 'pids': set()} + brand_cat_pids[key]['pids'].add(pid) + + cat_curves_fitted = 0 + for (brand, cat_id), info in brand_cat_pids.items(): + group_df = df[(df['brand'] == brand) & (df['pid'].isin(info['pids']))] + if _fit_and_append(group_df, brand, cat_id=cat_id, + cat_name=info['cat_name'], cat_level=info['cat_level']): + cat_curves_fitted += 1 + + curves_df = pd.DataFrame(curves) + if curves_df.empty: + log.warning("No curves could be fitted") + return curves_df + + # Write to database + with conn.cursor() as cur: + cur.execute("TRUNCATE brand_lifecycle_curves") + for _, row in curves_df.iterrows(): + cur.execute(""" + INSERT INTO brand_lifecycle_curves + (brand, root_category, cat_id, category_level, + amplitude, decay_rate, baseline, + r_squared, sample_size, median_first_week_sales, + median_preorder_sales, median_preorder_days, computed_at) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, NOW()) + """, ( + row['brand'], + None if pd.isna(row.get('root_category')) else row.get('root_category'), + None if pd.isna(row.get('cat_id')) else int(row['cat_id']), + None if pd.isna(row.get('category_level')) else int(row['category_level']), + row['amplitude'], row['decay_rate'], row['baseline'], + row['r_squared'], row['sample_size'], + row['median_first_week_sales'], + None if pd.isna(row.get('median_preorder_sales')) else row.get('median_preorder_sales'), + None if pd.isna(row.get('median_preorder_days')) else row.get('median_preorder_days'), + )) + conn.commit() + + brand_only = curves_df[curves_df['cat_id'].isna()].shape[0] + cat_total = curves_df[curves_df['cat_id'].notna()].shape[0] + log.info(f"Wrote {len(curves_df)} reference curves ({cat_total} brand+category across all levels, {brand_only} brand-only)") + return curves_df + + +# --------------------------------------------------------------------------- +# Phase 2: Classify products and generate forecasts +# --------------------------------------------------------------------------- +def load_products(conn): + """ + Load all visible products with their metrics for classification. + + Also loads each product's full category ancestry (all hierarchy levels), + stored as a list of cat_ids ordered deepest-first for hierarchical curve lookup. + """ + sql = """ + SELECT + pm.pid, + p.brand, + pm.current_price, + pm.current_stock, + pm.sales_velocity_daily, + pm.sales_30d, + pm.date_first_received, + pm.date_last_sold, + p.preorder_count, + COALESCE(p.baskets, 0) AS baskets, + EXTRACT(DAY FROM NOW() - pm.date_first_received) AS age_days + FROM product_metrics pm + JOIN products p ON p.pid = pm.pid + WHERE p.visible = true + """ + products = execute_query(conn, sql) + + # Load category assignments for all products (every hierarchy level, deepest first) + cat_sql = """ + SELECT pc.pid, ch.cat_id, ch.level AS cat_level + FROM product_categories pc + JOIN category_hierarchy ch ON ch.cat_id = pc.cat_id + WHERE ch.name NOT IN %s + ORDER BY pc.pid, ch.level DESC + """ + cat_df = execute_query(conn, cat_sql, [tuple(DEAL_CATEGORIES)]) + + # Build pid -> [cat_id, ...] 
ordered deepest-first + pid_cat_ids = {} + for _, row in cat_df.iterrows(): + pid = int(row['pid']) + if pid not in pid_cat_ids: + pid_cat_ids[pid] = [] + pid_cat_ids[pid].append(int(row['cat_id'])) + + # Attach category list to each product row as a Python object column + products['cat_ids'] = products['pid'].apply(lambda p: pid_cat_ids.get(int(p), [])) + + log.info(f"Loaded {len(products)} products, " + f"{sum(1 for c in products['cat_ids'] if len(c) > 0)}/{len(products)} with category data") + return products + + +def classify_phase(row): + """Classify a product into its lifecycle phase.""" + preorder = (row.get('preorder_count') or 0) > 0 + age = row.get('age_days') + velocity = row.get('sales_velocity_daily') or 0 + first_received = row.get('date_first_received') + + # Pre-order: has preorder_count and either not received or very recently received + if preorder and (first_received is None or (age is not None and age <= LAUNCH_AGE_DAYS)): + return 'preorder' + + # No first_received date — can't determine lifecycle + if first_received is None or age is None: + if velocity > MATURE_VELOCITY_THRESHOLD: + return 'mature' + if velocity > 0: + return 'slow_mover' + return 'dormant' + + if age <= LAUNCH_AGE_DAYS: + return 'launch' + elif age <= DECAY_AGE_DAYS: + if velocity > 0: + return 'decay' + return 'dormant' + else: + if velocity > MATURE_VELOCITY_THRESHOLD: + return 'mature' + if velocity > 0: + return 'slow_mover' + return 'dormant' + + +def get_curve_for_product(product, curves_df): + """ + Look up the best matching reference curve for a product. + + Uses hierarchical category fallback: tries the product's deepest category + first, walks up the hierarchy to coarser categories, then falls back to + brand-only. This ensures e.g. "49 and Market × 12x12 Paper Pads" is + preferred over "49 and Market × Paper Crafts" when available. + + Skips curves with R² below MIN_R_SQUARED (unreliable fits). + Returns (amplitude, decay_rate, baseline, median_first_week, median_preorder, median_preorder_days) or None. + """ + brand = product.get('brand') + + if brand is None or curves_df.empty: + return None + + # Filter to this brand's reliable curves once + brand_curves = curves_df[ + (curves_df['brand'] == brand) + & (curves_df['r_squared'] >= MIN_R_SQUARED) + ] + if brand_curves.empty: + return None + + def _extract(row): + return ( + float(row['amplitude']), + float(row['decay_rate']), + float(row['baseline']), + float(row['median_first_week_sales'] or 1), + float(row['median_preorder_sales']) if pd.notna(row.get('median_preorder_sales')) else None, + float(row['median_preorder_days']) if pd.notna(row.get('median_preorder_days')) else None, + ) + + # Try each category from deepest to shallowest + cat_ids = product.get('cat_ids') or [] + for cat_id in cat_ids: + match = brand_curves[brand_curves['cat_id'] == cat_id] + if not match.empty: + return _extract(match.iloc[0]) + + # Fall back to brand-only curve (cat_id is NaN/None) + brand_only = brand_curves[brand_curves['cat_id'].isna()] + if brand_only.empty: + return None + + return _extract(brand_only.iloc[0]) + + +def forecast_from_curve(curve_params, scale_factor, age_days, horizon_days): + """ + Generate daily forecast from a scaled decay curve. + + The scale factor is applied only to the decay envelope, NOT the baseline. + This prevents hot products from getting inflated long-tail forecasts. + + Formula: daily_value = (A/7) * exp(-λ * t_weeks) * scale + (C/7) + + Args: + curve_params: (amplitude, decay_rate, baseline, ...) 
+ scale_factor: multiplier for the decay envelope + age_days: current product age in days + horizon_days: how many days to forecast + + Returns: + array of daily forecast values + """ + amplitude, decay_rate, baseline = curve_params[:3] + # The curve is in weekly units; convert to daily + daily_amp = amplitude / 7.0 + daily_baseline = baseline / 7.0 + + forecasts = [] + for d in range(horizon_days): + t_weeks = (age_days + d) / 7.0 + daily_value = daily_amp * np.exp(-decay_rate * t_weeks) * scale_factor + daily_baseline + forecasts.append(max(0.0, daily_value)) + + return np.array(forecasts) + + +# --------------------------------------------------------------------------- +# Batch data loading (eliminates N+1 per-product queries) +# --------------------------------------------------------------------------- +def batch_load_product_data(conn, products): + """ + Batch-load all per-product data needed for forecasting in a few queries + instead of one query per product. + + Returns dict with keys: + preorder_sales: {pid: units} — pre-order units (before first received) + launch_sales: {pid: units} — first 14 days of sales + decay_velocity: {pid: avg} — recent 30-day daily average + mature_history: {pid: DataFrame} — daily sales history for SES + """ + data = { + 'preorder_sales': {}, + 'preorder_days': {}, + 'launch_sales': {}, + 'decay_velocity': {}, + 'mature_history': {}, + } + + # Pre-order sales: orders placed BEFORE first received date + # Also compute the number of days pre-orders accumulated over (for daily-rate normalization) + preorder_pids = products[products['phase'] == 'preorder']['pid'].tolist() + if preorder_pids: + sql = """ + SELECT o.pid, + COALESCE(SUM(o.quantity), 0) AS preorder_units, + GREATEST(EXTRACT(DAY FROM NOW() - MIN(o.date)), 1) AS preorder_days + FROM orders o + LEFT JOIN product_metrics pm ON pm.pid = o.pid + WHERE o.pid = ANY(%s) + AND o.canceled IS DISTINCT FROM TRUE + AND (pm.date_first_received IS NULL OR o.date < pm.date_first_received) + GROUP BY o.pid + """ + df = execute_query(conn, sql, [preorder_pids]) + for _, row in df.iterrows(): + data['preorder_sales'][int(row['pid'])] = float(row['preorder_units']) + data['preorder_days'][int(row['pid'])] = float(row['preorder_days']) + log.info(f"Batch loaded pre-order sales for {len(data['preorder_sales'])}/{len(preorder_pids)} preorder products") + + # Launch sales: first 14 days after first received + launch_pids = products[products['phase'] == 'launch']['pid'].tolist() + if launch_pids: + sql = """ + SELECT dps.pid, COALESCE(SUM(dps.units_sold), 0) AS total_sold + FROM daily_product_snapshots dps + JOIN product_metrics pm ON pm.pid = dps.pid + WHERE dps.pid = ANY(%s) + AND dps.snapshot_date >= pm.date_first_received::date + AND dps.snapshot_date < pm.date_first_received::date + INTERVAL '14 days' + GROUP BY dps.pid + """ + df = execute_query(conn, sql, [launch_pids]) + for _, row in df.iterrows(): + data['launch_sales'][int(row['pid'])] = float(row['total_sold']) + log.info(f"Batch loaded launch sales for {len(data['launch_sales'])}/{len(launch_pids)} launch products") + + # Decay recent velocity: average daily sales over last 30 days + decay_pids = products[products['phase'] == 'decay']['pid'].tolist() + if decay_pids: + sql = """ + SELECT dps.pid, AVG(COALESCE(dps.units_sold, 0)) AS avg_daily + FROM daily_product_snapshots dps + WHERE dps.pid = ANY(%s) + AND dps.snapshot_date >= CURRENT_DATE - INTERVAL '30 days' + GROUP BY dps.pid + """ + df = execute_query(conn, sql, [decay_pids]) + for _, row in 
df.iterrows(): + data['decay_velocity'][int(row['pid'])] = float(row['avg_daily']) + log.info(f"Batch loaded decay velocity for {len(data['decay_velocity'])}/{len(decay_pids)} decay products") + + # Mature daily history: full time series for exponential smoothing + mature_pids = products[products['phase'] == 'mature']['pid'].tolist() + if mature_pids: + sql = """ + SELECT dps.pid, dps.snapshot_date, COALESCE(dps.units_sold, 0) AS units_sold + FROM daily_product_snapshots dps + WHERE dps.pid = ANY(%s) + AND dps.snapshot_date >= CURRENT_DATE - INTERVAL '1 day' * %s + ORDER BY dps.pid, dps.snapshot_date + """ + df = execute_query(conn, sql, [mature_pids, EXP_SMOOTHING_WINDOW]) + for pid, group in df.groupby('pid'): + data['mature_history'][int(pid)] = group.copy() + log.info(f"Batch loaded history for {len(data['mature_history'])}/{len(mature_pids)} mature products") + + return data + + +# --------------------------------------------------------------------------- +# Per-product scale factor computation +# --------------------------------------------------------------------------- +def compute_scale_factor(phase, product, curve_info, batch_data): + """ + Compute the per-product scale factor for the brand curve. + + The scale factor captures how much more/less this product sells compared + to the brand average. It's applied to the decay envelope only (not baseline). + """ + if curve_info is None: + return 1.0 + + pid = int(product['pid']) + amplitude, decay_rate, baseline, median_fw, median_preorder, med_preorder_days = curve_info + + if phase == 'preorder': + preorder_units = batch_data['preorder_sales'].get(pid, 0) + preorder_days = batch_data['preorder_days'].get(pid, 1) + baskets = product.get('baskets') or 0 + + # Too few days of accumulation → noisy signal, use brand average + if preorder_days < MIN_PREORDER_DAYS and preorder_units > 0: + scale = 1.0 + return max(0.1, min(scale, 5.0)) + + # Use order units as primary signal; fall back to baskets if no orders + demand_signal = preorder_units if preorder_units > 0 else baskets + signal_days = preorder_days if preorder_units > 0 else max(preorder_days, 14) + + # Normalize to daily rate before comparing to brand median daily rate. + # Use the brand's stored median pre-order window for the denominator + # (not the current product's signal_days) to avoid systematic bias. 
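+        # Illustrative numbers: 30 pre-order units over 10 days is 3.0/day;
+        # against a brand median of 21 units over a median 7-day window
+        # (also 3.0/day), the scale factor below works out to ≈ 1.0.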
+ demand_daily = demand_signal / max(signal_days, 1) + if median_preorder and median_preorder > 0: + brand_preorder_window = max(med_preorder_days or signal_days, 1) + median_preorder_daily = median_preorder / brand_preorder_window + scale = demand_daily / median_preorder_daily + elif median_fw > 0 and demand_daily > 0: + median_fw_daily = median_fw / 7.0 + scale = demand_daily / median_fw_daily + else: + scale = 1.0 + + elif phase == 'launch': + actual_sold = batch_data['launch_sales'].get(pid, 0) + age = max(0, product.get('age_days') or 0) + if median_fw > 0 and actual_sold > 0: + days_observed = min(age, 14) + if days_observed > 0: + projected_first_week = (actual_sold / days_observed) * 7 + scale = projected_first_week / median_fw + else: + scale = 1.0 + else: + scale = 1.0 + + elif phase == 'decay': + actual_velocity = batch_data['decay_velocity'].get(pid, 0) + age = max(0, product.get('age_days') or 0) + t_weeks = age / 7.0 + # With baseline fix: value = (A/7)*exp(-λt)*scale + C/7 + # Solve for scale: scale = (actual - C/7) / ((A/7)*exp(-λt)) + decay_part = (amplitude / 7.0) * np.exp(-decay_rate * t_weeks) + # Use a higher floor for the denominator at high ages to prevent + # extreme scale factors when the decay envelope is nearly zero + min_decay = max(0.01, amplitude / 70.0) # at least 10% of week-1 daily value + if decay_part > min_decay and actual_velocity > 0: + scale = (actual_velocity - baseline / 7.0) / decay_part + elif actual_velocity > 0: + scale = 1.0 + else: + scale = 1.0 + + else: + scale = 1.0 + + # Clamp to avoid extreme values — tighter for preorder since the signal + # is noisier (pre-orders accumulate differently than post-launch sales) + max_scale = 5.0 if phase == 'preorder' else 8.0 + return max(0.1, min(scale, max_scale)) + + +# --------------------------------------------------------------------------- +# Mature product forecast (Holt's double exponential smoothing) +# --------------------------------------------------------------------------- +def forecast_mature(product, history_df): + """ + Forecast for a mature/evergreen product using Holt's linear trend method + on recent daily sales history. Holt's adds a trend component over SES, + so it naturally pulls the forecast back down after a sales spike instead + of persisting the inflated level. + + Falls back to SES then flat velocity on failure. + """ + pid = int(product['pid']) + velocity = product.get('sales_velocity_daily') or 0 + + if history_df is None or history_df.empty or len(history_df) < 7: + # Not enough data — flat velocity + return np.full(FORECAST_HORIZON_DAYS, velocity) + + # Fill date gaps with 0 sales (days where product had no snapshot = no sales) + hist = history_df.copy() + hist['snapshot_date'] = pd.to_datetime(hist['snapshot_date']) + hist = hist.set_index('snapshot_date').resample('D').sum().fillna(0) + series = hist['units_sold'].values.astype(float) + + # Need at least 2 non-zero values for smoothing + if np.count_nonzero(series) < 2: + return np.full(FORECAST_HORIZON_DAYS, velocity) + + try: + # Holt's with damped trend: the phi parameter dampens the trend over + # the horizon so forecasts converge to a level instead of extrapolating + # a linear trend indefinitely. + model = Holt(series, initialization_method='estimated', damped_trend=True) + fit = model.fit(optimized=True) + forecast = fit.forecast(FORECAST_HORIZON_DAYS) + forecast = np.maximum(forecast, 0) + return forecast + except Exception: + # Fall back to SES if Holt's fails (e.g. 
insufficient data points) + try: + model = SimpleExpSmoothing(series, initialization_method='estimated') + fit = model.fit(optimized=True) + forecast = fit.forecast(FORECAST_HORIZON_DAYS) + forecast = np.maximum(forecast, 0) + return forecast + except Exception as e: + log.debug(f"ExpSmoothing failed for pid {pid}: {e}") + return np.full(FORECAST_HORIZON_DAYS, velocity) + + +def forecast_dormant(): + """Dormant products get near-zero forecast.""" + return np.zeros(FORECAST_HORIZON_DAYS) + + +# --------------------------------------------------------------------------- +# Accuracy-driven confidence margins +# --------------------------------------------------------------------------- +DEFAULT_MARGINS = { + 'preorder': 0.4, + 'launch': 0.35, + 'decay': 0.3, + 'mature': 0.35, + 'slow_mover': 0.5, + 'dormant': 0.5, +} +MIN_MARGIN = 0.15 # intervals shouldn't be tighter than ±15% +MAX_MARGIN = 1.0 # intervals shouldn't exceed ±100% + + +def load_accuracy_margins(conn): + """ + Load per-phase WMAPE from the most recent forecast accuracy run. + Returns a dict of phase -> base_margin, falling back to DEFAULT_MARGINS. + WMAPE is already a ratio (e.g. 1.7 = 170%), which we use directly as margin. + """ + margins = dict(DEFAULT_MARGINS) + try: + df = execute_query(conn, """ + SELECT fa.dimension_value AS phase, fa.wmape + FROM forecast_accuracy fa + JOIN forecast_runs fr ON fr.id = fa.run_id + WHERE fa.metric_type = 'by_phase' + AND fr.status IN ('completed', 'backfill') + AND fa.wmape IS NOT NULL + ORDER BY fr.finished_at DESC + """) + if df.empty: + log.info("No accuracy data available, using default confidence margins") + return margins + + # Take the most recent run's values (they appear first due to ORDER BY) + seen = set() + for _, row in df.iterrows(): + phase = row['phase'] + if phase not in seen: + wmape = float(row['wmape']) + margins[phase] = max(MIN_MARGIN, min(wmape, MAX_MARGIN)) + seen.add(phase) + + log.info(f"Loaded accuracy-based margins: {', '.join(f'{k}={v:.2f}' for k, v in margins.items())}") + except Exception as e: + log.warning(f"Could not load accuracy margins, using defaults: {e}") + + return margins + + +# --------------------------------------------------------------------------- +# Main orchestration +# --------------------------------------------------------------------------- +FLUSH_EVERY_PRODUCTS = 5000 # Flush forecast rows to DB every N products + +def generate_all_forecasts(conn, curves_df, dow_indices, monthly_indices=None, + accuracy_margins=None): + """Classify all products, batch-load data, generate and stream-write forecasts. + + Writes forecast rows to product_forecasts in chunks to avoid accumulating + millions of rows in memory (37K products × 90 days = 3.3M rows). 
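+
+    With FLUSH_EVERY_PRODUCTS = 5000 and the 90-day horizon, each flush
+    writes on the order of 5000 * 90 = 450,000 rows.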
+ """ + if monthly_indices is None: + monthly_indices = {m: 1.0 for m in range(1, 13)} + if accuracy_margins is None: + accuracy_margins = dict(DEFAULT_MARGINS) + log.info("Loading products for classification...") + products = load_products(conn) + log.info(f"Loaded {len(products)} visible products") + + # Classify each product + products['phase'] = products.apply(classify_phase, axis=1) + phase_counts = products['phase'].value_counts().to_dict() + log.info(f"Phase distribution: {phase_counts}") + + # Batch-load per-product data (replaces per-product queries) + log.info("Batch loading product data...") + batch_data = batch_load_product_data(conn, products) + + today = date.today() + forecast_dates = [today + timedelta(days=i) for i in range(FORECAST_HORIZON_DAYS)] + + # Pre-compute DOW and seasonal multipliers for each forecast date + dow_multipliers = [dow_indices.get(d.isoweekday(), 1.0) for d in forecast_dates] + seasonal_multipliers = [monthly_indices.get(d.month, 1.0) for d in forecast_dates] + + # TRUNCATE before streaming writes + with conn.cursor() as cur: + cur.execute("TRUNCATE product_forecasts") + conn.commit() + + buffer = [] + methods = {} + processed = 0 + errors = 0 + total_rows = 0 + + insert_sql = """ + INSERT INTO product_forecasts + (pid, forecast_date, forecast_units, forecast_revenue, + lifecycle_phase, forecast_method, confidence_lower, + confidence_upper) + VALUES %s + """ + + def flush_buffer(): + nonlocal buffer, total_rows + if not buffer: + return + with conn.cursor() as cur: + psycopg2.extras.execute_values( + cur, insert_sql, buffer, + template="(%s, %s, %s, %s, %s, %s, %s, %s)", + page_size=BATCH_SIZE, + ) + conn.commit() + total_rows += len(buffer) + buffer = [] + + for _, product in products.iterrows(): + pid = int(product['pid']) + phase = product['phase'] + price = float(product['current_price'] or 0) + age = max(0, product.get('age_days') or 0) + + try: + curve_info = get_curve_for_product(product, curves_df) + + if phase in ('preorder', 'launch'): + if curve_info: + scale = compute_scale_factor(phase, product, curve_info, batch_data) + forecasts = forecast_from_curve(curve_info, scale, age, FORECAST_HORIZON_DAYS) + method = 'lifecycle_curve' + else: + # No reliable curve — fall back to velocity if available + velocity = product.get('sales_velocity_daily') or 0 + if velocity > 0: + forecasts = np.full(FORECAST_HORIZON_DAYS, velocity) + method = 'velocity' + else: + forecasts = forecast_dormant() + method = 'zero' + + elif phase == 'decay': + if curve_info: + scale = compute_scale_factor(phase, product, curve_info, batch_data) + forecasts = forecast_from_curve(curve_info, scale, age, FORECAST_HORIZON_DAYS) + method = 'lifecycle_curve' + else: + velocity = product.get('sales_velocity_daily') or 0 + forecasts = np.full(FORECAST_HORIZON_DAYS, velocity) + method = 'velocity' + + elif phase == 'mature': + history = batch_data['mature_history'].get(pid) + forecasts = forecast_mature(product, history) + method = 'exp_smoothing' + + elif phase == 'slow_mover': + velocity = product.get('sales_velocity_daily') or 0 + forecasts = np.full(FORECAST_HORIZON_DAYS, velocity) + method = 'velocity' + + else: # dormant + forecasts = forecast_dormant() + method = 'zero' + + # Confidence interval: use accuracy-calibrated margins per phase + base_margin = accuracy_margins.get(phase, 0.5) + + for i, d in enumerate(forecast_dates): + base_units = float(forecasts[i]) if i < len(forecasts) else 0.0 + # Apply day-of-week and seasonal adjustments + units = base_units * 
dow_multipliers[i] * seasonal_multipliers[i] + # Widen confidence interval as horizon grows: day 0 = base, day 89 ≈ +50% wider + horizon_factor = 1.0 + 0.5 * (i / max(FORECAST_HORIZON_DAYS - 1, 1)) + margin = base_margin * horizon_factor + buffer.append(( + pid, d, + round(units, 2), + round(units * price, 4), + phase, method, + round(units * max(1 - margin, 0), 2), + round(units * (1 + margin), 2), + )) + + methods[method] = methods.get(method, 0) + 1 + + except Exception as e: + log.warning(f"Error forecasting pid {pid}: {e}") + errors += 1 + # Write zero forecast so we have complete coverage + for d in forecast_dates: + buffer.append((pid, d, 0, 0, phase, 'zero', 0, 0)) + + processed += 1 + if processed % FLUSH_EVERY_PRODUCTS == 0: + flush_buffer() + log.info(f" Processed {processed}/{len(products)} products ({total_rows} rows written)...") + + # Final flush + flush_buffer() + + log.info(f"Forecast generation complete. {processed} products, {errors} errors, {total_rows} rows") + log.info(f"Method distribution: {methods}") + + return total_rows, processed, phase_counts + + +def archive_forecasts(conn, run_id): + """ + Copy current product_forecasts into history before they get replaced. + Only archives forecast rows for dates that have already passed, + so we can later compare them against actuals. + """ + with conn.cursor() as cur: + # Ensure history table exists + cur.execute(""" + CREATE TABLE IF NOT EXISTS product_forecasts_history ( + run_id INT NOT NULL, + pid BIGINT NOT NULL, + forecast_date DATE NOT NULL, + forecast_units NUMERIC(10,2), + forecast_revenue NUMERIC(14,4), + lifecycle_phase TEXT, + forecast_method TEXT, + confidence_lower NUMERIC(10,2), + confidence_upper NUMERIC(10,2), + generated_at TIMESTAMP, + PRIMARY KEY (run_id, pid, forecast_date) + ) + """) + cur.execute("CREATE INDEX IF NOT EXISTS idx_pfh_date ON product_forecasts_history(forecast_date)") + cur.execute("CREATE INDEX IF NOT EXISTS idx_pfh_pid_date ON product_forecasts_history(pid, forecast_date)") + + # Find the previous completed run (whose forecasts are still in product_forecasts) + cur.execute(""" + SELECT id FROM forecast_runs + WHERE status = 'completed' + ORDER BY finished_at DESC + LIMIT 1 + """) + prev_run = cur.fetchone() + if prev_run is None: + log.info("No previous completed run found, skipping archive") + conn.commit() + return 0 + + prev_run_id = prev_run[0] + + # Archive only past-date forecasts (where actuals now exist) + cur.execute(""" + INSERT INTO product_forecasts_history + (run_id, pid, forecast_date, forecast_units, forecast_revenue, + lifecycle_phase, forecast_method, confidence_lower, confidence_upper, generated_at) + SELECT %s, pid, forecast_date, forecast_units, forecast_revenue, + lifecycle_phase, forecast_method, confidence_lower, confidence_upper, generated_at + FROM product_forecasts + WHERE forecast_date < CURRENT_DATE + ON CONFLICT (run_id, pid, forecast_date) DO NOTHING + """, (prev_run_id,)) + + archived = cur.rowcount + conn.commit() + + if archived > 0: + log.info(f"Archived {archived} historical forecast rows from run {prev_run_id}") + else: + log.info("No past-date forecasts to archive") + + # Prune old history (keep 90 days for accuracy analysis) + cur.execute("DELETE FROM product_forecasts_history WHERE forecast_date < CURRENT_DATE - INTERVAL '90 days'") + pruned = cur.rowcount + if pruned > 0: + log.info(f"Pruned {pruned} old history rows (>90 days)") + conn.commit() + + return archived + + +def compute_accuracy(conn, run_id): + """ + Compute forecast accuracy 
metrics from archived history vs. actual sales. + + Joins product_forecasts_history with daily_product_snapshots on + (pid, forecast_date = snapshot_date) to compare forecasted vs. actual units. + + Stores results in forecast_accuracy table, broken down by: + - overall: single aggregate row + - by_phase: per lifecycle phase + - by_lead_time: bucketed by how far ahead the forecast was + - by_method: per forecast method + - daily: per forecast_date (for trend charts) + """ + with conn.cursor() as cur: + # Ensure accuracy table exists + cur.execute(""" + CREATE TABLE IF NOT EXISTS forecast_accuracy ( + run_id INT NOT NULL, + metric_type TEXT NOT NULL, + dimension_value TEXT NOT NULL, + sample_size INT, + total_actual_units NUMERIC(12,2), + total_forecast_units NUMERIC(12,2), + mae NUMERIC(10,4), + wmape NUMERIC(10,4), + bias NUMERIC(10,4), + rmse NUMERIC(10,4), + computed_at TIMESTAMP NOT NULL DEFAULT NOW(), + PRIMARY KEY (run_id, metric_type, dimension_value) + ) + """) + conn.commit() + + # Check if we have any history to analyze + cur.execute("SELECT COUNT(*) FROM product_forecasts_history") + history_count = cur.fetchone()[0] + if history_count == 0: + log.info("No forecast history available for accuracy computation") + return + + # For each (pid, forecast_date) pair, keep only the most recent run's + # forecast row. This prevents double-counting when multiple runs have + # archived forecasts for the same product×date combination. + accuracy_cte = """ + WITH ranked_history AS ( + SELECT + pfh.*, + fr.started_at, + ROW_NUMBER() OVER ( + PARTITION BY pfh.pid, pfh.forecast_date + ORDER BY fr.started_at DESC + ) AS rn + FROM product_forecasts_history pfh + JOIN forecast_runs fr ON fr.id = pfh.run_id + ), + accuracy AS ( + SELECT + rh.lifecycle_phase, + rh.forecast_method, + rh.forecast_date, + (rh.forecast_date - rh.started_at::date) AS lead_days, + rh.forecast_units, + COALESCE(dps.units_sold, 0) AS actual_units, + (rh.forecast_units - COALESCE(dps.units_sold, 0)) AS error, + ABS(rh.forecast_units - COALESCE(dps.units_sold, 0)) AS abs_error + FROM ranked_history rh + LEFT JOIN daily_product_snapshots dps + ON dps.pid = rh.pid AND dps.snapshot_date = rh.forecast_date + WHERE rh.rn = 1 + AND NOT (rh.forecast_units = 0 AND COALESCE(dps.units_sold, 0) = 0) + ) + """ + + # Compute and insert metrics for each dimension + dimensions = { + 'overall': "SELECT 'all' AS dim", + 'by_phase': "SELECT DISTINCT lifecycle_phase AS dim FROM accuracy", + 'by_lead_time': """ + SELECT DISTINCT + CASE + WHEN lead_days BETWEEN 0 AND 6 THEN '1-7d' + WHEN lead_days BETWEEN 7 AND 13 THEN '8-14d' + WHEN lead_days BETWEEN 14 AND 29 THEN '15-30d' + WHEN lead_days BETWEEN 30 AND 59 THEN '31-60d' + ELSE '61-90d' + END AS dim + FROM accuracy + """, + 'by_method': "SELECT DISTINCT forecast_method AS dim FROM accuracy", + 'daily': "SELECT DISTINCT forecast_date::text AS dim FROM accuracy", + } + + filter_clauses = { + 'overall': "lifecycle_phase != 'dormant'", + 'by_phase': "lifecycle_phase = dims.dim", + 'by_lead_time': """ + CASE + WHEN lead_days BETWEEN 0 AND 6 THEN '1-7d' + WHEN lead_days BETWEEN 7 AND 13 THEN '8-14d' + WHEN lead_days BETWEEN 14 AND 29 THEN '15-30d' + WHEN lead_days BETWEEN 30 AND 59 THEN '31-60d' + ELSE '61-90d' + END = dims.dim + """, + 'by_method': "forecast_method = dims.dim", + 'daily': "forecast_date::text = dims.dim", + } + + total_inserted = 0 + + for metric_type, dim_query in dimensions.items(): + filter_clause = filter_clauses[metric_type] + + sql = f""" + {accuracy_cte}, + dims AS 
({dim_query}) + SELECT + dims.dim, + COUNT(*) AS sample_size, + COALESCE(SUM(a.actual_units), 0) AS total_actual, + COALESCE(SUM(a.forecast_units), 0) AS total_forecast, + AVG(a.abs_error) AS mae, + CASE WHEN SUM(a.actual_units) > 0 + THEN SUM(a.abs_error) / SUM(a.actual_units) + ELSE NULL END AS wmape, + AVG(a.error) AS bias, + SQRT(AVG(POWER(a.error, 2))) AS rmse + FROM dims + CROSS JOIN accuracy a + WHERE {filter_clause} + GROUP BY dims.dim + """ + + cur.execute(sql) + rows = cur.fetchall() + + for row in rows: + dim_val, sample_size, total_actual, total_forecast, mae, wmape, bias, rmse = row + cur.execute(""" + INSERT INTO forecast_accuracy + (run_id, metric_type, dimension_value, sample_size, + total_actual_units, total_forecast_units, mae, wmape, bias, rmse) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) + ON CONFLICT (run_id, metric_type, dimension_value) + DO UPDATE SET + sample_size = EXCLUDED.sample_size, + total_actual_units = EXCLUDED.total_actual_units, + total_forecast_units = EXCLUDED.total_forecast_units, + mae = EXCLUDED.mae, wmape = EXCLUDED.wmape, + bias = EXCLUDED.bias, rmse = EXCLUDED.rmse, + computed_at = NOW() + """, (run_id, metric_type, dim_val, sample_size, + float(total_actual), float(total_forecast), + float(mae) if mae is not None else None, + float(wmape) if wmape is not None else None, + float(bias) if bias is not None else None, + float(rmse) if rmse is not None else None)) + total_inserted += 1 + + conn.commit() + + # Prune old accuracy data (keep 90 days of runs + any in-progress run) + cur.execute(""" + DELETE FROM forecast_accuracy + WHERE run_id NOT IN ( + SELECT id FROM forecast_runs + WHERE finished_at >= NOW() - INTERVAL '90 days' + OR finished_at IS NULL + ) + """) + pruned = cur.rowcount + conn.commit() + + log.info(f"Accuracy metrics: {total_inserted} rows computed" + + (f", {pruned} old rows pruned" if pruned > 0 else "")) + + +def backfill_accuracy_data(conn, backfill_days=30): + """ + Generate retroactive forecast data for the past N days to bootstrap + accuracy metrics. Uses the current brand curves with per-product scaling + to approximate what the model would have predicted for each past day, + then stores results in product_forecasts_history for comparison against + actual snapshots. + + This is a model backtest (in-sample), not true out-of-sample accuracy, + but provides much better initial estimates than unscaled brand curves. 
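+
+    Invoked from the CLI via the --backfill flag handled in main(), e.g.
+    `python forecast_engine.py --backfill 30` (the day count defaults to 30).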
+ """ + backfill_start_time = time.time() + log.info(f"Backfilling {backfill_days} days of accuracy data with per-product scaling...") + + # Load DOW indices + dow_indices = compute_dow_indices(conn) + + # Load brand curves (already fitted) + curves_df = execute_query(conn, """ + SELECT brand, root_category, cat_id, category_level, + amplitude, decay_rate, baseline, + r_squared, median_first_week_sales, median_preorder_sales, + median_preorder_days + FROM brand_lifecycle_curves + """) + + # Load products + products = load_products(conn) + products['phase'] = products.apply(classify_phase, axis=1) + + # Skip dormant — they forecast 0 and are filtered from accuracy anyway + active = products[products['phase'] != 'dormant'].copy() + log.info(f"Backfilling for {len(active)} non-dormant products " + f"(skipping {len(products) - len(active)} dormant)") + + # Batch load product data for per-product scaling + batch_data = batch_load_product_data(conn, active) + + today = date.today() + backfill_start = today - timedelta(days=backfill_days) + + # Create a synthetic run entry + with conn.cursor() as cur: + cur.execute(""" + INSERT INTO forecast_runs + (started_at, finished_at, status, products_forecast, + phase_counts, error_message) + VALUES (%s, NOW(), 'backfill', %s, %s, %s) + RETURNING id + """, ( + backfill_start, + len(active), + json.dumps({'backfill_days': backfill_days}), + f'Model backtest: {backfill_days} days with per-product scaling', + )) + backfill_run_id = cur.fetchone()[0] + conn.commit() + log.info(f"Created backfill run {backfill_run_id} " + f"(simulated start: {backfill_start})") + + # Generate retroactive forecasts + all_rows = [] + backfill_dates = [backfill_start + timedelta(days=i) + for i in range(backfill_days)] + + for _, product in active.iterrows(): + pid = int(product['pid']) + price = float(product['current_price'] or 0) + current_age = product.get('age_days') + velocity = float(product.get('sales_velocity_daily') or 0) + phase = product['phase'] + + curve_info = get_curve_for_product(product, curves_df) + + # Compute per-product scale factor (same logic as main forecast) + scale = compute_scale_factor(phase, product, curve_info, batch_data) + + for forecast_date in backfill_dates: + # How many days ago was this date? 
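+            # (e.g., hypothetical numbers: today = 2026-03-01 and
+            # forecast_date = 2026-02-09 give days_ago = 20, so a product
+            # that is 50 days old today was 30 days old on that date)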
+ days_ago = (today - forecast_date).days + # Product's age on that date + past_age = (current_age - days_ago) if current_age is not None else None + + if past_age is not None and past_age < 0: + # Product didn't exist yet on this date + continue + + # Determine what phase the product was likely in + if past_age is not None: + if past_age <= LAUNCH_AGE_DAYS: + past_phase = 'launch' + elif past_age <= DECAY_AGE_DAYS: + past_phase = 'decay' + else: + past_phase = phase # use current classification + else: + past_phase = phase + + # Compute forecast value for this date + if past_phase in ('launch', 'decay', 'preorder') and curve_info: + amplitude, decay_rate, baseline = curve_info[:3] + age_for_calc = max(0, past_age or 0) + t_weeks = age_for_calc / 7.0 + # Use corrected formula: scale only the decay envelope, not the baseline + daily_value = (amplitude / 7.0) * np.exp(-decay_rate * t_weeks) * scale + (baseline / 7.0) + units = max(0.0, float(daily_value)) + method = 'lifecycle_curve' + elif past_phase == 'mature' and velocity > 0: + units = velocity + method = 'exp_smoothing' + else: + units = velocity if velocity > 0 else 0.0 + method = 'velocity' if velocity > 0 else 'zero' + + # Apply DOW multiplier + dow_mult = dow_indices.get(forecast_date.isoweekday(), 1.0) + units *= dow_mult + + if units == 0 and method == 'zero': + continue # skip zero-zero rows + + revenue = units * price + margin = 0.3 if method == 'lifecycle_curve' else 0.4 + + all_rows.append(( + backfill_run_id, pid, forecast_date, + round(float(units), 2), + round(float(revenue), 4), + past_phase, method, + round(float(units * (1 - margin)), 2), + round(float(units * (1 + margin)), 2), + backfill_start, # generated_at + )) + + log.info(f"Generated {len(all_rows)} backfill forecast rows") + + # Write to history table + if all_rows: + with conn.cursor() as cur: + sql = """ + INSERT INTO product_forecasts_history + (run_id, pid, forecast_date, forecast_units, forecast_revenue, + lifecycle_phase, forecast_method, confidence_lower, + confidence_upper, generated_at) + VALUES %s + ON CONFLICT (run_id, pid, forecast_date) DO NOTHING + """ + psycopg2.extras.execute_values( + cur, sql, all_rows, + template="(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", + page_size=BATCH_SIZE, + ) + conn.commit() + log.info(f"Wrote {len(all_rows)} rows to product_forecasts_history") + + # Now compute accuracy on the backfilled data + compute_accuracy(conn, backfill_run_id) + + # Mark the backfill run as completed + backfill_duration = time.time() - backfill_start_time + with conn.cursor() as cur: + cur.execute(""" + UPDATE forecast_runs + SET finished_at = NOW(), status = 'backfill', + duration_seconds = %s + WHERE id = %s + """, (round(backfill_duration, 2), backfill_run_id)) + conn.commit() + + log.info(f"Backfill complete in {backfill_duration:.1f}s") + return backfill_run_id + + +def main(): + start_time = time.time() + + conn = get_connection() + + # Clean up any stale "running" entries from prior crashes + cleanup_stale_runs(conn) + + # Check for --backfill flag + if '--backfill' in sys.argv: + idx = sys.argv.index('--backfill') + days = int(sys.argv[idx + 1]) if idx + 1 < len(sys.argv) else 30 + log.info("=" * 60) + log.info(f"Backfill mode: {days} days") + log.info("=" * 60) + try: + backfill_accuracy_data(conn, days) + finally: + conn.close() + return + + log.info("=" * 60) + log.info("Forecast Engine starting") + log.info("=" * 60) + + run_id = None + + try: + # Record run start + with conn.cursor() as cur: + cur.execute( + "INSERT INTO 
forecast_runs (started_at, status) VALUES (NOW(), 'running') RETURNING id" + ) + run_id = cur.fetchone()[0] + conn.commit() + + # Phase 0: Compute day-of-week and monthly seasonal indices + dow_indices = compute_dow_indices(conn) + monthly_indices = compute_monthly_seasonal_indices(conn) + + # Phase 1: Build reference curves + curves_df = build_reference_curves(conn) + + # Phase 2: Archive historical forecasts (before TRUNCATE in generation) + archive_forecasts(conn, run_id) + + # Phase 3: Compute accuracy from archived history vs actuals + compute_accuracy(conn, run_id) + + # Phase 3b: Load accuracy-calibrated confidence margins + accuracy_margins = load_accuracy_margins(conn) + + # Phase 4: Generate and stream-write forecasts (TRUNCATE + chunked INSERT) + total_rows, products_forecast, phase_counts = generate_all_forecasts( + conn, curves_df, dow_indices, monthly_indices, accuracy_margins + ) + + duration = time.time() - start_time + + # Record run completion (include DOW indices in metadata) + with conn.cursor() as cur: + cur.execute(""" + UPDATE forecast_runs + SET finished_at = NOW(), status = 'completed', + products_forecast = %s, phase_counts = %s, + curve_count = %s, duration_seconds = %s + WHERE id = %s + """, ( + products_forecast, + json.dumps({ + **phase_counts, + '_dow_indices': {str(k): v for k, v in dow_indices.items()}, + '_seasonal_indices': {str(k): v for k, v in monthly_indices.items()}, + }), + len(curves_df) if not curves_df.empty else 0, + round(duration, 2), + run_id, + )) + conn.commit() + + log.info("=" * 60) + log.info(f"Forecast complete in {duration:.1f}s") + log.info(f" Products: {products_forecast}") + log.info(f" Curves: {len(curves_df) if not curves_df.empty else 0}") + log.info(f" Phases: {phase_counts}") + log.info(f" Rows written: {total_rows}") + log.info("=" * 60) + + except Exception as e: + duration = time.time() - start_time + log.error(f"Forecast engine failed: {e}", exc_info=True) + + if run_id: + try: + with conn.cursor() as cur: + cur.execute(""" + UPDATE forecast_runs + SET finished_at = NOW(), status = 'failed', + error_message = %s, duration_seconds = %s + WHERE id = %s + """, (str(e), round(duration, 2), run_id)) + conn.commit() + except Exception: + pass + + sys.exit(1) + finally: + conn.close() + + +if __name__ == '__main__': + main() diff --git a/inventory-server/scripts/forecast/requirements.txt b/inventory-server/scripts/forecast/requirements.txt new file mode 100644 index 0000000..d1990d8 --- /dev/null +++ b/inventory-server/scripts/forecast/requirements.txt @@ -0,0 +1,5 @@ +numpy>=1.24 +scipy>=1.10 +pandas>=2.0 +psycopg2-binary>=2.9 +statsmodels>=0.14 diff --git a/inventory-server/scripts/forecast/run_forecast.js b/inventory-server/scripts/forecast/run_forecast.js new file mode 100644 index 0000000..3ea3d32 --- /dev/null +++ b/inventory-server/scripts/forecast/run_forecast.js @@ -0,0 +1,128 @@ +#!/usr/bin/env node +/** + * Forecast Pipeline Orchestrator + * + * Spawns the Python forecast engine with database credentials from the + * environment. Can be run manually, via cron, or integrated into the + * existing metrics pipeline. + * + * Usage: + * node run_forecast.js + * + * Environment: + * Reads DB_HOST, DB_USER, DB_PASSWORD, DB_NAME, DB_PORT from + * /var/www/html/inventory/.env (or current process env). 
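+ *
+ * First run: creates ./venv next to this script and installs
+ * requirements.txt into it (see ensureDependencies below); later runs
+ * reuse the venv and repeat a fast, idempotent pip install.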
+ */ + +const { spawn } = require('child_process'); +const path = require('path'); +const fs = require('fs'); + +// Load .env file if it exists (production path) +const envPaths = [ + '/var/www/html/inventory/.env', + path.join(__dirname, '../../.env'), +]; + +for (const envPath of envPaths) { + if (fs.existsSync(envPath)) { + const envContent = fs.readFileSync(envPath, 'utf-8'); + for (const line of envContent.split('\n')) { + const trimmed = line.trim(); + if (!trimmed || trimmed.startsWith('#')) continue; + const eqIndex = trimmed.indexOf('='); + if (eqIndex === -1) continue; + const key = trimmed.slice(0, eqIndex); + const value = trimmed.slice(eqIndex + 1); + if (!process.env[key]) { + process.env[key] = value; + } + } + console.log(`Loaded env from ${envPath}`); + break; + } +} + +// Verify required env vars +const required = ['DB_HOST', 'DB_USER', 'DB_PASSWORD', 'DB_NAME']; +const missing = required.filter(k => !process.env[k]); +if (missing.length > 0) { + console.error(`Missing required environment variables: ${missing.join(', ')}`); + process.exit(1); +} + +const SCRIPT_DIR = __dirname; +const PYTHON_SCRIPT = path.join(SCRIPT_DIR, 'forecast_engine.py'); +const VENV_DIR = path.join(SCRIPT_DIR, 'venv'); +const REQUIREMENTS = path.join(SCRIPT_DIR, 'requirements.txt'); + +// Determine python binary (prefer venv if it exists) +function getPythonBin() { + const venvPython = path.join(VENV_DIR, 'bin', 'python'); + if (fs.existsSync(venvPython)) return venvPython; + + // Fall back to system python + return 'python3'; +} + +// Ensure venv and dependencies are installed +async function ensureDependencies() { + if (!fs.existsSync(path.join(VENV_DIR, 'bin', 'python'))) { + console.log('Creating virtual environment...'); + await runCommand('python3', ['-m', 'venv', VENV_DIR]); + } + + // Always run pip install — idempotent, fast when packages already present + console.log('Checking dependencies...'); + const python = path.join(VENV_DIR, 'bin', 'python'); + await runCommand(python, ['-m', 'pip', 'install', '--quiet', '-r', REQUIREMENTS]); +} + +function runCommand(cmd, args, options = {}) { + return new Promise((resolve, reject) => { + const proc = spawn(cmd, args, { + stdio: 'inherit', + ...options, + }); + proc.on('close', code => { + if (code === 0) resolve(); + else reject(new Error(`${cmd} exited with code ${code}`)); + }); + proc.on('error', reject); + }); +} + +async function main() { + const startTime = Date.now(); + console.log('='.repeat(60)); + console.log(`Forecast Pipeline - ${new Date().toISOString()}`); + console.log('='.repeat(60)); + + try { + await ensureDependencies(); + + const pythonBin = getPythonBin(); + console.log(`Using Python: ${pythonBin}`); + console.log(`Running: ${PYTHON_SCRIPT}`); + console.log(''); + + await runCommand(pythonBin, [PYTHON_SCRIPT], { + env: { + ...process.env, + PYTHONUNBUFFERED: '1', // Real-time output + }, + }); + + const duration = ((Date.now() - startTime) / 1000).toFixed(1); + console.log(''); + console.log('='.repeat(60)); + console.log(`Forecast pipeline completed in ${duration}s`); + console.log('='.repeat(60)); + } catch (err) { + const duration = ((Date.now() - startTime) / 1000).toFixed(1); + console.error(`Forecast pipeline FAILED after ${duration}s:`, err.message); + process.exit(1); + } +} + +main(); diff --git a/inventory-server/scripts/forecast/sql/create_tables.sql b/inventory-server/scripts/forecast/sql/create_tables.sql new file mode 100644 index 0000000..fcc6cd2 --- /dev/null +++ 
b/inventory-server/scripts/forecast/sql/create_tables.sql @@ -0,0 +1,51 @@ +-- Forecasting Pipeline Tables +-- Run once to create the schema. Safe to re-run (IF NOT EXISTS). + +-- Precomputed reference decay curves per brand (or brand x category at any hierarchy level) +CREATE TABLE IF NOT EXISTS brand_lifecycle_curves ( + id SERIAL PRIMARY KEY, + brand TEXT NOT NULL, + root_category TEXT, -- NULL = brand-level fallback curve, else category name + cat_id BIGINT, -- NULL = brand-only; else category_hierarchy.cat_id for precise matching + category_level SMALLINT, -- NULL = brand-only; 0-3 = hierarchy depth + amplitude NUMERIC(10,4), -- A in: sales(t) = A * exp(-λt) + C + decay_rate NUMERIC(10,6), -- λ (higher = faster decay) + baseline NUMERIC(10,4), -- C (long-tail steady-state daily sales) + r_squared NUMERIC(6,4), -- goodness of fit + sample_size INT, -- number of products that informed this curve + median_first_week_sales NUMERIC(10,2), -- for scaling new launches + median_preorder_sales NUMERIC(10,2), -- for scaling pre-order products + median_preorder_days NUMERIC(10,2), -- median pre-order accumulation window (days) + computed_at TIMESTAMP DEFAULT NOW(), + UNIQUE(brand, cat_id) +); + +-- Per-product daily forecasts (next 90 days, regenerated each run) +CREATE TABLE IF NOT EXISTS product_forecasts ( + pid BIGINT NOT NULL, + forecast_date DATE NOT NULL, + forecast_units NUMERIC(10,2), + forecast_revenue NUMERIC(14,4), + lifecycle_phase TEXT, -- preorder, launch, decay, mature, slow_mover, dormant + forecast_method TEXT, -- lifecycle_curve, exp_smoothing, velocity, zero + confidence_lower NUMERIC(10,2), + confidence_upper NUMERIC(10,2), + generated_at TIMESTAMP DEFAULT NOW(), + PRIMARY KEY (pid, forecast_date) +); + +CREATE INDEX IF NOT EXISTS idx_pf_date ON product_forecasts(forecast_date); +CREATE INDEX IF NOT EXISTS idx_pf_phase ON product_forecasts(lifecycle_phase); + +-- Forecast run history (for monitoring) +CREATE TABLE IF NOT EXISTS forecast_runs ( + id SERIAL PRIMARY KEY, + started_at TIMESTAMP NOT NULL, + finished_at TIMESTAMP, + status TEXT DEFAULT 'running', -- running, completed, failed + products_forecast INT, + phase_counts JSONB, -- {"launch": 50, "decay": 200, ...} + curve_count INT, -- brand curves computed + error_message TEXT, + duration_seconds NUMERIC(10,2) +); diff --git a/inventory-server/scripts/import-from-prod.js b/inventory-server/scripts/import-from-prod.js index c8ffa99..cd1ef50 100644 --- a/inventory-server/scripts/import-from-prod.js +++ b/inventory-server/scripts/import-from-prod.js @@ -40,7 +40,7 @@ const sshConfig = { password: process.env.PROD_DB_PASSWORD, database: process.env.PROD_DB_NAME, port: process.env.PROD_DB_PORT || 3306, - timezone: '-05:00', // Production DB always stores times in EST (UTC-5) regardless of DST + timezone: '-05:00', // mysql2 driver timezone — corrected at runtime via adjustDateForMySQL() in utils.js }, localDbConfig: { // PostgreSQL config for local diff --git a/inventory-server/scripts/import/orders.js b/inventory-server/scripts/import/orders.js index ecdc523..74a7c50 100644 --- a/inventory-server/scripts/import/orders.js +++ b/inventory-server/scripts/import/orders.js @@ -58,8 +58,12 @@ async function importOrders(prodConnection, localConnection, incrementalUpdate = "SELECT last_sync_timestamp FROM sync_status WHERE table_name = 'orders'" ); const lastSyncTime = syncInfo?.rows?.[0]?.last_sync_timestamp || '1970-01-01'; + // Adjust for mysql2 driver timezone vs MySQL server timezone mismatch + const mysqlSyncTime = 
prodConnection.adjustDateForMySQL + ? prodConnection.adjustDateForMySQL(lastSyncTime) + : lastSyncTime; - console.log('Orders: Using last sync time:', lastSyncTime); + console.log('Orders: Using last sync time:', lastSyncTime, '(adjusted:', mysqlSyncTime, ')'); // First get count of order items - Keep MySQL compatible for production const [[{ total }]] = await prodConnection.query(` @@ -82,7 +86,7 @@ async function importOrders(prodConnection, localConnection, incrementalUpdate = ) ) ` : ''} - `, incrementalUpdate ? [lastSyncTime, lastSyncTime, lastSyncTime] : []); + `, incrementalUpdate ? [mysqlSyncTime, mysqlSyncTime, mysqlSyncTime] : []); totalOrderItems = total; console.log('Orders: Found changes:', totalOrderItems); @@ -116,7 +120,7 @@ async function importOrders(prodConnection, localConnection, incrementalUpdate = ) ) ` : ''} - `, incrementalUpdate ? [lastSyncTime, lastSyncTime, lastSyncTime] : []); + `, incrementalUpdate ? [mysqlSyncTime, mysqlSyncTime, mysqlSyncTime] : []); console.log('Orders: Found', orderItems.length, 'order items to process'); diff --git a/inventory-server/scripts/import/products.js b/inventory-server/scripts/import/products.js index ac66ccf..aa191a8 100644 --- a/inventory-server/scripts/import/products.js +++ b/inventory-server/scripts/import/products.js @@ -669,8 +669,13 @@ async function importProducts(prodConnection, localConnection, incrementalUpdate // Setup temporary tables await setupTemporaryTables(localConnection); + // Adjust sync time for mysql2 driver timezone vs MySQL server timezone mismatch + const mysqlSyncTime = prodConnection.adjustDateForMySQL + ? prodConnection.adjustDateForMySQL(lastSyncTime) + : lastSyncTime; + // Materialize calculations into temp table - const materializeResult = await materializeCalculations(prodConnection, localConnection, incrementalUpdate, lastSyncTime, startTime); + const materializeResult = await materializeCalculations(prodConnection, localConnection, incrementalUpdate, mysqlSyncTime, startTime); // Get the list of products that need updating const [products] = await localConnection.query(` diff --git a/inventory-server/scripts/import/purchase-orders.js b/inventory-server/scripts/import/purchase-orders.js index 39eee9e..0b882bc 100644 --- a/inventory-server/scripts/import/purchase-orders.js +++ b/inventory-server/scripts/import/purchase-orders.js @@ -65,8 +65,12 @@ async function importPurchaseOrders(prodConnection, localConnection, incremental "SELECT last_sync_timestamp FROM sync_status WHERE table_name = 'purchase_orders'" ); const lastSyncTime = syncInfo?.rows?.[0]?.last_sync_timestamp || '1970-01-01'; + // Adjust for mysql2 driver timezone vs MySQL server timezone mismatch + const mysqlSyncTime = prodConnection.adjustDateForMySQL + ? prodConnection.adjustDateForMySQL(lastSyncTime) + : lastSyncTime; - console.log('Purchase Orders: Using last sync time:', lastSyncTime); + console.log('Purchase Orders: Using last sync time:', lastSyncTime, '(adjusted:', mysqlSyncTime, ')'); // Create temp tables for processing await localConnection.query(` @@ -254,7 +258,7 @@ async function importPurchaseOrders(prodConnection, localConnection, incremental OR p.date_estin > ? ) ` : ''} - `, incrementalUpdate ? [lastSyncTime, lastSyncTime, lastSyncTime] : []); + `, incrementalUpdate ? 
[mysqlSyncTime, mysqlSyncTime, mysqlSyncTime] : []); const totalPOs = poCount[0].total; console.log(`Found ${totalPOs} relevant purchase orders`); @@ -291,7 +295,7 @@ async function importPurchaseOrders(prodConnection, localConnection, incremental ` : ''} ORDER BY p.po_id LIMIT ${PO_BATCH_SIZE} OFFSET ${offset} - `, incrementalUpdate ? [lastSyncTime, lastSyncTime, lastSyncTime] : []); + `, incrementalUpdate ? [mysqlSyncTime, mysqlSyncTime, mysqlSyncTime] : []); if (poList.length === 0) { allPOsProcessed = true; @@ -426,7 +430,7 @@ async function importPurchaseOrders(prodConnection, localConnection, incremental OR r.date_created > ? ) ` : ''} - `, incrementalUpdate ? [lastSyncTime, lastSyncTime] : []); + `, incrementalUpdate ? [mysqlSyncTime, mysqlSyncTime] : []); const totalReceivings = receivingCount[0].total; console.log(`Found ${totalReceivings} relevant receivings`); @@ -463,7 +467,7 @@ async function importPurchaseOrders(prodConnection, localConnection, incremental ` : ''} ORDER BY r.receiving_id LIMIT ${PO_BATCH_SIZE} OFFSET ${offset} - `, incrementalUpdate ? [lastSyncTime, lastSyncTime] : []); + `, incrementalUpdate ? [mysqlSyncTime, mysqlSyncTime] : []); if (receivingList.length === 0) { allReceivingsProcessed = true; diff --git a/inventory-server/scripts/import/utils.js b/inventory-server/scripts/import/utils.js index b7d888d..564b86a 100644 --- a/inventory-server/scripts/import/utils.js +++ b/inventory-server/scripts/import/utils.js @@ -48,6 +48,37 @@ async function setupConnections(sshConfig) { stream: tunnel.stream, }); + // Detect MySQL server timezone and calculate correction for the driver timezone mismatch. + // The mysql2 driver is configured with timezone: '-05:00' (EST), but the MySQL server + // may be in a different timezone (e.g., America/Chicago = CST/CDT). When the driver + // formats a JS Date as EST and MySQL interprets it in its own timezone, DATETIME + // comparisons can be off. This correction adjusts Date objects before they're passed + // to MySQL queries so the formatted string matches the server's local time. + const [[{ utcDiffSec }]] = await prodConnection.query( + "SELECT TIMESTAMPDIFF(SECOND, NOW(), UTC_TIMESTAMP()) as utcDiffSec" + ); + const mysqlOffsetMs = -utcDiffSec * 1000; // MySQL UTC offset in ms (e.g., -21600000 for CST) + const driverOffsetMs = -5 * 3600 * 1000; // Driver's -05:00 in ms (-18000000) + const tzCorrectionMs = driverOffsetMs - mysqlOffsetMs; + // CST (winter): -18000000 - (-21600000) = +3600000 (1 hour correction needed) + // CDT (summer): -18000000 - (-18000000) = 0 (no correction needed) + + if (tzCorrectionMs !== 0) { + console.log(`MySQL timezone correction: ${tzCorrectionMs / 1000}s (server offset: ${utcDiffSec}s from UTC)`); + } + + /** + * Adjusts a Date/timestamp for the mysql2 driver timezone mismatch before + * passing it as a query parameter to MySQL. This ensures that the string + * mysql2 generates matches the timezone that DATETIME values are stored in. + */ + function adjustDateForMySQL(date) { + if (!date || tzCorrectionMs === 0) return date; + const d = date instanceof Date ? 
date : new Date(date); + return new Date(d.getTime() - tzCorrectionMs); + } + prodConnection.adjustDateForMySQL = adjustDateForMySQL; + // Setup PostgreSQL connection pool for local const localPool = new Pool(sshConfig.localDbConfig); diff --git a/inventory-server/scripts/metrics-new/update_daily_snapshots.sql b/inventory-server/scripts/metrics-new/update_daily_snapshots.sql index af37a00..3c69104 100644 --- a/inventory-server/scripts/metrics-new/update_daily_snapshots.sql +++ b/inventory-server/scripts/metrics-new/update_daily_snapshots.sql @@ -1,6 +1,7 @@ -- Description: Calculates and updates daily aggregated product data. --- Self-healing: automatically detects and fills gaps in snapshot history. --- Always reprocesses recent days to pick up new orders and data corrections. +-- Self-healing: detects gaps (missing snapshots), stale data (snapshot +-- aggregates that don't match source tables after backfills), and always +-- reprocesses recent days to pick up new orders and data corrections. -- Dependencies: Core import tables (products, orders, purchase_orders), calculate_status table. -- Frequency: Hourly (Run ~5-10 minutes after hourly data import completes). @@ -18,28 +19,26 @@ DECLARE BEGIN RAISE NOTICE 'Running % script. Start Time: %', _module_name, _start_time; - -- Find the latest existing snapshot date to determine where gaps begin + -- Find the latest existing snapshot date (for logging only) SELECT MAX(snapshot_date) INTO _latest_snapshot FROM public.daily_product_snapshots; - -- Determine how far back to look for gaps, capped at _max_backfill_days - _backfill_start := GREATEST( - COALESCE(_latest_snapshot + 1, CURRENT_DATE - _max_backfill_days), - CURRENT_DATE - _max_backfill_days - ); + -- Always scan the full backfill window to catch holes in the middle, + -- not just gaps at the end. The gap fill and stale detection queries + -- need to see the entire range to find missing or outdated snapshots. + _backfill_start := CURRENT_DATE - _max_backfill_days; IF _latest_snapshot IS NULL THEN RAISE NOTICE 'No existing snapshots found. Backfilling up to % days.', _max_backfill_days; - ELSIF _backfill_start > _latest_snapshot + 1 THEN - RAISE NOTICE 'Latest snapshot: %. Gap exceeds % day cap — backfilling from %. Use rebuild script for full history.', - _latest_snapshot, _max_backfill_days, _backfill_start; ELSE - RAISE NOTICE 'Latest snapshot: %. Checking for gaps from %.', _latest_snapshot, _backfill_start; + RAISE NOTICE 'Latest snapshot: %. Scanning from % for gaps and stale data.', _latest_snapshot, _backfill_start; END IF; -- Process all dates that need snapshots: -- 1. Gap fill: dates with orders/receivings but no snapshots (older than recent window) - -- 2. Recent recheck: last N days always reprocessed (picks up new orders, corrections) + -- 2. Stale detection: existing snapshots where aggregates don't match source data + -- (catches backfilled imports that arrived after snapshot was calculated) + -- 3. 
Recent recheck: last N days always reprocessed (picks up new orders, corrections) FOR _target_date IN SELECT d FROM ( -- Gap fill: find dates with activity but missing snapshots @@ -55,6 +54,36 @@ BEGIN SELECT 1 FROM public.daily_product_snapshots dps WHERE dps.snapshot_date = activity_dates.d ) UNION + -- Stale detection: compare snapshot aggregates against source tables + SELECT snap_agg.snapshot_date AS d + FROM ( + SELECT snapshot_date, + COALESCE(SUM(units_received), 0)::bigint AS snap_received, + COALESCE(SUM(units_sold), 0)::bigint AS snap_sold + FROM public.daily_product_snapshots + WHERE snapshot_date >= _backfill_start + AND snapshot_date < CURRENT_DATE - _recent_recheck_days + GROUP BY snapshot_date + ) snap_agg + LEFT JOIN ( + SELECT received_date::date AS d, SUM(qty_each)::bigint AS actual_received + FROM public.receivings + WHERE received_date::date >= _backfill_start + AND received_date::date < CURRENT_DATE - _recent_recheck_days + GROUP BY received_date::date + ) recv_agg ON snap_agg.snapshot_date = recv_agg.d + LEFT JOIN ( + SELECT date::date AS d, + SUM(CASE WHEN quantity > 0 AND COALESCE(status, 'pending') NOT IN ('canceled', 'returned') + THEN quantity ELSE 0 END)::bigint AS actual_sold + FROM public.orders + WHERE date::date >= _backfill_start + AND date::date < CURRENT_DATE - _recent_recheck_days + GROUP BY date::date + ) orders_agg ON snap_agg.snapshot_date = orders_agg.d + WHERE snap_agg.snap_received != COALESCE(recv_agg.actual_received, 0) + OR snap_agg.snap_sold != COALESCE(orders_agg.actual_sold, 0) + UNION -- Recent days: always reprocess SELECT d::date FROM generate_series( @@ -66,11 +95,18 @@ BEGIN ORDER BY d LOOP _days_processed := _days_processed + 1; - RAISE NOTICE 'Processing date: % [%/%]', _target_date, _days_processed, - _days_processed; -- count not known ahead of time, but shows progress - + + -- Classify why this date is being processed (for logging) + IF _target_date >= CURRENT_DATE - _recent_recheck_days THEN + RAISE NOTICE 'Processing date: % [recent recheck]', _target_date; + ELSIF NOT EXISTS (SELECT 1 FROM public.daily_product_snapshots WHERE snapshot_date = _target_date) THEN + RAISE NOTICE 'Processing date: % [gap fill — no existing snapshot]', _target_date; + ELSE + RAISE NOTICE 'Processing date: % [stale data — snapshot aggregates mismatch source]', _target_date; + END IF; + -- IMPORTANT: First delete any existing data for this date to prevent duplication - DELETE FROM public.daily_product_snapshots + DELETE FROM public.daily_product_snapshots WHERE snapshot_date = _target_date; -- Proceed with calculating daily metrics only for products with actual activity diff --git a/inventory-server/scripts/metrics-new/update_lifecycle_forecasts.sql b/inventory-server/scripts/metrics-new/update_lifecycle_forecasts.sql new file mode 100644 index 0000000..868ef7d --- /dev/null +++ b/inventory-server/scripts/metrics-new/update_lifecycle_forecasts.sql @@ -0,0 +1,131 @@ +-- Description: Populates lifecycle forecast columns on product_metrics from product_forecasts. +-- Runs AFTER update_product_metrics.sql so that lead time / days of stock settings are available. +-- Dependencies: product_metrics (fully populated), product_forecasts, settings tables. +-- Frequency: After each metrics run and/or after forecast engine runs. + +DO $$ +DECLARE + _module_name TEXT := 'lifecycle_forecasts'; + _start_time TIMESTAMPTZ := clock_timestamp(); + _updated INT; +BEGIN + RAISE NOTICE 'Running % module. 
Start Time: %', _module_name, _start_time; + + -- Step 1: Set lifecycle_phase from product_forecasts (one phase per product) + UPDATE product_metrics pm + SET lifecycle_phase = sub.lifecycle_phase + FROM ( + SELECT DISTINCT ON (pid) pid, lifecycle_phase + FROM product_forecasts + ORDER BY pid, forecast_date + ) sub + WHERE pm.pid = sub.pid + AND (pm.lifecycle_phase IS DISTINCT FROM sub.lifecycle_phase); + + GET DIAGNOSTICS _updated = ROW_COUNT; + RAISE NOTICE 'Updated lifecycle_phase for % products', _updated; + + -- Step 2: Compute lifecycle-based lead time and planning period forecasts + -- Uses each product's configured lead time and days of stock + WITH forecast_sums AS ( + SELECT + pf.pid, + SUM(pf.forecast_units) FILTER ( + WHERE pf.forecast_date <= CURRENT_DATE + s.effective_lead_time + ) AS lt_forecast, + SUM(pf.forecast_units) FILTER ( + WHERE pf.forecast_date <= CURRENT_DATE + s.effective_lead_time + s.effective_days_of_stock + ) AS pp_forecast + FROM product_forecasts pf + JOIN ( + SELECT + p.pid, + COALESCE(sp.lead_time_days, sv.default_lead_time_days, + (SELECT setting_value::int FROM settings_global WHERE setting_key = 'default_lead_time_days'), 14 + ) AS effective_lead_time, + COALESCE(sp.days_of_stock, sv.default_days_of_stock, + (SELECT setting_value::int FROM settings_global WHERE setting_key = 'default_days_of_stock'), 30 + ) AS effective_days_of_stock + FROM products p + LEFT JOIN settings_product sp ON p.pid = sp.pid + LEFT JOIN settings_vendor sv ON p.vendor = sv.vendor + ) s ON s.pid = pf.pid + WHERE pf.forecast_date >= CURRENT_DATE + GROUP BY pf.pid + ) + UPDATE product_metrics pm + SET + lifecycle_lead_time_forecast = COALESCE(fs.lt_forecast, 0), + lifecycle_planning_period_forecast = COALESCE(fs.pp_forecast, 0) + FROM forecast_sums fs + WHERE pm.pid = fs.pid + AND (pm.lifecycle_lead_time_forecast IS DISTINCT FROM COALESCE(fs.lt_forecast, 0) + OR pm.lifecycle_planning_period_forecast IS DISTINCT FROM COALESCE(fs.pp_forecast, 0)); + + GET DIAGNOSTICS _updated = ROW_COUNT; + RAISE NOTICE 'Updated lifecycle forecasts for % products', _updated; + + -- Step 3: Reclassify demand_pattern using residual CV (de-trended) + -- For launch/decay products, raw CV is high because of expected lifecycle decay. + -- We subtract the expected brand curve value to get residuals, then compute CV on those. 
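+  -- Example (hypothetical numbers): a launch product whose brand curve
+  -- expects ~20 units/day and actually sells 18-22/day has a residual
+  -- stddev near 1.5, so residual CV ≈ 1.5 / 20 = 0.075, a low value, even
+  -- though its raw CV over the same window is inflated by the decline.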
+ -- Products that track their brand curve closely → low residual CV → "stable" + -- Products with erratic deviations from curve → higher residual CV → "variable"/"sporadic" + WITH product_curve AS ( + -- Get each product's brand curve and age + SELECT + pm.pid, + pm.lifecycle_phase, + pm.date_first_received, + blc.amplitude, + blc.decay_rate, + blc.baseline + FROM product_metrics pm + JOIN products p ON p.pid = pm.pid + LEFT JOIN brand_lifecycle_curves blc + ON blc.brand = pm.brand + AND blc.root_category IS NULL -- brand-only curve + WHERE pm.lifecycle_phase IN ('launch', 'decay') + AND pm.date_first_received IS NOT NULL + AND blc.amplitude IS NOT NULL + ), + daily_residuals AS ( + -- Compute residual = actual - expected for each snapshot day + -- Curve params are in WEEKLY units; divide by 7 to get daily expected + SELECT + dps.pid, + dps.units_sold, + (pc.amplitude * EXP(-pc.decay_rate * (dps.snapshot_date - pc.date_first_received)::numeric / 7.0) + pc.baseline) / 7.0 AS expected, + dps.units_sold - (pc.amplitude * EXP(-pc.decay_rate * (dps.snapshot_date - pc.date_first_received)::numeric / 7.0) + pc.baseline) / 7.0 AS residual + FROM daily_product_snapshots dps + JOIN product_curve pc ON pc.pid = dps.pid + WHERE dps.snapshot_date >= CURRENT_DATE - INTERVAL '29 days' + AND dps.snapshot_date <= CURRENT_DATE + ), + residual_cv AS ( + SELECT + pid, + AVG(units_sold) AS avg_sales, + CASE WHEN COUNT(*) >= 7 AND AVG(ABS(expected)) > 0.01 THEN + STDDEV_POP(residual) / GREATEST(AVG(ABS(expected)), 0.1) + END AS res_cv + FROM daily_residuals + GROUP BY pid + ) + UPDATE product_metrics pm + SET demand_pattern = classify_demand_pattern(rc.avg_sales, rc.res_cv) + FROM residual_cv rc + WHERE pm.pid = rc.pid + AND rc.res_cv IS NOT NULL + AND pm.demand_pattern IS DISTINCT FROM classify_demand_pattern(rc.avg_sales, rc.res_cv); + + GET DIAGNOSTICS _updated = ROW_COUNT; + RAISE NOTICE 'Reclassified demand_pattern for % launch/decay products', _updated; + + -- Update tracking + INSERT INTO public.calculate_status (module_name, last_calculation_timestamp) + VALUES (_module_name, clock_timestamp()) + ON CONFLICT (module_name) DO UPDATE SET + last_calculation_timestamp = EXCLUDED.last_calculation_timestamp; + + RAISE NOTICE '% module complete. 
Duration: %', _module_name, clock_timestamp() - _start_time; +END $$; diff --git a/inventory-server/src/routes/dashboard.js b/inventory-server/src/routes/dashboard.js index 7a9c341..df9dbb5 100644 --- a/inventory-server/src/routes/dashboard.js +++ b/inventory-server/src/routes/dashboard.js @@ -67,6 +67,23 @@ router.get('/stock/metrics', async (req, res) => { ORDER BY CASE WHEN brand = 'Other' THEN 1 ELSE 0 END, stock_cost DESC `); + // Stock breakdown by lifecycle phase (lifecycle_phase populated by update_lifecycle_forecasts.sql) + const { rows: phaseStock } = await executeQuery(` + SELECT + COALESCE(pm.lifecycle_phase, 'unknown') AS phase, + COUNT(DISTINCT pm.pid)::integer AS products, + COALESCE(SUM(pm.current_stock), 0)::integer AS units, + ROUND(COALESCE(SUM(pm.current_stock_cost), 0)::numeric, 2) AS cost, + ROUND(COALESCE(SUM(pm.current_stock_retail), 0)::numeric, 2) AS retail + FROM product_metrics pm + WHERE pm.is_visible = true AND pm.current_stock > 0 + AND COALESCE(pm.preorder_count, 0) = 0 + GROUP BY pm.lifecycle_phase + ORDER BY cost DESC + `); + + const phaseTotalCost = phaseStock.reduce((s, r) => s + (parseFloat(r.cost) || 0), 0); + // Format the response with explicit type conversion const response = { totalProducts: parseInt(stockMetrics.total_products) || 0, @@ -80,7 +97,17 @@ router.get('/stock/metrics', async (req, res) => { units: parseInt(v.stock_units) || 0, cost: parseFloat(v.stock_cost) || 0, retail: parseFloat(v.stock_retail) || 0 - })) + })), + phaseStock: phaseStock.filter(r => parseFloat(r.cost) > 0).map(r => ({ + phase: r.phase, + products: parseInt(r.products) || 0, + units: parseInt(r.units) || 0, + cost: parseFloat(r.cost) || 0, + retail: parseFloat(r.retail) || 0, + percentage: phaseTotalCost > 0 + ? parseFloat(((parseFloat(r.cost) / phaseTotalCost) * 100).toFixed(1)) + : 0, + })), }; res.json(response); @@ -208,12 +235,39 @@ router.get('/replenishment/metrics', async (req, res) => { LIMIT 5 `); + // Replenishment breakdown by lifecycle phase (lifecycle_phase on product_metrics) + const { rows: phaseReplenish } = await executeQuery(` + SELECT + COALESCE(pm.lifecycle_phase, 'unknown') AS phase, + COUNT(DISTINCT pm.pid)::integer AS products, + COALESCE(SUM(pm.replenishment_units), 0)::integer AS units, + ROUND(COALESCE(SUM(pm.replenishment_cost), 0)::numeric, 2) AS cost + FROM product_metrics pm + WHERE pm.is_visible = true + AND pm.is_replenishable = true + AND (pm.status IN ('Critical', 'Reorder') OR pm.current_stock < 0) + AND pm.replenishment_units > 0 + GROUP BY pm.lifecycle_phase + ORDER BY cost DESC + `); + + const replenishTotalCost = phaseReplenish.reduce((s, r) => s + (parseFloat(r.cost) || 0), 0); + // Format response const response = { productsToReplenish: parseInt(metrics.products_to_replenish) || 0, unitsToReplenish: parseInt(metrics.total_units_needed) || 0, replenishmentCost: parseFloat(metrics.total_cost) || 0, replenishmentRetail: parseFloat(metrics.total_retail) || 0, + phaseBreakdown: phaseReplenish.filter(r => parseFloat(r.cost) > 0).map(r => ({ + phase: r.phase, + products: parseInt(r.products) || 0, + units: parseInt(r.units) || 0, + cost: parseFloat(r.cost) || 0, + percentage: replenishTotalCost > 0 + ? 
parseFloat(((parseFloat(r.cost) / replenishTotalCost) * 100).toFixed(1)) + : 0, + })), topVariants: variants.map(v => ({ id: v.pid, title: v.title, @@ -234,165 +288,499 @@ router.get('/replenishment/metrics', async (req, res) => { }); // GET /dashboard/forecast/metrics -// Returns sales forecasts for specified period +// Reads from product_forecasts table (lifecycle-aware forecasting pipeline). +// Falls back to velocity-based projection if forecast table is empty. router.get('/forecast/metrics', async (req, res) => { - // Default to last 30 days if no date range provided const today = new Date(); - const thirtyDaysAgo = new Date(today); - thirtyDaysAgo.setDate(today.getDate() - 30); - - const startDate = req.query.startDate || thirtyDaysAgo.toISOString(); - const endDate = req.query.endDate || today.toISOString(); - + const thirtyDaysOut = new Date(today); + thirtyDaysOut.setDate(today.getDate() + 30); + + const startDate = req.query.startDate ? new Date(req.query.startDate) : today; + const endDate = req.query.endDate ? new Date(req.query.endDate) : thirtyDaysOut; + const startISO = startDate.toISOString().split('T')[0]; + const endISO = endDate.toISOString().split('T')[0]; + const days = Math.max(1, Math.round((endDate - startDate) / (1000 * 60 * 60 * 24))); + try { - // Check if sales_forecasts table exists and has data - const { rows: tableCheck } = await executeQuery(` - SELECT EXISTS ( - SELECT FROM information_schema.tables - WHERE table_schema = 'public' - AND table_name = 'sales_forecasts' - ) as table_exists - `); - - const tableExists = tableCheck[0].table_exists; - - if (!tableExists) { - console.log('sales_forecasts table does not exist, returning dummy data'); - - // Generate dummy data for forecast - const days = 30; - const dummyData = []; - const startDateObj = new Date(startDate); - - for (let i = 0; i < days; i++) { - const currentDate = new Date(startDateObj); - currentDate.setDate(startDateObj.getDate() + i); - - // Use sales data with slight randomization - const baseValue = 500 + Math.random() * 200; - dummyData.push({ - date: currentDate.toISOString().split('T')[0], - revenue: parseFloat((baseValue + Math.random() * 100).toFixed(2)), - confidence: parseFloat((0.7 + Math.random() * 0.2).toFixed(2)) - }); + // Check if product_forecasts has data + const { rows: [countRow] } = await executeQuery( + `SELECT COUNT(*) AS cnt FROM product_forecasts WHERE forecast_date >= $1 LIMIT 1`, + [startISO] + ); + const hasForecastData = parseInt(countRow.cnt) > 0; + + if (hasForecastData) { + // --- Read from lifecycle-aware forecast pipeline --- + + // Find the last date covered by product_forecasts + const { rows: [horizonRow] } = await executeQuery( + `SELECT MAX(forecast_date) AS max_date FROM product_forecasts` + ); + const forecastHorizonISO = horizonRow.max_date instanceof Date + ? horizonRow.max_date.toISOString().split('T')[0] + : horizonRow.max_date; + const forecastHorizon = new Date(forecastHorizonISO + 'T00:00:00'); + const clampedEndISO = endISO <= forecastHorizonISO ? 
endISO : forecastHorizonISO; + const needsExtrapolation = endISO > forecastHorizonISO; + + // Totals from actual forecast data (clamped to horizon) + const { rows: [totals] } = await executeQuery(` + SELECT + COALESCE(SUM(pf.forecast_units), 0) AS total_units, + COALESCE(SUM(pf.forecast_revenue), 0) AS total_revenue, + COUNT(DISTINCT pf.pid) FILTER ( + WHERE pf.lifecycle_phase IN ('launch','decay','mature','preorder','slow_mover') + ) AS active_products, + COUNT(DISTINCT pf.pid) FILTER ( + WHERE pf.forecast_method = 'lifecycle_curve' + ) AS curve_products + FROM product_forecasts pf + JOIN product_metrics pm ON pm.pid = pf.pid + WHERE pm.is_visible = true + AND pf.forecast_date BETWEEN $1 AND $2 + `, [startISO, clampedEndISO]); + + const active = parseInt(totals.active_products) || 1; + const curveProducts = parseInt(totals.curve_products) || 0; + const confidenceLevel = parseFloat((curveProducts / active).toFixed(2)); + + // Daily series from actual forecast + const { rows: dailyRows } = await executeQuery(` + SELECT pf.forecast_date AS date, + SUM(pf.forecast_units) AS units, + SUM(pf.forecast_revenue) AS revenue + FROM product_forecasts pf + JOIN product_metrics pm ON pm.pid = pf.pid + WHERE pm.is_visible = true + AND pf.forecast_date BETWEEN $1 AND $2 + GROUP BY pf.forecast_date + ORDER BY pf.forecast_date + `, [startISO, clampedEndISO]); + + const dailyForecasts = dailyRows.map(d => ({ + date: d.date instanceof Date ? d.date.toISOString().split('T')[0] : d.date, + units: parseFloat(d.units) || 0, + revenue: parseFloat(d.revenue) || 0, + confidence: confidenceLevel, + })); + + // Daily forecast broken down by lifecycle phase (for stacked chart) + const { rows: dailyPhaseRows } = await executeQuery(` + SELECT pf.forecast_date AS date, + COALESCE(SUM(pf.forecast_revenue) FILTER (WHERE pf.lifecycle_phase = 'preorder'), 0) AS preorder, + COALESCE(SUM(pf.forecast_revenue) FILTER (WHERE pf.lifecycle_phase = 'launch'), 0) AS launch, + COALESCE(SUM(pf.forecast_revenue) FILTER (WHERE pf.lifecycle_phase = 'decay'), 0) AS decay, + COALESCE(SUM(pf.forecast_revenue) FILTER (WHERE pf.lifecycle_phase = 'mature'), 0) AS mature, + COALESCE(SUM(pf.forecast_revenue) FILTER (WHERE pf.lifecycle_phase = 'slow_mover'), 0) AS slow_mover, + COALESCE(SUM(pf.forecast_revenue) FILTER (WHERE pf.lifecycle_phase = 'dormant'), 0) AS dormant + FROM product_forecasts pf + JOIN product_metrics pm ON pm.pid = pf.pid + WHERE pm.is_visible = true + AND pf.forecast_date BETWEEN $1 AND $2 + GROUP BY pf.forecast_date + ORDER BY pf.forecast_date + `, [startISO, clampedEndISO]); + + // --- New product pipeline contribution --- + // Average daily revenue from new product introductions (last 12 months). + // Only used for EXTRAPOLATED days beyond the forecast horizon — within the + // 90-day horizon, preorder/launch products are already forecast by lifecycle curves. 
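+      // e.g. (hypothetical numbers): $45,000 of average monthly first-30-day
+      // revenue works out to $1,500/day of pipeline revenue, or ~214
+      // units/day at the $7 fallback average price computed below.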
+ const { rows: [pipeline] } = await executeQuery(` + SELECT + COALESCE(AVG(monthly_revenue), 0) AS avg_monthly_revenue + FROM ( + SELECT DATE_TRUNC('month', pm.date_first_received) AS month, + COUNT(*) AS monthly_products, + SUM(pm.first_30_days_revenue) AS monthly_revenue + FROM product_metrics pm + WHERE pm.is_visible = true + AND pm.date_first_received >= NOW() - INTERVAL '12 months' + AND pm.date_first_received < DATE_TRUNC('month', NOW()) + GROUP BY 1 + ) sub + `); + // Compute average product price for converting revenue to unit estimates + const { rows: [priceRow] } = await executeQuery(` + SELECT COALESCE(AVG(current_price) FILTER (WHERE current_price > 0 AND sales_30d > 0), 7) AS avg_price + FROM product_metrics + WHERE is_visible = true + `); + const avgPrice = parseFloat(priceRow.avg_price) || 7; + + // Daily new-product revenue = (avg products/month × avg 30d revenue/product) / 30 + const avgMonthlyRevenue = parseFloat(pipeline.avg_monthly_revenue) || 0; + const newProductDailyRevenue = avgMonthlyRevenue / 30; + const newProductDailyUnits = newProductDailyRevenue / avgPrice; + + let totalRevenue = dailyForecasts.reduce((sum, d) => sum + d.revenue, 0); + let totalUnits = dailyForecasts.reduce((sum, d) => sum + d.units, 0); + + // --- Extrapolation beyond forecast horizon (rest-of-year) --- + if (needsExtrapolation) { + // Monthly seasonal indices from last 12 months of actual revenue + const { rows: seasonalRows } = await executeQuery(` + SELECT EXTRACT(MONTH FROM o.date)::int AS month, + SUM(o.quantity * o.price) AS revenue + FROM orders o + WHERE o.canceled IS DISTINCT FROM TRUE + AND o.date >= NOW() - INTERVAL '12 months' + GROUP BY 1 + `); + const monthlyRevenue = {}; + let totalMonthlyRev = 0; + for (const r of seasonalRows) { + monthlyRevenue[r.month] = parseFloat(r.revenue) || 0; + totalMonthlyRev += monthlyRevenue[r.month]; + } + const avgMonthRev = totalMonthlyRev / Math.max(Object.keys(monthlyRevenue).length, 1); + const seasonalIndex = {}; + for (let m = 1; m <= 12; m++) { + seasonalIndex[m] = monthlyRevenue[m] ? monthlyRevenue[m] / avgMonthRev : 1.0; + } + + // Baseline: avg daily revenue from last 7 days of forecast (mature tail) + const tailDays = dailyForecasts.slice(-7); + const baselineDaily = tailDays.length > 0 + ? 
tailDays.reduce((s, d) => s + d.revenue, 0) / tailDays.length + : 0; + + // Generate estimated days beyond horizon + const extraStart = new Date(forecastHorizon); + extraStart.setDate(extraStart.getDate() + 1); + const extraEnd = new Date(endISO + 'T00:00:00'); + + for (let d = new Date(extraStart); d <= extraEnd; d.setDate(d.getDate() + 1)) { + const month = d.getMonth() + 1; + const seasonal = seasonalIndex[month] || 1.0; + // Beyond horizon: existing product tail + new product pipeline + const estRevenue = baselineDaily * seasonal + newProductDailyRevenue; + const estUnits = (baselineDaily * seasonal) / avgPrice + newProductDailyUnits; + + dailyForecasts.push({ + date: d.toISOString().split('T')[0], + units: parseFloat(estUnits.toFixed(1)), + revenue: parseFloat(estRevenue.toFixed(2)), + confidence: 0, // lower confidence for extrapolated data + estimated: true, + }); + totalRevenue += estRevenue; + totalUnits += estUnits; + } } - - // Return dummy response - const response = { - forecastSales: 500, - forecastRevenue: 25000, - confidenceLevel: 0.85, - dailyForecasts: dummyData, - categoryForecasts: [ - { category: "Electronics", units: 120, revenue: 6000, confidence: 0.9 }, - { category: "Clothing", units: 80, revenue: 4000, confidence: 0.8 }, - { category: "Home Goods", units: 150, revenue: 7500, confidence: 0.75 }, - { category: "Others", units: 150, revenue: 7500, confidence: 0.7 } - ] - }; - - return res.json(response); - } - - // If the table exists, try to query it with proper error handling - try { - // Get summary metrics - const { rows: metrics } = await executeQuery(` - SELECT - COALESCE(SUM(forecast_units), 0) as total_forecast_units, - COALESCE(SUM(forecast_revenue), 0) as total_forecast_revenue, - COALESCE(AVG(confidence_level), 0) as overall_confidence - FROM sales_forecasts - WHERE forecast_date BETWEEN $1 AND $2 - `, [startDate, endDate]); - // Get daily forecasts - const { rows: dailyForecasts } = await executeQuery(` - SELECT - DATE(forecast_date) as date, - COALESCE(SUM(forecast_revenue), 0) as revenue, - COALESCE(AVG(confidence_level), 0) as confidence - FROM sales_forecasts - WHERE forecast_date BETWEEN $1 AND $2 - GROUP BY DATE(forecast_date) - ORDER BY date - `, [startDate, endDate]); - - // Get category forecasts - const { rows: categoryForecasts } = await executeQuery(` - SELECT - c.name as category, - COALESCE(SUM(cf.forecast_units), 0) as units, - COALESCE(SUM(cf.forecast_revenue), 0) as revenue, - COALESCE(AVG(cf.confidence_level), 0) as confidence - FROM category_forecasts cf - JOIN categories c ON cf.category_id = c.cat_id - WHERE cf.forecast_date BETWEEN $1 AND $2 - GROUP BY c.cat_id, c.name + // Lifecycle phase breakdown (from actual forecast data only) + const { rows: phaseRows } = await executeQuery(` + SELECT pf.lifecycle_phase AS phase, + COUNT(DISTINCT pf.pid) AS products, + COALESCE(SUM(pf.forecast_units), 0) AS units, + COALESCE(SUM(pf.forecast_revenue), 0) AS revenue + FROM product_forecasts pf + JOIN product_metrics pm ON pm.pid = pf.pid + WHERE pm.is_visible = true + AND pf.forecast_date BETWEEN $1 AND $2 + GROUP BY pf.lifecycle_phase ORDER BY revenue DESC - `, [startDate, endDate]); + `, [startISO, clampedEndISO]); - // Format response - const response = { - forecastSales: parseInt(metrics[0]?.total_forecast_units) || 0, - forecastRevenue: parseFloat(metrics[0]?.total_forecast_revenue) || 0, - confidenceLevel: parseFloat(metrics[0]?.overall_confidence) || 0, - dailyForecasts: dailyForecasts.map(d => ({ - date: d.date, - revenue: 
parseFloat(d.revenue) || 0, - confidence: parseFloat(d.confidence) || 0 - })), - categoryForecasts: categoryForecasts.map(c => ({ - category: c.category, - units: parseInt(c.units) || 0, - revenue: parseFloat(c.revenue) || 0, - confidence: parseFloat(c.confidence) || 0 - })) - }; + const phaseTotal = phaseRows.reduce((s, r) => s + (parseFloat(r.revenue) || 0), 0); + const phaseBreakdown = phaseRows + .filter(r => parseFloat(r.revenue) > 0) + .map(r => ({ + phase: r.phase, + products: parseInt(r.products) || 0, + units: Math.round(parseFloat(r.units) || 0), + revenue: parseFloat(parseFloat(r.revenue).toFixed(2)), + percentage: phaseTotal > 0 + ? parseFloat(((parseFloat(r.revenue) / phaseTotal) * 100).toFixed(1)) + : 0, + })); - res.json(response); - } catch (err) { - console.error('Error with forecast tables structure, returning dummy data:', err); - - // Generate dummy data for forecast as fallback - const days = 30; - const dummyData = []; - const startDateObj = new Date(startDate); - - for (let i = 0; i < days; i++) { - const currentDate = new Date(startDateObj); - currentDate.setDate(startDateObj.getDate() + i); - - const baseValue = 500 + Math.random() * 200; - dummyData.push({ - date: currentDate.toISOString().split('T')[0], - revenue: parseFloat((baseValue + Math.random() * 100).toFixed(2)), - confidence: parseFloat((0.7 + Math.random() * 0.2).toFixed(2)) - }); + // Category breakdown (from actual forecast data only) + const { rows: categoryRows } = await executeQuery(` + WITH product_root_category AS ( + SELECT DISTINCT ON (pf.pid) + pf.pid, ch.name AS category + FROM product_forecasts pf + JOIN product_metrics pm ON pm.pid = pf.pid + JOIN product_categories pc ON pc.pid = pf.pid + JOIN category_hierarchy ch ON ch.cat_id = pc.cat_id AND ch.level = 0 + WHERE pm.is_visible = true + AND ch.name NOT IN ('Deals', 'Black Friday') + AND pf.forecast_date BETWEEN $1 AND $2 + ORDER BY pf.pid, ch.name + ) + SELECT prc.category, + SUM(pf.forecast_units) AS units, + SUM(pf.forecast_revenue) AS revenue + FROM product_forecasts pf + JOIN product_root_category prc ON prc.pid = pf.pid + WHERE pf.forecast_date BETWEEN $1 AND $2 + GROUP BY prc.category + ORDER BY revenue DESC + LIMIT 8 + `, [startISO, clampedEndISO]); + + const dailyForecastsByPhase = dailyPhaseRows.map(d => ({ + date: d.date instanceof Date ? d.date.toISOString().split('T')[0] : d.date, + preorder: parseFloat(d.preorder) || 0, + launch: parseFloat(d.launch) || 0, + decay: parseFloat(d.decay) || 0, + mature: parseFloat(d.mature) || 0, + slow_mover: parseFloat(d.slow_mover) || 0, + dormant: parseFloat(d.dormant) || 0, + })); + + // Add extrapolated days to phase series (distribute proportionally using last phase ratios) + if (needsExtrapolation && dailyForecastsByPhase.length > 0) { + const lastPhaseDay = dailyForecastsByPhase[dailyForecastsByPhase.length - 1]; + const phases = ['preorder', 'launch', 'decay', 'mature', 'slow_mover', 'dormant']; + const lastTotal = phases.reduce((s, p) => s + lastPhaseDay[p], 0); + const phaseRatios = {}; + for (const p of phases) { + phaseRatios[p] = lastTotal > 0 ? 
lastPhaseDay[p] / lastTotal : 1 / phases.length; + } + // Match extrapolated days from dailyForecasts + for (let i = dailyForecastsByPhase.length; i < dailyForecasts.length; i++) { + const dayRev = dailyForecasts[i].revenue; + const entry = { date: dailyForecasts[i].date }; + for (const p of phases) { + entry[p] = parseFloat((dayRev * phaseRatios[p]).toFixed(2)); + } + dailyForecastsByPhase.push(entry); + } } - - // Return dummy response - const response = { - forecastSales: 500, - forecastRevenue: 25000, - confidenceLevel: 0.85, - dailyForecasts: dummyData, - categoryForecasts: [ - { category: "Electronics", units: 120, revenue: 6000, confidence: 0.9 }, - { category: "Clothing", units: 80, revenue: 4000, confidence: 0.8 }, - { category: "Home Goods", units: 150, revenue: 7500, confidence: 0.75 }, - { category: "Others", units: 150, revenue: 7500, confidence: 0.7 } - ] - }; - - res.json(response); + + return res.json({ + forecastSales: Math.round(totalUnits), + forecastRevenue: totalRevenue.toFixed(2), + confidenceLevel, + dailyForecasts, + dailyForecastsByPhase, + phaseBreakdown, + categoryForecasts: categoryRows.map(c => ({ + category: c.category, + units: Math.round(parseFloat(c.units)), + revenue: parseFloat(parseFloat(c.revenue).toFixed(2)), + })), + }); } + + // --- Fallback: velocity-based projection (no forecast data yet) --- + const { rows: [totals] } = await executeQuery(` + SELECT + COALESCE(SUM(sales_velocity_daily), 0) AS daily_units, + COALESCE(SUM(sales_velocity_daily * current_price), 0) AS daily_revenue, + COUNT(*) FILTER (WHERE sales_velocity_daily > 0) AS active_products + FROM product_metrics + WHERE is_visible = true AND sales_velocity_daily > 0 + `); + + const dailyUnits = parseFloat(totals.daily_units) || 0; + const dailyRevenue = parseFloat(totals.daily_revenue) || 0; + + const dailyForecasts = []; + for (let i = 0; i < days; i++) { + const d = new Date(startDate); + d.setDate(startDate.getDate() + i); + dailyForecasts.push({ + date: d.toISOString().split('T')[0], + units: parseFloat(dailyUnits.toFixed(1)), + revenue: parseFloat(dailyRevenue.toFixed(2)), + confidence: 0, + }); + } + + const { rows: categoryRows } = await executeQuery(` + WITH product_root_category AS ( + SELECT DISTINCT ON (pm.pid) pm.pid, + pm.sales_velocity_daily, pm.current_price, + ch.name AS category + FROM product_metrics pm + JOIN product_categories pc ON pc.pid = pm.pid + JOIN category_hierarchy ch ON ch.cat_id = pc.cat_id AND ch.level = 0 + WHERE pm.is_visible = true AND pm.sales_velocity_daily > 0 + AND ch.name NOT IN ('Deals', 'Black Friday') + ORDER BY pm.pid, ch.name + ) + SELECT category, + ROUND(SUM(sales_velocity_daily)::numeric, 1) AS daily_units, + ROUND(SUM(sales_velocity_daily * current_price)::numeric, 2) AS daily_revenue + FROM product_root_category + GROUP BY category ORDER BY daily_revenue DESC LIMIT 8 + `); + + res.json({ + forecastSales: Math.round(dailyUnits * days), + forecastRevenue: (dailyRevenue * days).toFixed(2), + confidenceLevel: 0, + dailyForecasts, + categoryForecasts: categoryRows.map(c => ({ + category: c.category, + units: Math.round(parseFloat(c.daily_units) * days), + revenue: parseFloat((parseFloat(c.daily_revenue) * days).toFixed(2)), + })), + }); } catch (err) { console.error('Error fetching forecast metrics:', err); res.status(500).json({ error: 'Failed to fetch forecast metrics' }); } }); +// GET /dashboard/forecast/accuracy +// Returns forecast accuracy metrics computed by the forecast engine. 
+// Reads from forecast_accuracy table (populated after each forecast run). +router.get('/forecast/accuracy', async (req, res) => { + try { + // Check if forecast_accuracy table exists and has data + const { rows: [tableCheck] } = await executeQuery(` + SELECT EXISTS ( + SELECT 1 FROM information_schema.tables + WHERE table_name = 'forecast_accuracy' + ) AS exists + `); + + if (!tableCheck.exists) { + return res.json({ hasData: false, message: 'Accuracy data not yet available' }); + } + + // Get the latest run that has accuracy data + const { rows: runRows } = await executeQuery(` + SELECT DISTINCT fa.run_id, fr.finished_at + FROM forecast_accuracy fa + JOIN forecast_runs fr ON fr.id = fa.run_id + ORDER BY fr.finished_at DESC + LIMIT 1 + `); + + if (runRows.length === 0) { + return res.json({ hasData: false, message: 'No accuracy data computed yet' }); + } + + const latestRunId = runRows[0].run_id; + const computedAt = runRows[0].finished_at; + + // Count days of history available + const { rows: [historyInfo] } = await executeQuery(` + SELECT + COUNT(DISTINCT forecast_date) AS days_of_history, + MIN(forecast_date) AS earliest_date, + MAX(forecast_date) AS latest_date + FROM product_forecasts_history + `); + + // Fetch all accuracy metrics for the latest run + const { rows: metrics } = await executeQuery(` + SELECT metric_type, dimension_value, sample_size, + total_actual_units, total_forecast_units, + mae, wmape, bias, rmse + FROM forecast_accuracy + WHERE run_id = $1 + ORDER BY metric_type, dimension_value + `, [latestRunId]); + + // Organize into response structure + const overall = metrics.find(m => m.metric_type === 'overall'); + const byPhase = metrics + .filter(m => m.metric_type === 'by_phase') + .map(m => ({ + phase: m.dimension_value, + sampleSize: parseInt(m.sample_size), + totalActual: parseFloat(m.total_actual_units) || 0, + totalForecast: parseFloat(m.total_forecast_units) || 0, + mae: m.mae != null ? parseFloat(parseFloat(m.mae).toFixed(4)) : null, + wmape: m.wmape != null ? parseFloat((parseFloat(m.wmape) * 100).toFixed(1)) : null, + bias: m.bias != null ? parseFloat(parseFloat(m.bias).toFixed(4)) : null, + rmse: m.rmse != null ? parseFloat(parseFloat(m.rmse).toFixed(4)) : null, + })) + .sort((a, b) => (b.totalActual || 0) - (a.totalActual || 0)); + + const byLeadTime = metrics + .filter(m => m.metric_type === 'by_lead_time') + .map(m => ({ + bucket: m.dimension_value, + sampleSize: parseInt(m.sample_size), + mae: m.mae != null ? parseFloat(parseFloat(m.mae).toFixed(4)) : null, + wmape: m.wmape != null ? parseFloat((parseFloat(m.wmape) * 100).toFixed(1)) : null, + bias: m.bias != null ? parseFloat(parseFloat(m.bias).toFixed(4)) : null, + rmse: m.rmse != null ? parseFloat(parseFloat(m.rmse).toFixed(4)) : null, + })) + .sort((a, b) => { + const order = { '1-7d': 0, '8-14d': 1, '15-30d': 2, '31-60d': 3, '61-90d': 4 }; + return (order[a.bucket] ?? 99) - (order[b.bucket] ?? 99); + }); + + const byMethod = metrics + .filter(m => m.metric_type === 'by_method') + .map(m => ({ + method: m.dimension_value, + sampleSize: parseInt(m.sample_size), + mae: m.mae != null ? parseFloat(parseFloat(m.mae).toFixed(4)) : null, + wmape: m.wmape != null ? parseFloat((parseFloat(m.wmape) * 100).toFixed(1)) : null, + bias: m.bias != null ? parseFloat(parseFloat(m.bias).toFixed(4)) : null, + })); + + const dailyTrend = metrics + .filter(m => m.metric_type === 'daily') + .map(m => ({ + date: m.dimension_value, + mae: m.mae != null ? 
parseFloat(parseFloat(m.mae).toFixed(4)) : null, + wmape: m.wmape != null ? parseFloat((parseFloat(m.wmape) * 100).toFixed(1)) : null, + bias: m.bias != null ? parseFloat(parseFloat(m.bias).toFixed(4)) : null, + })) + .sort((a, b) => a.date.localeCompare(b.date)); + + // Historical accuracy trend (across runs) + const { rows: trendRows } = await executeQuery(` + SELECT fa.run_id, fr.finished_at::date AS run_date, + fa.mae, fa.wmape, fa.bias, fa.rmse, fa.sample_size + FROM forecast_accuracy fa + JOIN forecast_runs fr ON fr.id = fa.run_id + WHERE fa.metric_type = 'overall' + AND fa.dimension_value = 'all' + ORDER BY fr.finished_at + `); + + const accuracyTrend = trendRows.map(r => ({ + date: r.run_date instanceof Date ? r.run_date.toISOString().split('T')[0] : r.run_date, + mae: r.mae != null ? parseFloat(parseFloat(r.mae).toFixed(4)) : null, + wmape: r.wmape != null ? parseFloat((parseFloat(r.wmape) * 100).toFixed(1)) : null, + bias: r.bias != null ? parseFloat(parseFloat(r.bias).toFixed(4)) : null, + sampleSize: parseInt(r.sample_size), + })); + + res.json({ + hasData: true, + computedAt, + daysOfHistory: parseInt(historyInfo.days_of_history) || 0, + historyRange: { + from: historyInfo.earliest_date instanceof Date + ? historyInfo.earliest_date.toISOString().split('T')[0] + : historyInfo.earliest_date, + to: historyInfo.latest_date instanceof Date + ? historyInfo.latest_date.toISOString().split('T')[0] + : historyInfo.latest_date, + }, + overall: overall ? { + sampleSize: parseInt(overall.sample_size), + totalActual: parseFloat(overall.total_actual_units) || 0, + totalForecast: parseFloat(overall.total_forecast_units) || 0, + mae: overall.mae != null ? parseFloat(parseFloat(overall.mae).toFixed(4)) : null, + wmape: overall.wmape != null ? parseFloat((parseFloat(overall.wmape) * 100).toFixed(1)) : null, + bias: overall.bias != null ? parseFloat(parseFloat(overall.bias).toFixed(4)) : null, + rmse: overall.rmse != null ? 
parseFloat(parseFloat(overall.rmse).toFixed(4)) : null, + } : null, + byPhase, + byLeadTime, + byMethod, + dailyTrend, + accuracyTrend, + }); + } catch (err) { + console.error('Error fetching forecast accuracy:', err); + res.status(500).json({ error: 'Failed to fetch forecast accuracy' }); + } +}); + // GET /dashboard/overstock/metrics // Returns overstock metrics by category router.get('/overstock/metrics', async (req, res) => { @@ -427,7 +815,7 @@ router.get('/overstock/metrics', async (req, res) => { // Get category breakdowns separately const { rows: categoryData } = await executeQuery(` - SELECT + SELECT c.name as category_name, COUNT(DISTINCT pm.pid)::integer as overstocked_products, SUM(pm.overstocked_units)::integer as total_excess_units, @@ -443,6 +831,22 @@ router.get('/overstock/metrics', async (req, res) => { LIMIT 8 `); + // Overstock breakdown by lifecycle phase + const { rows: phaseOverstock } = await executeQuery(` + SELECT + COALESCE(pm.lifecycle_phase, 'unknown') AS phase, + COUNT(DISTINCT pm.pid)::integer AS products, + COALESCE(SUM(pm.overstocked_units), 0)::integer AS units, + ROUND(COALESCE(SUM(pm.overstocked_cost), 0)::numeric, 2) AS cost, + ROUND(COALESCE(SUM(pm.overstocked_retail), 0)::numeric, 2) AS retail + FROM product_metrics pm + WHERE pm.status = 'Overstock' AND pm.is_visible = true + AND COALESCE(pm.preorder_count, 0) = 0 + GROUP BY pm.lifecycle_phase + ORDER BY cost DESC + `); + const overstockPhaseTotalCost = phaseOverstock.reduce((s, r) => s + (parseFloat(r.cost) || 0), 0); + // Format response with explicit type conversion const response = { overstockedProducts: parseInt(summaryMetrics.total_overstocked) || 0, @@ -455,7 +859,17 @@ router.get('/overstock/metrics', async (req, res) => { units: parseInt(cat.total_excess_units) || 0, cost: parseFloat(cat.total_excess_cost) || 0, retail: parseFloat(cat.total_excess_retail) || 0 - })) + })), + phaseBreakdown: phaseOverstock.filter(r => parseFloat(r.cost) > 0).map(r => ({ + phase: r.phase, + products: parseInt(r.products) || 0, + units: parseInt(r.units) || 0, + cost: parseFloat(r.cost) || 0, + retail: parseFloat(r.retail) || 0, + percentage: overstockPhaseTotalCost > 0 + ? 
parseFloat(((parseFloat(r.cost) / overstockPhaseTotalCost) * 100).toFixed(1)) + : 0, + })), }; res.json(response); @@ -600,7 +1014,7 @@ router.get('/sales/metrics', async (req, res) => { // Get overall metrics for the period const { rows: [metrics] } = await executeQuery(` - SELECT + SELECT COUNT(DISTINCT order_number) as total_orders, SUM(quantity) as total_units, SUM(price * quantity) as total_revenue, @@ -610,6 +1024,40 @@ router.get('/sales/metrics', async (req, res) => { AND canceled = false `, [startDate, endDate]); + // Sales breakdown by lifecycle phase + const { rows: phaseSales } = await executeQuery(` + SELECT + COALESCE(pm.lifecycle_phase, 'unknown') AS phase, + COUNT(DISTINCT o.order_number)::integer AS orders, + COALESCE(SUM(o.quantity), 0)::integer AS units, + ROUND(COALESCE(SUM(o.price * o.quantity), 0)::numeric, 2) AS revenue, + ROUND(COALESCE(SUM(o.costeach * o.quantity), 0)::numeric, 2) AS cogs + FROM orders o + LEFT JOIN product_metrics pm ON o.pid = pm.pid + WHERE o.date BETWEEN $1 AND $2 AND o.canceled = false + GROUP BY pm.lifecycle_phase + ORDER BY revenue DESC + `, [startDate, endDate]); + const salePhaseTotalRev = phaseSales.reduce((s, r) => s + (parseFloat(r.revenue) || 0), 0); + + // Daily sales broken down by lifecycle phase (for stacked chart) + const { rows: dailyPhaseRows } = await executeQuery(` + SELECT + DATE(o.date) AS sale_date, + COALESCE(SUM(o.price * o.quantity) FILTER (WHERE COALESCE(pm.lifecycle_phase, 'unknown') = 'preorder'), 0) AS preorder, + COALESCE(SUM(o.price * o.quantity) FILTER (WHERE COALESCE(pm.lifecycle_phase, 'unknown') = 'launch'), 0) AS launch, + COALESCE(SUM(o.price * o.quantity) FILTER (WHERE COALESCE(pm.lifecycle_phase, 'unknown') = 'decay'), 0) AS decay, + COALESCE(SUM(o.price * o.quantity) FILTER (WHERE COALESCE(pm.lifecycle_phase, 'unknown') = 'mature'), 0) AS mature, + COALESCE(SUM(o.price * o.quantity) FILTER (WHERE COALESCE(pm.lifecycle_phase, 'unknown') = 'slow_mover'), 0) AS slow_mover, + COALESCE(SUM(o.price * o.quantity) FILTER (WHERE COALESCE(pm.lifecycle_phase, 'unknown') = 'dormant'), 0) AS dormant, + COALESCE(SUM(o.price * o.quantity) FILTER (WHERE pm.lifecycle_phase IS NULL), 0) AS unknown + FROM orders o + LEFT JOIN product_metrics pm ON o.pid = pm.pid + WHERE o.date BETWEEN $1 AND $2 AND o.canceled = false + GROUP BY DATE(o.date) + ORDER BY sale_date + `, [startDate, endDate]); + const response = { totalOrders: parseInt(metrics?.total_orders) || 0, totalUnitsSold: parseInt(metrics?.total_units) || 0, @@ -620,7 +1068,27 @@ router.get('/sales/metrics', async (req, res) => { units: parseInt(day.total_units) || 0, revenue: parseFloat(day.total_revenue) || 0, cogs: parseFloat(day.total_cogs) || 0 - })) + })), + dailySalesByPhase: dailyPhaseRows.map(d => ({ + date: d.sale_date, + preorder: parseFloat(d.preorder) || 0, + launch: parseFloat(d.launch) || 0, + decay: parseFloat(d.decay) || 0, + mature: parseFloat(d.mature) || 0, + slow_mover: parseFloat(d.slow_mover) || 0, + dormant: parseFloat(d.dormant) || 0, + unknown: parseFloat(d.unknown) || 0, + })), + phaseBreakdown: phaseSales.filter(r => parseFloat(r.revenue) > 0).map(r => ({ + phase: r.phase, + orders: parseInt(r.orders) || 0, + units: parseInt(r.units) || 0, + revenue: parseFloat(r.revenue) || 0, + cogs: parseFloat(r.cogs) || 0, + percentage: salePhaseTotalRev > 0 + ? 
parseFloat(((parseFloat(r.revenue) / salePhaseTotalRev) * 100).toFixed(1)) + : 0, + })), }; res.json(response); diff --git a/inventory-server/src/routes/products.js b/inventory-server/src/routes/products.js index 6397c2c..72f3b5e 100644 --- a/inventory-server/src/routes/products.js +++ b/inventory-server/src/routes/products.js @@ -782,4 +782,49 @@ router.get('/:id/time-series', async (req, res) => { } }); +// GET /products/:id/forecast +// Returns the 90-day daily forecast for a single product from product_forecasts +router.get('/:id/forecast', async (req, res) => { + const { id } = req.params; + try { + const pool = req.app.locals.pool; + + const { rows } = await pool.query(` + SELECT + forecast_date AS date, + forecast_units AS units, + forecast_revenue AS revenue, + lifecycle_phase AS phase, + forecast_method AS method, + confidence_lower, + confidence_upper + FROM product_forecasts + WHERE pid = $1 + ORDER BY forecast_date + `, [id]); + + if (rows.length === 0) { + return res.json({ forecast: [], phase: null, method: null }); + } + + const phase = rows[0].phase; + const method = rows[0].method; + + res.json({ + phase, + method, + forecast: rows.map(r => ({ + date: r.date instanceof Date ? r.date.toISOString().split('T')[0] : r.date, + units: parseFloat(r.units) || 0, + revenue: parseFloat(r.revenue) || 0, + confidenceLower: parseFloat(r.confidence_lower) || 0, + confidenceUpper: parseFloat(r.confidence_upper) || 0, + })), + }); + } catch (error) { + console.error('Error fetching product forecast:', error); + res.status(500).json({ error: 'Failed to fetch product forecast' }); + } +}); + module.exports = router; diff --git a/inventory/src/components/overview/BestSellers.tsx b/inventory/src/components/overview/BestSellers.tsx index c45b4e9..ba905d3 100644 --- a/inventory/src/components/overview/BestSellers.tsx +++ b/inventory/src/components/overview/BestSellers.tsx @@ -79,7 +79,7 @@ export function BestSellers() { ) : ( <> - + diff --git a/inventory/src/components/overview/ForecastAccuracy.tsx b/inventory/src/components/overview/ForecastAccuracy.tsx new file mode 100644 index 0000000..f3a9a18 --- /dev/null +++ b/inventory/src/components/overview/ForecastAccuracy.tsx @@ -0,0 +1,294 @@ +import { useQuery } from "@tanstack/react-query" +import { BarChart, Bar, ResponsiveContainer, XAxis, YAxis, Tooltip as RechartsTooltip, Cell, LineChart, Line } from "recharts" +import config from "@/config" +import { Target, TrendingDown, ArrowUpDown } from "lucide-react" +import { Tooltip as UITooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip" +import { PHASE_CONFIG } from "@/utils/lifecyclePhases" + +interface OverallMetrics { + sampleSize: number + totalActual: number + totalForecast: number + mae: number | null + wmape: number | null + bias: number | null + rmse: number | null +} + +interface PhaseAccuracy { + phase: string + sampleSize: number + totalActual: number + totalForecast: number + mae: number | null + wmape: number | null + bias: number | null + rmse: number | null +} + +interface LeadTimeAccuracy { + bucket: string + sampleSize: number + mae: number | null + wmape: number | null + bias: number | null + rmse: number | null +} + +interface AccuracyTrendPoint { + date: string + mae: number | null + wmape: number | null + bias: number | null + sampleSize: number +} + +interface AccuracyData { + hasData: boolean + message?: string + computedAt?: string + daysOfHistory?: number + historyRange?: { from: string; to: string } + overall?: OverallMetrics + 
byPhase?: PhaseAccuracy[] + byLeadTime?: LeadTimeAccuracy[] + byMethod?: { method: string; sampleSize: number; mae: number | null; wmape: number | null; bias: number | null }[] + dailyTrend?: { date: string; mae: number | null; wmape: number | null; bias: number | null }[] + accuracyTrend?: AccuracyTrendPoint[] +} + +function MetricSkeleton() { + return
; }
+
+function formatWmape(wmape: number | null): string {
+  if (wmape === null) return "N/A"
+  return `${wmape.toFixed(1)}%`
+}
+
+function formatBias(bias: number | null): string {
+  if (bias === null) return "N/A"
+  const sign = bias > 0 ? "+" : ""
+  return `${sign}${bias.toFixed(3)}`
+}
+
+function getAccuracyColor(wmape: number | null): string {
+  if (wmape === null) return "text-muted-foreground"
+  if (wmape <= 30) return "text-green-600"
+  if (wmape <= 50) return "text-yellow-600"
+  return "text-red-600"
+}
+
+export function ForecastAccuracy() {
+  const { data, error, isLoading } = useQuery<AccuracyData>({
+    queryKey: ["forecast-accuracy"],
+    queryFn: async () => {
+      const response = await fetch(`${config.apiUrl}/dashboard/forecast/accuracy`)
+      if (!response.ok) {
+        throw new Error("Failed to fetch forecast accuracy")
+      }
+      return response.json()
+    },
+    refetchInterval: 5 * 60 * 1000,
+  })
+
+  if (error) {
+    return (
+

Forecast Accuracy

+

Failed to load accuracy data

+
+ ) + } + + if (!isLoading && data && !data.hasData) { + return ( +
+

Forecast Accuracy

+

+ Accuracy data will be available after the forecast engine has run for at least 2 days, + building up historical comparisons between predictions and actual sales. +

+
+ ) + } + + const phaseChartData = (data?.byPhase || []) + .filter(p => p.wmape !== null && p.phase !== 'dormant') + .map(p => ({ + phase: PHASE_CONFIG[p.phase]?.label || p.phase, + rawPhase: p.phase, + wmape: p.wmape, + mae: p.mae, + bias: p.bias, + sampleSize: p.sampleSize, + })) + .sort((a, b) => (a.wmape ?? 100) - (b.wmape ?? 100)) + + const leadTimeData = (data?.byLeadTime || []).map(lt => ({ + bucket: lt.bucket, + wmape: lt.wmape, + mae: lt.mae, + sampleSize: lt.sampleSize, + })) + + return ( +
+

Forecast Accuracy

+ {isLoading ? ( +
+ + +
+ ) : ( + <> + {/* Headline metrics */} +
+
+
+ +

WMAPE

+
+

+ {formatWmape(data?.overall?.wmape ?? null)} +

+
+
+
+ +

MAE

+
+

+ {data?.overall?.mae != null ? data?.overall?.mae?.toFixed(2) : "N/A"}
+ units
+

+
+
+
+ +

Bias

+
+

+ {formatBias(data?.overall?.bias ?? null)} + + {(data?.overall?.bias ?? 0) > 0 ? "over" : (data?.overall?.bias ?? 0) < 0 ? "under" : ""} + +

+
+
+ + {/* Phase accuracy bar */} + {phaseChartData.length > 0 && ( +
+

WMAPE by Lifecycle Phase

+ +
+ {phaseChartData.map((p) => { + const cfg = PHASE_CONFIG[p.rawPhase] || { label: p.phase, color: "#94A3B8" } + const maxWmape = Math.max(...phaseChartData.map(d => d.wmape ?? 0), 1) + const barWidth = ((p.wmape ?? 0) / maxWmape) * 100 + return ( + + +
+ {cfg.label} +
+
0 ? 4 : 0, + }} + /> +
+ + {formatWmape(p.wmape)} + +
+ + +
{cfg.label}
+
WMAPE: {formatWmape(p.wmape)}
+
MAE: {p.mae?.toFixed(3) ?? "N/A"} units
+
Bias: {formatBias(p.bias)}
+
{p.sampleSize.toLocaleString()} samples
+
+ + ) + })} +
+ +
+ )} + + {/* Lead time accuracy chart */} + {leadTimeData.length > 0 && ( +
+

Accuracy by Lead Time

+
+ + + + `${v}%`} + /> + [`${value?.toFixed(1)}%`, "WMAPE"]} + /> + + {leadTimeData.map((entry, index) => ( + + ))} + + + +
+
+ )} + + {/* Accuracy trend sparkline */} + {data?.accuracyTrend && data.accuracyTrend.length > 1 && ( +
+

Accuracy Trend (WMAPE)

+
+ + + + + + +
+
+ )} + + {/* Footer info */} + {data?.daysOfHistory !== undefined && ( +

+ Based on {data.daysOfHistory} day{data.daysOfHistory !== 1 ? "s" : ""} of history + {data.overall?.sampleSize ? ` (${data.overall.sampleSize.toLocaleString()} samples)` : ""} +

+ )} + + )} +
+ ) +} diff --git a/inventory/src/components/overview/ForecastMetrics.tsx b/inventory/src/components/overview/ForecastMetrics.tsx index 8669792..fb4a9e8 100644 --- a/inventory/src/components/overview/ForecastMetrics.tsx +++ b/inventory/src/components/overview/ForecastMetrics.tsx @@ -1,13 +1,46 @@ import { useQuery } from "@tanstack/react-query" import { CardHeader, CardTitle, CardContent } from "@/components/ui/card" -import { AreaChart, Area, ResponsiveContainer, XAxis, YAxis, Tooltip } from "recharts" +import { AreaChart, Area, ResponsiveContainer, XAxis, YAxis, Tooltip as RechartsTooltip } from "recharts" import { useState } from "react" import config from "@/config" import { formatCurrency } from "@/utils/formatCurrency" -import { TrendingUp, DollarSign } from "lucide-react" -import { DateRange } from "react-day-picker" +import { TrendingUp, DollarSign, Target } from "lucide-react" +import { Tooltip as UITooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip" +import { Popover, PopoverContent, PopoverTrigger } from "@/components/ui/popover" +import { Tabs, TabsList, TabsTrigger } from "@/components/ui/tabs" +import { Button } from "@/components/ui/button" +import { ForecastAccuracy } from "@/components/overview/ForecastAccuracy" import { addDays, format } from "date-fns" -import { DateRangePicker } from "@/components/ui/date-range-picker-narrow" +import { PHASE_CONFIG, PHASE_KEYS } from "@/utils/lifecyclePhases" + +function MetricSkeleton() { + return
; +} + +type Period = 30 | 90 | 'year'; + +function getEndDate(period: Period): Date { + if (period === 'year') return new Date(new Date().getFullYear(), 11, 31); + return addDays(new Date(), period); +} + +interface PhaseData { + phase: string + products: number + units: number + revenue: number + percentage: number +} + +interface DailyPhaseData { + date: string + preorder: number + launch: number + decay: number + mature: number + slow_mover: number + dormant: number +} interface ForecastData { forecastSales: number @@ -19,6 +52,8 @@ interface ForecastData { revenue: string confidence: number }[] + dailyForecastsByPhase?: DailyPhaseData[] + phaseBreakdown?: PhaseData[] categoryForecasts: { category: string units: number @@ -28,17 +63,14 @@ interface ForecastData { } export function ForecastMetrics() { - const [dateRange, setDateRange] = useState({ - from: new Date(), - to: addDays(new Date(), 30), - }); + const [period, setPeriod] = useState(30); const { data, error, isLoading } = useQuery({ - queryKey: ["forecast-metrics", dateRange], + queryKey: ["forecast-metrics", period], queryFn: async () => { const params = new URLSearchParams({ - startDate: dateRange.from?.toISOString() || "", - endDate: dateRange.to?.toISOString() || "", + startDate: new Date().toISOString(), + endDate: getEndDate(period).toISOString(), }); const response = await fetch(`${config.apiUrl}/dashboard/forecast/metrics?${params}`) if (!response.ok) { @@ -50,25 +82,35 @@ export function ForecastMetrics() { }, }) + const hasPhaseData = data?.dailyForecastsByPhase && data.dailyForecastsByPhase.length > 0 + return ( <> Forecast -
- { - if (range) setDateRange(range); - }} - future={true} - /> +
+ + + + + + + + + setPeriod(v === 'year' ? 'year' : Number(v) as Period)}> + + 30D + 90D + EOY + +
{error ? (
Error: {error.message}
- ) : isLoading ? ( -
Loading forecast metrics...
) : ( <>
@@ -77,52 +119,125 @@ export function ForecastMetrics() {

Forecast Sales

-

{data?.forecastSales.toLocaleString() || 0}

+ {isLoading || !data ? : ( +

{data.forecastSales.toLocaleString()}

+ )}

Forecast Revenue

-

{formatCurrency(Number(data?.forecastRevenue) || 0)}

+ {isLoading || !data ? : ( +

{formatCurrency(Number(data.forecastRevenue) || 0)}

+ )}
+ {isLoading ? ( +
+

Forecast Revenue By Lifecycle Phase

+
+
+ ) : data?.phaseBreakdown && data.phaseBreakdown.length > 0 && ( +
+

Forecast Revenue By Lifecycle Phase

+ +
+ {data.phaseBreakdown.map((p) => { + const cfg = PHASE_CONFIG[p.phase] || { label: p.phase, color: "#94A3B8" } + return ( + + +
0 ? 4 : 0, + }} + /> + + +
+
+ {cfg.label} + {p.percentage}% +
+
{formatCurrency(p.revenue)}
+
{p.products.toLocaleString()} products
+ + + ) + })} +
+ +
+ )} +
- - - - - [formatCurrency(Number(value)), "Revenue"]} - labelFormatter={(date) => format(new Date(date), 'MMM d, yyyy')} - /> - - - + {isLoading ? ( +
+
+
+ ) : ( + + + + + { + const cfg = PHASE_CONFIG[name] + return [formatCurrency(value), cfg?.label || name] + }} + labelFormatter={(date) => format(new Date(date + 'T00:00:00'), 'MMM d, yyyy')} + itemSorter={(item) => -(item.value as number || 0)} + /> + {hasPhaseData ? ( + PHASE_KEYS.map((phase) => { + const cfg = PHASE_CONFIG[phase] + return ( + + ) + }) + ) : ( + + )} + + + )}
)} ) -} \ No newline at end of file +} diff --git a/inventory/src/components/overview/OverstockMetrics.tsx b/inventory/src/components/overview/OverstockMetrics.tsx index ec2d21f..e537957 100644 --- a/inventory/src/components/overview/OverstockMetrics.tsx +++ b/inventory/src/components/overview/OverstockMetrics.tsx @@ -2,7 +2,18 @@ import { useQuery } from "@tanstack/react-query" import { CardHeader, CardTitle, CardContent } from "@/components/ui/card" import config from "@/config" import { formatCurrency } from "@/utils/formatCurrency" -import { Package, Layers, DollarSign, ShoppingCart } from "lucide-react" +import { AlertTriangle, Layers, DollarSign, Tag } from "lucide-react" +import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip" +import { PHASE_CONFIG } from "@/utils/lifecyclePhases" + +interface PhaseBreakdown { + phase: string + products: number + units: number + cost: number + retail: number + percentage: number +} interface OverstockMetricsData { overstockedProducts: number @@ -16,6 +27,7 @@ interface OverstockMetricsData { cost: number retail: number }[] + phaseBreakdown?: PhaseBreakdown[] } function MetricSkeleton() { @@ -44,7 +56,7 @@ export function OverstockMetrics() {
- +

Overstocked Products

{isLoading || !data ? : ( @@ -71,13 +83,48 @@ export function OverstockMetrics() {
- +

Overstocked Retail

{isLoading || !data ? : (

{formatCurrency(data.total_excess_retail)}

)}
+ {data?.phaseBreakdown && data.phaseBreakdown.length > 0 && ( +
+

Overstocked Cost By Lifecycle Phase

+ +
+ {data.phaseBreakdown.map((p) => { + const cfg = PHASE_CONFIG[p.phase] || { label: p.phase, color: "#94A3B8" } + return ( + + +
0 ? 3 : 0, + }} + /> + + +
+
+ {cfg.label} + {p.percentage}% +
+
{formatCurrency(p.cost)}
+
{p.products} products · {p.units} units
+ + + ) + })} +
+ +
+ )}
)} diff --git a/inventory/src/components/overview/PurchaseMetrics.tsx b/inventory/src/components/overview/PurchaseMetrics.tsx index 400abfd..981e541 100644 --- a/inventory/src/components/overview/PurchaseMetrics.tsx +++ b/inventory/src/components/overview/PurchaseMetrics.tsx @@ -3,7 +3,7 @@ import { CardHeader, CardTitle, CardContent } from "@/components/ui/card" import { PieChart, Pie, ResponsiveContainer, Cell, Sector } from "recharts" import config from "@/config" import { formatCurrency } from "@/utils/formatCurrency" -import { ClipboardList, AlertCircle, Layers, DollarSign, ShoppingCart } from "lucide-react" +import { ClipboardList, AlertCircle, Truck, DollarSign, Tag } from "lucide-react" import { useState } from "react" interface PurchaseMetricsData { @@ -90,49 +90,49 @@ export function PurchaseMetrics() { {isError ? (

Failed to load purchase metrics

) : ( -
-
-
-
-
- -

Active Purchase Orders

+
+
+
+
+
+ +

Active Purchase Orders

{isLoading || !data ? : (

{data.activePurchaseOrders.toLocaleString()}

)}
-
-
- -

Overdue Purchase Orders

+
+
+ +

Overdue Purchase Orders

{isLoading || !data ? : (

{data.overduePurchaseOrders.toLocaleString()}

)}
-
-
- -

On Order Units

+
+
+ +

On Order Units

{isLoading || !data ? : (

{data.onOrderUnits.toLocaleString()}

)}
-
-
- -

On Order Cost

+
+
+ +

On Order Cost

{isLoading || !data ? : (

{formatCurrency(data.onOrderCost)}

)}
-
-
- -

On Order Retail

+
+
+ +

On Order Retail

{isLoading || !data ? : (

{formatCurrency(data.onOrderRetail)}

@@ -140,9 +140,9 @@ export function PurchaseMetrics() {
-
+
-
Purchase Orders By Vendor
+
PO Costs By Vendor
{isLoading || !data ? (
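The lifecycle-phase tooltips added to the replenishment, overstock, and sales cards in this patch all consume the same server-computed shape: per-phase totals plus each phase's share of the summed metric. A minimal sketch of that percentage calculation, mirroring the inline logic in the dashboard routes above (the PhaseRow type and toPhaseBreakdown name are illustrative, not part of the patch):

interface PhaseRow {
  phase: string
  products: number
  units: number
  cost: number // or revenue, depending on the card
}

// Drop empty phases and express each remaining phase as a share of the
// total, rounded to one decimal place, as the dashboard endpoints do inline.
function toPhaseBreakdown(rows: PhaseRow[]) {
  const total = rows.reduce((sum, r) => sum + r.cost, 0)
  return rows
    .filter((r) => r.cost > 0)
    .map((r) => ({
      ...r,
      percentage: total > 0 ? parseFloat(((r.cost / total) * 100).toFixed(1)) : 0,
    }))
}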
diff --git a/inventory/src/components/overview/ReplenishmentMetrics.tsx b/inventory/src/components/overview/ReplenishmentMetrics.tsx index 7b31e6f..3e94644 100644 --- a/inventory/src/components/overview/ReplenishmentMetrics.tsx +++ b/inventory/src/components/overview/ReplenishmentMetrics.tsx @@ -2,13 +2,24 @@ import { useQuery } from "@tanstack/react-query" import { CardHeader, CardTitle, CardContent } from "@/components/ui/card" import config from "@/config" import { formatCurrency } from "@/utils/formatCurrency" -import { Package, DollarSign, ShoppingCart } from "lucide-react" +import { PackagePlus, DollarSign, Tag } from "lucide-react" +import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip" +import { PHASE_CONFIG } from "@/utils/lifecyclePhases" + +interface PhaseBreakdown { + phase: string + products: number + units: number + cost: number + percentage: number +} interface ReplenishmentMetricsData { productsToReplenish: number unitsToReplenish: number replenishmentCost: number replenishmentRetail: number + phaseBreakdown?: PhaseBreakdown[] topVariants: { id: number title: string @@ -47,7 +58,7 @@ export function ReplenishmentMetrics() {
- +

Units to Replenish

{isLoading || !data ? : ( @@ -65,13 +76,48 @@ export function ReplenishmentMetrics() {
- +

Replenishment Retail

{isLoading || !data ? : (

{formatCurrency(data.replenishmentRetail)}

)}
+ {data?.phaseBreakdown && data.phaseBreakdown.length > 0 && ( +
+

Replenishment Cost By Lifecycle Phase

+ +
+ {data.phaseBreakdown.map((p) => { + const cfg = PHASE_CONFIG[p.phase] || { label: p.phase, color: "#94A3B8" } + return ( + + +
0 ? 3 : 0, + }} + /> + + +
+
+ {cfg.label} + {p.percentage}% +
+
{formatCurrency(p.cost)}
+
{p.products} products · {p.units} units
+ + + ) + })} +
+ +
+ )}
)} diff --git a/inventory/src/components/overview/SalesMetrics.tsx b/inventory/src/components/overview/SalesMetrics.tsx index d85a2a6..9d6296e 100644 --- a/inventory/src/components/overview/SalesMetrics.tsx +++ b/inventory/src/components/overview/SalesMetrics.tsx @@ -1,13 +1,36 @@ import { useQuery } from "@tanstack/react-query" import { CardHeader, CardTitle, CardContent } from "@/components/ui/card" -import { AreaChart, Area, ResponsiveContainer, XAxis, YAxis, Tooltip } from "recharts" +import { AreaChart, Area, ResponsiveContainer, XAxis, YAxis, Tooltip as RechartsTooltip } from "recharts" import { useState } from "react" import config from "@/config" import { formatCurrency } from "@/utils/formatCurrency" import { ClipboardList, Package, DollarSign, ShoppingCart } from "lucide-react" -import { DateRange } from "react-day-picker" +import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip" +import { Tabs, TabsList, TabsTrigger } from "@/components/ui/tabs" import { addDays, format } from "date-fns" -import { DateRangePicker } from "@/components/ui/date-range-picker-narrow" +import { PHASE_CONFIG, PHASE_KEYS_WITH_UNKNOWN as PHASE_KEYS } from "@/utils/lifecyclePhases" + +type Period = 7 | 30 | 90; + +interface PhaseBreakdown { + phase: string + orders: number + units: number + revenue: number + cogs: number + percentage: number +} + +interface DailyPhaseData { + date: string + preorder: number + launch: number + decay: number + mature: number + slow_mover: number + dormant: number + unknown: number +} interface SalesData { totalOrders: number @@ -20,6 +43,8 @@ interface SalesData { revenue: number cogs: number }[] + dailySalesByPhase?: DailyPhaseData[] + phaseBreakdown?: PhaseBreakdown[] } function MetricSkeleton() { @@ -27,17 +52,14 @@ function MetricSkeleton() { } export function SalesMetrics() { - const [dateRange, setDateRange] = useState({ - from: addDays(new Date(), -30), - to: new Date(), - }); + const [period, setPeriod] = useState(30); const { data, isError, isLoading } = useQuery({ - queryKey: ["sales-metrics", dateRange], + queryKey: ["sales-metrics", period], queryFn: async () => { const params = new URLSearchParams({ - startDate: dateRange.from?.toISOString() || "", - endDate: dateRange.to?.toISOString() || "", + startDate: addDays(new Date(), -period).toISOString(), + endDate: new Date().toISOString(), }); const response = await fetch(`${config.apiUrl}/dashboard/sales/metrics?${params}`) if (!response.ok) throw new Error("Failed to fetch sales metrics"); @@ -45,19 +67,19 @@ export function SalesMetrics() { }, }) + const hasPhaseData = data?.dailySalesByPhase && data.dailySalesByPhase.length > 0 + return ( <> Sales -
- { - if (range) setDateRange(range); - }} - future={false} - /> -
+ setPeriod(Number(v) as Period)}> + + 7D + 30D + 90D + +
{isError ? ( @@ -103,6 +125,42 @@ export function SalesMetrics() {
+ {data?.phaseBreakdown && data.phaseBreakdown.length > 0 && ( +
+

Revenue By Lifecycle Phase

+ +
+ {data.phaseBreakdown.map((p) => { + const cfg = PHASE_CONFIG[p.phase] || { label: p.phase, color: "#94A3B8" } + return ( + + +
0 ? 3 : 0, + }} + /> + + +
+
+ {cfg.label} + {p.percentage}% +
+
{formatCurrency(p.revenue)}
+
{p.units.toLocaleString()} units · {p.orders.toLocaleString()} orders
+ + + ) + })} +
+ +
+ )} +
{isLoading ? (
@@ -111,7 +169,7 @@ export function SalesMetrics() { ) : ( - [formatCurrency(Number(value)), "Revenue"]} + { + const cfg = PHASE_CONFIG[name] + return [formatCurrency(value), cfg?.label || name] + }} labelFormatter={(date) => format(new Date(date), 'MMM d, yyyy')} + itemSorter={(item) => -(item.value as number || 0)} /> - + {hasPhaseData ? ( + PHASE_KEYS.map((phase) => { + const cfg = PHASE_CONFIG[phase] + return ( + + ) + }) + ) : ( + + )} )} diff --git a/inventory/src/components/overview/StockMetrics.tsx b/inventory/src/components/overview/StockMetrics.tsx index 71c32ad..c09d0b9 100644 --- a/inventory/src/components/overview/StockMetrics.tsx +++ b/inventory/src/components/overview/StockMetrics.tsx @@ -3,8 +3,18 @@ import { CardHeader, CardTitle, CardContent } from "@/components/ui/card" import { PieChart, Pie, ResponsiveContainer, Cell, Sector } from "recharts" import config from "@/config" import { formatCurrency } from "@/utils/formatCurrency" -import { Package, Layers, DollarSign, ShoppingCart } from "lucide-react" +import { Package, PackageCheck, Layers, DollarSign, Tag } from "lucide-react" import { useState } from "react" +import { PHASE_CONFIG } from "@/utils/lifecyclePhases" + +interface PhaseStock { + phase: string + products: number + units: number + cost: number + retail: number + percentage: number +} interface StockMetricsData { totalProducts: number @@ -19,6 +29,7 @@ interface StockMetricsData { cost: number retail: number }[] + phaseStock?: PhaseStock[] } const COLORS = [ @@ -32,66 +43,54 @@ const COLORS = [ "#FF7C43", ] -const renderActiveShape = (props: any) => { - const { cx, cy, innerRadius, outerRadius, startAngle, endAngle, fill, brand, retail } = props; - - // Split brand name into words and create lines of max 12 chars - const words = brand.split(' '); +function wrapLabel(text: string, maxLen = 12): string[] { + const words = text.split(' '); const lines: string[] = []; - let currentLine = ''; - + let cur = ''; words.forEach((word: string) => { - if ((currentLine + ' ' + word).length <= 12) { - currentLine = currentLine ? `${currentLine} ${word}` : word; + if ((cur + ' ' + word).length <= maxLen) { + cur = cur ? `${cur} ${word}` : word; } else { - if (currentLine) lines.push(currentLine); - currentLine = word; + if (cur) lines.push(cur); + cur = word; } }); - if (currentLine) lines.push(currentLine); + if (cur) lines.push(cur); + return lines; +} + +const renderActiveShape = (props: any) => { + const { cx, cy, innerRadius, outerRadius, startAngle, endAngle, fill, brand, cost } = props; + const lines = wrapLabel(brand); return ( - - + + {lines.map((line, i) => ( - - {line} - + {line} ))} - - {formatCurrency(retail)} + + {formatCurrency(cost)} + + + ); +}; + +const renderPhaseActiveShape = (props: any) => { + const { cx, cy, innerRadius, outerRadius, startAngle, endAngle, fill, phase, cost } = props; + const cfg = PHASE_CONFIG[phase] || { label: phase }; + const lines = wrapLabel(cfg.label); + + return ( + + + + {lines.map((line, i) => ( + {line} + ))} + + {formatCurrency(cost)} ); @@ -103,6 +102,7 @@ function MetricSkeleton() { export function StockMetrics() { const [activeIndex, setActiveIndex] = useState(); + const [activePhaseIndex, setActivePhaseIndex] = useState(); const { data, isError, isLoading } = useQuery({ queryKey: ["stock-metrics"], @@ -122,49 +122,49 @@ export function StockMetrics() { {isError ? (

Failed to load stock metrics

) : ( -
-
-
-
-
- -

Products

+
+
+
+
+
+ +

Products

{isLoading || !data ? : (

{data.totalProducts.toLocaleString()}

)}
-
-
- -

Products In Stock

+
+
+ +

Products In Stock

{isLoading || !data ? : (

{data.productsInStock.toLocaleString()}

)}
-
-
- -

Stock Units

+
+
+ +

Stock Units

{isLoading || !data ? : (

{data.totalStockUnits.toLocaleString()}

)}
-
-
- -

Stock Cost

+
+
+ +

Stock Cost

{isLoading || !data ? : (

{formatCurrency(data.totalStockCost)}

)}
-
-
- -

Stock Retail

+
+
+ +

Stock Retail

{isLoading || !data ? : (

{formatCurrency(data.totalStockRetail)}

@@ -172,9 +172,9 @@ export function StockMetrics() {
-
-
-
Stock Retail By Brand
+
+
+
Stock Cost By Brand
{isLoading || !data ? (
@@ -185,7 +185,7 @@ export function StockMetrics() {
+
+
Stock Cost By Phase
+
+ {isLoading || !data?.phaseStock ? ( +
+
+
+ ) : ( + + + setActivePhaseIndex(index)} + onMouseLeave={() => setActivePhaseIndex(undefined)} + > + {data.phaseStock.map((entry) => { + const cfg = PHASE_CONFIG[entry.phase] || { color: "#94A3B8" } + return ( + + ) + })} + + + + )} +
+
)} diff --git a/inventory/src/components/overview/TopReplenishProducts.tsx b/inventory/src/components/overview/TopReplenishProducts.tsx index 7f3144f..585833f 100644 --- a/inventory/src/components/overview/TopReplenishProducts.tsx +++ b/inventory/src/components/overview/TopReplenishProducts.tsx @@ -46,7 +46,7 @@ export function TopReplenishProducts() { ) : isLoading ? ( ) : ( - +
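The ProductDetail changes below consume the /products/:id/forecast route added earlier in this patch. A minimal sketch of a typed client for that response shape (the ForecastDay and fetchProductForecast names and the apiUrl parameter are illustrative; the component itself goes through react-query and config.apiUrl):

interface ForecastDay {
  date: string // YYYY-MM-DD
  units: number
  revenue: number
  confidenceLower: number
  confidenceUpper: number
}

interface ProductForecastResponse {
  phase: string | null  // lifecycle phase driving the forecast
  method: string | null // e.g. 'lifecycle_curve'
  forecast: ForecastDay[]
}

// Fetch the 90-day forecast for one product from the route added above.
async function fetchProductForecast(apiUrl: string, pid: number): Promise<ProductForecastResponse> {
  const res = await fetch(`${apiUrl}/products/${pid}/forecast`, { credentials: "include" })
  if (!res.ok) throw new Error("Failed to fetch product forecast")
  return res.json()
}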
diff --git a/inventory/src/components/products/ProductDetail.tsx b/inventory/src/components/products/ProductDetail.tsx
index c0a405f..c9d0057 100644
--- a/inventory/src/components/products/ProductDetail.tsx
+++ b/inventory/src/components/products/ProductDetail.tsx
@@ -19,8 +19,9 @@ import { StatusBadge } from "@/components/products/StatusBadge";
 import { transformMetricsRow } from "@/utils/transformUtils";
 import { cn } from "@/lib/utils";
 import config from "@/config";
-import { ResponsiveContainer, LineChart, Line, XAxis, YAxis, Tooltip, CartesianGrid, Legend } from "recharts";
+import { ResponsiveContainer, LineChart, Line, AreaChart, Area, XAxis, YAxis, Tooltip, CartesianGrid, Legend } from "recharts";
 import { Badge } from "@/components/ui/badge";
+import { format } from "date-fns";
 import { Table, TableHeader, TableRow, TableHead, TableBody, TableCell } from "@/components/ui/table";

 // Interfaces for POs and time series data
@@ -46,6 +47,26 @@ interface ProductTimeSeries {
   recentPurchases: ProductPurchaseOrder[];
 }

+interface ProductForecast {
+  phase: string | null;
+  method: string | null;
+  forecast: {
+    date: string;
+    units: number;
+    revenue: number;
+    confidenceLower: number;
+    confidenceUpper: number;
+  }[];
+}
+
+const PHASE_LABELS: Record<string, string> = {
+  preorder: "Pre-order",
+  launch: "Launch",
+  decay: "Active Decay",
+  mature: "Evergreen",
+  dormant: "Dormant",
+};
+
 interface ProductDetailProps {
   productId: number | null;
   onClose: () => void;
@@ -109,6 +130,18 @@ export function ProductDetail({ productId, onClose }: ProductDetailProps) {
     enabled: !!productId, // Only run query when productId is truthy
   });

+  // Fetch product forecast data
+  const { data: forecastData, isLoading: isLoadingForecast } = useQuery<ProductForecast>({
+    queryKey: ["productForecast", productId],
+    queryFn: async () => {
+      if (!productId) throw new Error("Product ID is required");
+      const response = await fetch(`${config.apiUrl}/products/${productId}/forecast`, {credentials: 'include'});
+      if (!response.ok) throw new Error("Failed to fetch forecast");
+      return response.json();
+    },
+    enabled: !!productId,
+  });
+
   // Get PO status display names (DB stores text statuses)
   const getPOStatusName = (status: string): string => {
     const statusMap: Record<string, string> = {
@@ -328,6 +361,72 @@ export function ProductDetail({ productId, onClose }: ProductDetailProps) {

+          {/* Forecast Chart */}
+
+
+          90-Day Forecast
+
+          {forecastData?.phase
+            ? `${PHASE_LABELS[forecastData.phase] || forecastData.phase} phase \u00b7 ${forecastData.method || 'unknown'} method`
+            : 'Lifecycle-aware demand forecast'}
+
+
+
+          {isLoadingForecast ? (
+ +
+ ) : forecastData && forecastData.forecast.length > 0 ? ( + + + + format(new Date(d + 'T00:00:00'), 'MMM d')} + interval="preserveStartEnd" + tick={{ fontSize: 11 }} + /> + + + format(new Date(d + 'T00:00:00'), 'MMM d, yyyy')} + formatter={(value: number, name: string) => { + if (name === 'Revenue') return [formatCurrency(value), name]; + return [value.toFixed(1), name]; + }} + /> + + + + + + ) : ( +
+

No forecast data available for this product.

+
+ )} +
+
+ Sales Performance (30 Days) @@ -535,6 +634,8 @@ export function ProductDetail({ productId, onClose }: ProductDetailProps) { Forecasting + + diff --git a/inventory/src/pages/Overview.tsx b/inventory/src/pages/Overview.tsx index 2efe7da..c293119 100644 --- a/inventory/src/pages/Overview.tsx +++ b/inventory/src/pages/Overview.tsx @@ -18,11 +18,11 @@ export function Overview() { {/* First row - Stock and Purchase metrics */} -
- +
+ - +
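For reference, the accuracy figures surfaced by the ForecastAccuracy card above (MAE, WMAPE, bias, RMSE) follow the standard definitions over paired daily (actual, forecast) units. A minimal sketch under that assumption; the real values come precomputed from the Python forecast engine via the forecast_accuracy table, not from the frontend:

// Standard error metrics over paired daily observations (illustrative).
interface Pair { actual: number; forecast: number }

function accuracyMetrics(pairs: Pair[]) {
  if (pairs.length === 0) return { mae: null, bias: null, rmse: null, wmape: null }
  const errors = pairs.map((p) => p.forecast - p.actual)
  const n = errors.length
  const mae = errors.reduce((s, e) => s + Math.abs(e), 0) / n         // average miss size
  const bias = errors.reduce((s, e) => s + e, 0) / n                  // > 0 means over-forecasting
  const rmse = Math.sqrt(errors.reduce((s, e) => s + e * e, 0) / n)   // penalizes large misses
  const totalActual = pairs.reduce((s, p) => s + p.actual, 0)
  // Weighted MAPE: total absolute error over total actual demand (0..1 here;
  // the accuracy endpoint multiplies by 100 before display).
  const wmape = totalActual > 0
    ? errors.reduce((s, e) => s + Math.abs(e), 0) / totalActual
    : null
  return { mae, bias, rmse, wmape }
}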
diff --git a/inventory/src/utils/lifecyclePhases.ts b/inventory/src/utils/lifecyclePhases.ts
new file mode 100644
index 0000000..3b495f3
--- /dev/null
+++ b/inventory/src/utils/lifecyclePhases.ts
@@ -0,0 +1,15 @@
+export const PHASE_CONFIG: Record<string, { label: string; color: string }> = {
+  preorder: { label: "Pre-order", color: "#3B82F6" },
+  launch: { label: "Launch", color: "#22C55E" },
+  decay: { label: "Active", color: "#F59E0B" },
+  mature: { label: "Evergreen", color: "#8B5CF6" },
+  slow_mover: { label: "Slow Mover", color: "#14B8A6" },
+  dormant: { label: "Dormant", color: "#6B7280" },
+  unknown: { label: "Unclassified", color: "#94A3B8" },
+}
+
+/** Stacking order for phase area/bar charts (bottom to top) */
+export const PHASE_KEYS = ["mature", "slow_mover", "decay", "launch", "preorder", "dormant"] as const
+
+/** Same as PHASE_KEYS but includes the unknown bucket (for sales data where lifecycle_phase can be NULL) */
+export const PHASE_KEYS_WITH_UNKNOWN = ["mature", "slow_mover", "decay", "launch", "preorder", "dormant", "unknown"] as const
dation.ts","./src/components/product-import/steps/validationstep/hooks/useproductlines.ts","./src/components/product-import/steps/validationstep/hooks/usesanitycheck.ts","./src/components/product-import/steps/validationstep/hooks/usetemplatemanagement.ts","./src/components/product-import/steps/validationstep/hooks/useupcvalidation.ts","./src/components/product-import/steps/validationstep/hooks/usevalidationactions.ts","./src/components/product-import/steps/validationstep/hooks/useaivalidation/index.ts","./src/components/product-import/steps/validationstep/hooks/useaivalidation/useaiapi.ts","./src/components/product-import/steps/validationstep/hooks/useaivalidation/useaiprogress.ts","./src/components/product-import/steps/validationstep/hooks/useaivalidation/useaitransform.ts","./src/components/product-import/steps/validationstep/store/selectors.ts","./src/components/product-import/steps/validationstep/store/types.ts","./src/components/product-import/steps/validationstep/store/validationstore.ts","./src/components/product-import/steps/validationstep/utils/aivalidationutils.ts","./src/components/product-import/steps/validationstep/utils/countryutils.ts","./src/components/product-import/steps/validationstep/utils/datamutations.ts","./src/components/product-import/steps/validationstep/utils/inlineaipayload.ts","./src/components/product-import/steps/validationstep/utils/priceutils.ts","./src/components/product-import/steps/validationstep/utils/upcutils.ts","./src/components/product-import/steps/validationstepold/index.tsx","./src/components/product-import/steps/validationstepold/types.ts","./src/components/product-import/steps/validationstepold/components/aivalidationdialogs.tsx","./src/components/product-import/steps/validationstepold/components/basecellcontent.tsx","./src/components/product-import/steps/validationstepold/components/initializingvalidation.tsx","./src/components/product-import/steps/validationstepold/components/searchabletemplateselect.tsx","./src/components/product-import/steps/validationstepold/components/upcvalidationtableadapter.tsx","./src/components/product-import/steps/validationstepold/components/validationcell.tsx","./src/components/product-import/steps/validationstepold/components/validationcontainer.tsx","./src/components/product-import/steps/validationstepold/components/validationtable.tsx","./src/components/product-import/steps/validationstepold/components/cells/checkboxcell.tsx","./src/components/product-import/steps/validationstepold/components/cells/inputcell.tsx","./src/components/product-import/steps/validationstepold/components/cells/multiselectcell.tsx","./src/components/product-import/steps/validationstepold/components/cells/multilineinput.tsx","./src/components/product-import/steps/validationstepold/components/cells/selectcell.tsx","./src/components/product-import/steps/validationstepold/hooks/useaivalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/usefieldvalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/usefiltermanagement.tsx","./src/components/product-import/steps/validationstepold/hooks/useinitialvalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/useproductlinesfetching.tsx","./src/components/product-import/steps/validationstepold/hooks/userowoperations.tsx","./src/components/product-import/steps/validationstepold/hooks/usetemplatemanagement.tsx","./src/components/product-import/steps/validationstepold/hooks/useuniqueitemnumbersvalidation.tsx","./src/components/product-
import/steps/validationstepold/hooks/useuniquevalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/useupcvalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/usevalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/usevalidationstate.tsx","./src/components/product-import/steps/validationstepold/hooks/validationtypes.ts","./src/components/product-import/steps/validationstepold/types/index.ts","./src/components/product-import/steps/validationstepold/utils/aivalidationutils.ts","./src/components/product-import/steps/validationstepold/utils/countryutils.ts","./src/components/product-import/steps/validationstepold/utils/datamutations.ts","./src/components/product-import/steps/validationstepold/utils/priceutils.ts","./src/components/product-import/steps/validationstepold/utils/upcutils.ts","./src/components/product-import/utils/exceedsmaxrecords.ts","./src/components/product-import/utils/mapdata.ts","./src/components/product-import/utils/mapworkbook.ts","./src/components/product-import/utils/steps.ts","./src/components/products/productdetail.tsx","./src/components/products/productfilters.tsx","./src/components/products/productsummarycards.tsx","./src/components/products/producttable.tsx","./src/components/products/producttableskeleton.tsx","./src/components/products/productviews.tsx","./src/components/products/statusbadge.tsx","./src/components/products/columndefinitions.ts","./src/components/purchase-orders/categorymetricscard.tsx","./src/components/purchase-orders/filtercontrols.tsx","./src/components/purchase-orders/ordermetricscard.tsx","./src/components/purchase-orders/paginationcontrols.tsx","./src/components/purchase-orders/pipelinecard.tsx","./src/components/purchase-orders/purchaseorderaccordion.tsx","./src/components/purchase-orders/purchaseorderstable.tsx","./src/components/purchase-orders/vendormetricscard.tsx","./src/components/settings/datamanagement.tsx","./src/components/settings/globalsettings.tsx","./src/components/settings/permissionselector.tsx","./src/components/settings/productsettings.tsx","./src/components/settings/promptmanagement.tsx","./src/components/settings/reusableimagemanagement.tsx","./src/components/settings/templatemanagement.tsx","./src/components/settings/userform.tsx","./src/components/settings/userlist.tsx","./src/components/settings/usermanagement.tsx","./src/components/settings/vendorsettings.tsx","./src/components/templates/searchproducttemplatedialog.tsx","./src/components/templates/templateform.tsx","./src/components/ui/accordion.tsx","./src/components/ui/alert-dialog.tsx","./src/components/ui/alert.tsx","./src/components/ui/avatar.tsx","./src/components/ui/badge.tsx","./src/components/ui/button.tsx","./src/components/ui/calendar.tsx","./src/components/ui/card.tsx","./src/components/ui/carousel.tsx","./src/components/ui/checkbox.tsx","./src/components/ui/code.tsx","./src/components/ui/collapsible.tsx","./src/components/ui/command.tsx","./src/components/ui/date-range-picker-narrow.tsx","./src/components/ui/date-range-picker.tsx","./src/components/ui/dialog.tsx","./src/components/ui/drawer.tsx","./src/components/ui/dropdown-menu.tsx","./src/components/ui/form.tsx","./src/components/ui/input.tsx","./src/components/ui/label.tsx","./src/components/ui/page-loading.tsx","./src/components/ui/pagination.tsx","./src/components/ui/popover.tsx","./src/components/ui/progress.tsx","./src/components/ui/radio-group.tsx","./src/components/ui/scroll-area.tsx","./src/components/ui/select.tsx","./
src/components/ui/separator.tsx","./src/components/ui/sheet.tsx","./src/components/ui/sidebar.tsx","./src/components/ui/skeleton.tsx","./src/components/ui/sonner.tsx","./src/components/ui/switch.tsx","./src/components/ui/table.tsx","./src/components/ui/tabs.tsx","./src/components/ui/textarea.tsx","./src/components/ui/toast.tsx","./src/components/ui/toaster.tsx","./src/components/ui/toggle-group.tsx","./src/components/ui/toggle.tsx","./src/components/ui/tooltip.tsx","./src/config/dashboard.ts","./src/contexts/authcontext.tsx","./src/contexts/dashboardscrollcontext.tsx","./src/contexts/importsessioncontext.tsx","./src/hooks/use-mobile.tsx","./src/hooks/use-toast.ts","./src/hooks/usedebounce.ts","./src/hooks/useimportautosave.ts","./src/lib/utils.ts","./src/lib/dashboard/chartconfig.ts","./src/lib/dashboard/designtokens.ts","./src/pages/analytics.tsx","./src/pages/blackfridaydashboard.tsx","./src/pages/brands.tsx","./src/pages/categories.tsx","./src/pages/chat.tsx","./src/pages/dashboard.tsx","./src/pages/discountsimulator.tsx","./src/pages/forecasting.tsx","./src/pages/htslookup.tsx","./src/pages/import.tsx","./src/pages/login.tsx","./src/pages/newsletter.tsx","./src/pages/overview.tsx","./src/pages/producteditor.tsx","./src/pages/products.tsx","./src/pages/purchaseorders.tsx","./src/pages/settings.tsx","./src/pages/smalldashboard.tsx","./src/pages/vendors.tsx","./src/services/apiv2.ts","./src/services/importsessionapi.ts","./src/services/producteditor.ts","./src/types/dashboard-shims.d.ts","./src/types/dashboard.d.ts","./src/types/discount-simulator.ts","./src/types/globals.d.ts","./src/types/importsession.ts","./src/types/products.ts","./src/types/react-data-grid.d.ts","./src/types/status-codes.ts","./src/utils/emojiutils.ts","./src/utils/formatcurrency.ts","./src/utils/naturallanguageperiod.ts","./src/utils/productutils.ts","./src/utils/transformutils.ts"],"version":"5.6.3"} \ No newline at end of file 
+{"root":["./src/app.tsx","./src/config.ts","./src/main.tsx","./src/vite-env.d.ts","./src/components/config.ts","./src/components/analytics/agingsellthrough.tsx","./src/components/analytics/capitalefficiency.tsx","./src/components/analytics/discountimpact.tsx","./src/components/analytics/growthmomentum.tsx","./src/components/analytics/inventoryflow.tsx","./src/components/analytics/inventorytrends.tsx","./src/components/analytics/inventoryvaluetrend.tsx","./src/components/analytics/portfolioanalysis.tsx","./src/components/analytics/seasonalpatterns.tsx","./src/components/analytics/stockhealth.tsx","./src/components/analytics/stockoutrisk.tsx","./src/components/auth/firstaccessiblepage.tsx","./src/components/auth/protected.tsx","./src/components/auth/requireauth.tsx","./src/components/chat/chatroom.tsx","./src/components/chat/chattest.tsx","./src/components/chat/roomlist.tsx","./src/components/chat/searchresults.tsx","./src/components/dashboard/financialoverview.tsx","./src/components/dashboard/operationsmetrics.tsx","./src/components/dashboard/payrollmetrics.tsx","./src/components/dashboard/periodselectionpopover.tsx","./src/components/dashboard/shared/dashboardbadge.tsx","./src/components/dashboard/shared/dashboardcharttooltip.tsx","./src/components/dashboard/shared/dashboardsectionheader.tsx","./src/components/dashboard/shared/dashboardskeleton.tsx","./src/components/dashboard/shared/dashboardstatcard.tsx","./src/components/dashboard/shared/dashboardstatcardmini.tsx","./src/components/dashboard/shared/dashboardstates.tsx","./src/components/dashboard/shared/dashboardtable.tsx","./src/components/dashboard/shared/index.ts","./src/components/discount-simulator/configpanel.tsx","./src/components/discount-simulator/resultschart.tsx","./src/components/discount-simulator/resultstable.tsx","./src/components/discount-simulator/summarycard.tsx","./src/components/forecasting/daterangepickerquick.tsx","./src/components/forecasting/quickorderbuilder.tsx","./src/components/forecasting/columns.tsx","./src/components/layout/appsidebar.tsx","./src/components/layout/mainlayout.tsx","./src/components/layout/navuser.tsx","./src/components/newsletter/campaignhistorydialog.tsx","./src/components/newsletter/newsletterstats.tsx","./src/components/newsletter/recommendationtable.tsx","./src/components/overview/bestsellers.tsx","./src/components/overview/forecastaccuracy.tsx","./src/components/overview/forecastmetrics.tsx","./src/components/overview/overstockmetrics.tsx","./src/components/overview/purchasemetrics.tsx","./src/components/overview/replenishmentmetrics.tsx","./src/components/overview/salesmetrics.tsx","./src/components/overview/stockmetrics.tsx","./src/components/overview/topoverstockedproducts.tsx","./src/components/overview/topreplenishproducts.tsx","./src/components/product-editor/comboboxfield.tsx","./src/components/product-editor/editablecomboboxfield.tsx","./src/components/product-editor/editableinput.tsx","./src/components/product-editor/editablemultiselect.tsx","./src/components/product-editor/imagemanager.tsx","./src/components/product-editor/producteditform.tsx","./src/components/product-editor/productsearch.tsx","./src/components/product-editor/types.ts","./src/components/product-import/createproductcategorydialog.tsx","./src/components/product-import/reactspreadsheetimport.tsx","./src/components/product-import/config.ts","./src/components/product-import/index.ts","./src/components/product-import/translationsrsiprops.ts","./src/components/product-import/types.ts","./src/components/product-impo
rt/components/closeconfirmationdialog.tsx","./src/components/product-import/components/modalwrapper.tsx","./src/components/product-import/components/providers.tsx","./src/components/product-import/components/savesessiondialog.tsx","./src/components/product-import/components/savedsessionslist.tsx","./src/components/product-import/components/table.tsx","./src/components/product-import/hooks/usersi.ts","./src/components/product-import/steps/steps.tsx","./src/components/product-import/steps/uploadflow.tsx","./src/components/product-import/steps/imageuploadstep/imageuploadstep.tsx","./src/components/product-import/steps/imageuploadstep/types.ts","./src/components/product-import/steps/imageuploadstep/components/droppablecontainer.tsx","./src/components/product-import/steps/imageuploadstep/components/genericdropzone.tsx","./src/components/product-import/steps/imageuploadstep/components/unassignedimagessection.tsx","./src/components/product-import/steps/imageuploadstep/components/productcard/copybutton.tsx","./src/components/product-import/steps/imageuploadstep/components/productcard/imagedropzone.tsx","./src/components/product-import/steps/imageuploadstep/components/productcard/productcard.tsx","./src/components/product-import/steps/imageuploadstep/components/productcard/sortableimage.tsx","./src/components/product-import/steps/imageuploadstep/components/unassignedimagessection/unassignedimageitem.tsx","./src/components/product-import/steps/imageuploadstep/hooks/usebulkimageupload.ts","./src/components/product-import/steps/imageuploadstep/hooks/usedraganddrop.ts","./src/components/product-import/steps/imageuploadstep/hooks/useproductimageoperations.ts","./src/components/product-import/steps/imageuploadstep/hooks/useproductimagesinit.ts","./src/components/product-import/steps/imageuploadstep/hooks/useurlimageupload.ts","./src/components/product-import/steps/matchcolumnsstep/matchcolumnsstep.tsx","./src/components/product-import/steps/matchcolumnsstep/types.ts","./src/components/product-import/steps/matchcolumnsstep/components/matchicon.tsx","./src/components/product-import/steps/matchcolumnsstep/components/templatecolumn.tsx","./src/components/product-import/steps/matchcolumnsstep/utils/findmatch.ts","./src/components/product-import/steps/matchcolumnsstep/utils/findunmatchedrequiredfields.ts","./src/components/product-import/steps/matchcolumnsstep/utils/getfieldoptions.ts","./src/components/product-import/steps/matchcolumnsstep/utils/getmatchedcolumns.ts","./src/components/product-import/steps/matchcolumnsstep/utils/normalizecheckboxvalue.ts","./src/components/product-import/steps/matchcolumnsstep/utils/normalizetabledata.ts","./src/components/product-import/steps/matchcolumnsstep/utils/setcolumn.ts","./src/components/product-import/steps/matchcolumnsstep/utils/setignorecolumn.ts","./src/components/product-import/steps/matchcolumnsstep/utils/setsubcolumn.ts","./src/components/product-import/steps/matchcolumnsstep/utils/uniqueentries.ts","./src/components/product-import/steps/selectheaderstep/selectheaderstep.tsx","./src/components/product-import/steps/selectheaderstep/components/selectheadertable.tsx","./src/components/product-import/steps/selectheaderstep/components/columns.tsx","./src/components/product-import/steps/selectsheetstep/selectsheetstep.tsx","./src/components/product-import/steps/uploadstep/uploadstep.tsx","./src/components/product-import/steps/uploadstep/components/dropzone.tsx","./src/components/product-import/steps/uploadstep/components/columns.tsx","./src/components/product-import/
steps/uploadstep/utils/readfilesasync.ts","./src/components/product-import/steps/validationstep/index.tsx","./src/components/product-import/steps/validationstep/components/aisuggestionbadge.tsx","./src/components/product-import/steps/validationstep/components/copydownbanner.tsx","./src/components/product-import/steps/validationstep/components/floatingselectionbar.tsx","./src/components/product-import/steps/validationstep/components/initializingoverlay.tsx","./src/components/product-import/steps/validationstep/components/searchabletemplateselect.tsx","./src/components/product-import/steps/validationstep/components/suggestionbadges.tsx","./src/components/product-import/steps/validationstep/components/validationcontainer.tsx","./src/components/product-import/steps/validationstep/components/validationfooter.tsx","./src/components/product-import/steps/validationstep/components/validationtable.tsx","./src/components/product-import/steps/validationstep/components/validationtoolbar.tsx","./src/components/product-import/steps/validationstep/components/cells/checkboxcell.tsx","./src/components/product-import/steps/validationstep/components/cells/comboboxcell.tsx","./src/components/product-import/steps/validationstep/components/cells/inputcell.tsx","./src/components/product-import/steps/validationstep/components/cells/multiselectcell.tsx","./src/components/product-import/steps/validationstep/components/cells/multilineinput.tsx","./src/components/product-import/steps/validationstep/components/cells/selectcell.tsx","./src/components/product-import/steps/validationstep/contexts/aisuggestionscontext.tsx","./src/components/product-import/steps/validationstep/dialogs/aidebugdialog.tsx","./src/components/product-import/steps/validationstep/dialogs/aivalidationprogress.tsx","./src/components/product-import/steps/validationstep/dialogs/aivalidationresults.tsx","./src/components/product-import/steps/validationstep/dialogs/sanitycheckdialog.tsx","./src/components/product-import/steps/validationstep/hooks/useautoinlineaivalidation.ts","./src/components/product-import/steps/validationstep/hooks/usecopydownvalidation.ts","./src/components/product-import/steps/validationstep/hooks/usefieldoptions.ts","./src/components/product-import/steps/validationstep/hooks/useinlineaivalidation.ts","./src/components/product-import/steps/validationstep/hooks/useproductlines.ts","./src/components/product-import/steps/validationstep/hooks/usesanitycheck.ts","./src/components/product-import/steps/validationstep/hooks/usetemplatemanagement.ts","./src/components/product-import/steps/validationstep/hooks/useupcvalidation.ts","./src/components/product-import/steps/validationstep/hooks/usevalidationactions.ts","./src/components/product-import/steps/validationstep/hooks/useaivalidation/index.ts","./src/components/product-import/steps/validationstep/hooks/useaivalidation/useaiapi.ts","./src/components/product-import/steps/validationstep/hooks/useaivalidation/useaiprogress.ts","./src/components/product-import/steps/validationstep/hooks/useaivalidation/useaitransform.ts","./src/components/product-import/steps/validationstep/store/selectors.ts","./src/components/product-import/steps/validationstep/store/types.ts","./src/components/product-import/steps/validationstep/store/validationstore.ts","./src/components/product-import/steps/validationstep/utils/aivalidationutils.ts","./src/components/product-import/steps/validationstep/utils/countryutils.ts","./src/components/product-import/steps/validationstep/utils/datamutations.ts","./src/components/p
roduct-import/steps/validationstep/utils/inlineaipayload.ts","./src/components/product-import/steps/validationstep/utils/priceutils.ts","./src/components/product-import/steps/validationstep/utils/upcutils.ts","./src/components/product-import/steps/validationstepold/index.tsx","./src/components/product-import/steps/validationstepold/types.ts","./src/components/product-import/steps/validationstepold/components/aivalidationdialogs.tsx","./src/components/product-import/steps/validationstepold/components/basecellcontent.tsx","./src/components/product-import/steps/validationstepold/components/initializingvalidation.tsx","./src/components/product-import/steps/validationstepold/components/searchabletemplateselect.tsx","./src/components/product-import/steps/validationstepold/components/upcvalidationtableadapter.tsx","./src/components/product-import/steps/validationstepold/components/validationcell.tsx","./src/components/product-import/steps/validationstepold/components/validationcontainer.tsx","./src/components/product-import/steps/validationstepold/components/validationtable.tsx","./src/components/product-import/steps/validationstepold/components/cells/checkboxcell.tsx","./src/components/product-import/steps/validationstepold/components/cells/inputcell.tsx","./src/components/product-import/steps/validationstepold/components/cells/multiselectcell.tsx","./src/components/product-import/steps/validationstepold/components/cells/multilineinput.tsx","./src/components/product-import/steps/validationstepold/components/cells/selectcell.tsx","./src/components/product-import/steps/validationstepold/hooks/useaivalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/usefieldvalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/usefiltermanagement.tsx","./src/components/product-import/steps/validationstepold/hooks/useinitialvalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/useproductlinesfetching.tsx","./src/components/product-import/steps/validationstepold/hooks/userowoperations.tsx","./src/components/product-import/steps/validationstepold/hooks/usetemplatemanagement.tsx","./src/components/product-import/steps/validationstepold/hooks/useuniqueitemnumbersvalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/useuniquevalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/useupcvalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/usevalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/usevalidationstate.tsx","./src/components/product-import/steps/validationstepold/hooks/validationtypes.ts","./src/components/product-import/steps/validationstepold/types/index.ts","./src/components/product-import/steps/validationstepold/utils/aivalidationutils.ts","./src/components/product-import/steps/validationstepold/utils/countryutils.ts","./src/components/product-import/steps/validationstepold/utils/datamutations.ts","./src/components/product-import/steps/validationstepold/utils/priceutils.ts","./src/components/product-import/steps/validationstepold/utils/upcutils.ts","./src/components/product-import/utils/exceedsmaxrecords.ts","./src/components/product-import/utils/mapdata.ts","./src/components/product-import/utils/mapworkbook.ts","./src/components/product-import/utils/steps.ts","./src/components/products/productdetail.tsx","./src/components/products/productfilters.tsx","./src/components/products/productsummarycards.tsx","./src/components/products/produc
ttable.tsx","./src/components/products/producttableskeleton.tsx","./src/components/products/productviews.tsx","./src/components/products/statusbadge.tsx","./src/components/products/columndefinitions.ts","./src/components/purchase-orders/categorymetricscard.tsx","./src/components/purchase-orders/filtercontrols.tsx","./src/components/purchase-orders/ordermetricscard.tsx","./src/components/purchase-orders/paginationcontrols.tsx","./src/components/purchase-orders/pipelinecard.tsx","./src/components/purchase-orders/purchaseorderaccordion.tsx","./src/components/purchase-orders/purchaseorderstable.tsx","./src/components/purchase-orders/vendormetricscard.tsx","./src/components/settings/datamanagement.tsx","./src/components/settings/globalsettings.tsx","./src/components/settings/permissionselector.tsx","./src/components/settings/productsettings.tsx","./src/components/settings/promptmanagement.tsx","./src/components/settings/reusableimagemanagement.tsx","./src/components/settings/templatemanagement.tsx","./src/components/settings/userform.tsx","./src/components/settings/userlist.tsx","./src/components/settings/usermanagement.tsx","./src/components/settings/vendorsettings.tsx","./src/components/templates/searchproducttemplatedialog.tsx","./src/components/templates/templateform.tsx","./src/components/ui/accordion.tsx","./src/components/ui/alert-dialog.tsx","./src/components/ui/alert.tsx","./src/components/ui/avatar.tsx","./src/components/ui/badge.tsx","./src/components/ui/button.tsx","./src/components/ui/calendar.tsx","./src/components/ui/card.tsx","./src/components/ui/carousel.tsx","./src/components/ui/checkbox.tsx","./src/components/ui/code.tsx","./src/components/ui/collapsible.tsx","./src/components/ui/command.tsx","./src/components/ui/date-range-picker-narrow.tsx","./src/components/ui/date-range-picker.tsx","./src/components/ui/dialog.tsx","./src/components/ui/drawer.tsx","./src/components/ui/dropdown-menu.tsx","./src/components/ui/form.tsx","./src/components/ui/input.tsx","./src/components/ui/label.tsx","./src/components/ui/page-loading.tsx","./src/components/ui/pagination.tsx","./src/components/ui/popover.tsx","./src/components/ui/progress.tsx","./src/components/ui/radio-group.tsx","./src/components/ui/scroll-area.tsx","./src/components/ui/select.tsx","./src/components/ui/separator.tsx","./src/components/ui/sheet.tsx","./src/components/ui/sidebar.tsx","./src/components/ui/skeleton.tsx","./src/components/ui/sonner.tsx","./src/components/ui/switch.tsx","./src/components/ui/table.tsx","./src/components/ui/tabs.tsx","./src/components/ui/textarea.tsx","./src/components/ui/toast.tsx","./src/components/ui/toaster.tsx","./src/components/ui/toggle-group.tsx","./src/components/ui/toggle.tsx","./src/components/ui/tooltip.tsx","./src/config/dashboard.ts","./src/contexts/authcontext.tsx","./src/contexts/dashboardscrollcontext.tsx","./src/contexts/importsessioncontext.tsx","./src/hooks/use-mobile.tsx","./src/hooks/use-toast.ts","./src/hooks/usedebounce.ts","./src/hooks/useimportautosave.ts","./src/lib/utils.ts","./src/lib/dashboard/chartconfig.ts","./src/lib/dashboard/designtokens.ts","./src/pages/analytics.tsx","./src/pages/blackfridaydashboard.tsx","./src/pages/brands.tsx","./src/pages/categories.tsx","./src/pages/chat.tsx","./src/pages/dashboard.tsx","./src/pages/discountsimulator.tsx","./src/pages/forecasting.tsx","./src/pages/htslookup.tsx","./src/pages/import.tsx","./src/pages/login.tsx","./src/pages/newsletter.tsx","./src/pages/overview.tsx","./src/pages/producteditor.tsx","./src/pages/products.tsx","./
src/pages/purchaseorders.tsx","./src/pages/settings.tsx","./src/pages/smalldashboard.tsx","./src/pages/vendors.tsx","./src/services/apiv2.ts","./src/services/importsessionapi.ts","./src/services/producteditor.ts","./src/types/dashboard-shims.d.ts","./src/types/dashboard.d.ts","./src/types/discount-simulator.ts","./src/types/globals.d.ts","./src/types/importsession.ts","./src/types/products.ts","./src/types/react-data-grid.d.ts","./src/types/status-codes.ts","./src/utils/emojiutils.ts","./src/utils/formatcurrency.ts","./src/utils/lifecyclephases.ts","./src/utils/naturallanguageperiod.ts","./src/utils/productutils.ts","./src/utils/transformutils.ts"],"version":"5.6.3"} \ No newline at end of file