diff --git a/.gitignore b/.gitignore index 2c98b09..df5a263 100644 --- a/.gitignore +++ b/.gitignore @@ -80,3 +80,8 @@ chat-migration*/ **/chat-migration*/ chat-migration*/** **/chat-migration*/** + +venv/ +venv/** +**/venv/* +**/venv/** \ No newline at end of file diff --git a/inventory-server/scripts/calculate-metrics-new.js b/inventory-server/scripts/calculate-metrics-new.js index c0dd718..4920586 100644 --- a/inventory-server/scripts/calculate-metrics-new.js +++ b/inventory-server/scripts/calculate-metrics-new.js @@ -11,6 +11,7 @@ const RUN_PERIODIC_METRICS = true; const RUN_BRAND_METRICS = true; const RUN_VENDOR_METRICS = true; const RUN_CATEGORY_METRICS = true; +const RUN_LIFECYCLE_FORECASTS = true; // Maximum execution time for the entire sequence (e.g., 90 minutes) const MAX_EXECUTION_TIME_TOTAL = 90 * 60 * 1000; @@ -592,6 +593,13 @@ async function runAllCalculations() { historyType: 'product_metrics', statusModule: 'product_metrics' }, + { + run: RUN_LIFECYCLE_FORECASTS, + name: 'Lifecycle Forecast Update', + sqlFile: 'metrics-new/update_lifecycle_forecasts.sql', + historyType: 'lifecycle_forecasts', + statusModule: 'lifecycle_forecasts' + }, { run: RUN_PERIODIC_METRICS, name: 'Periodic Metrics Update', diff --git a/inventory-server/scripts/forecast/__pycache__/forecast_engine.cpython-314.pyc b/inventory-server/scripts/forecast/__pycache__/forecast_engine.cpython-314.pyc new file mode 100644 index 0000000..419abe2 Binary files /dev/null and b/inventory-server/scripts/forecast/__pycache__/forecast_engine.cpython-314.pyc differ diff --git a/inventory-server/scripts/forecast/forecast_engine.py b/inventory-server/scripts/forecast/forecast_engine.py new file mode 100644 index 0000000..52b6b77 --- /dev/null +++ b/inventory-server/scripts/forecast/forecast_engine.py @@ -0,0 +1,1612 @@ +""" +Lifecycle-Aware Forecast Engine + +Generates 90-day per-product daily sales forecasts using analogous lifecycle +curves learned from historical brand/category launch patterns. 
+ +Usage: + python forecast_engine.py + python forecast_engine.py --backfill 30 + +Environment variables (from .env): + DB_HOST, DB_USER, DB_PASSWORD, DB_NAME, DB_PORT (default 5432) +""" + +import os +import sys +import json +import time +import logging +from datetime import datetime, date, timedelta + +import numpy as np +import pandas as pd +import psycopg2 +import psycopg2.extras +import psycopg2.extensions +from scipy.optimize import curve_fit +from statsmodels.tsa.holtwinters import SimpleExpSmoothing, Holt + +# Register numpy type adapters so psycopg2 can serialize them to SQL +psycopg2.extensions.register_adapter(np.float64, lambda x: psycopg2.extensions.AsIs(float(x))) +psycopg2.extensions.register_adapter(np.float32, lambda x: psycopg2.extensions.AsIs(float(x))) +psycopg2.extensions.register_adapter(np.int64, lambda x: psycopg2.extensions.AsIs(int(x))) +psycopg2.extensions.register_adapter(np.int32, lambda x: psycopg2.extensions.AsIs(int(x))) + +# --------------------------------------------------------------------------- +# Config +# --------------------------------------------------------------------------- +FORECAST_HORIZON_DAYS = 90 +CURVE_HISTORY_DAYS = 730 # 2 years of launches to build reference curves +CURVE_WINDOW_WEEKS = 13 # Track decay for 13 weeks (91 days) +MIN_PRODUCTS_FOR_CURVE = 5 # Minimum launches to fit a brand curve +MIN_PRODUCTS_FOR_BRAND_CAT = 10 # Minimum for brand x category curve +MATURE_VELOCITY_THRESHOLD = 0.1 # units/day to qualify as "mature" vs "dormant" +MATURE_AGE_DAYS = 60 # days since first_received to be considered mature +LAUNCH_AGE_DAYS = 14 # days in "launch" phase +DECAY_AGE_DAYS = 60 # days in "active decay" phase +EXP_SMOOTHING_WINDOW = 60 # days of history for mature product smoothing +BATCH_SIZE = 1000 # rows per INSERT batch +DOW_LOOKBACK_DAYS = 90 # days of order history for day-of-week indices +MIN_R_SQUARED = 0.1 # curves below this are unreliable (fall back to velocity) +SEASONAL_LOOKBACK_DAYS = 365 # 12 
months of order history for monthly seasonal indices +MIN_PREORDER_DAYS = 3 # minimum pre-order accumulation days for reliable scaling + +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s [%(levelname)s] %(message)s', + datefmt='%H:%M:%S' +) +log = logging.getLogger('forecast') + + +# --------------------------------------------------------------------------- +# Database helpers +# --------------------------------------------------------------------------- +def get_connection(): + """Create a PostgreSQL connection from environment variables.""" + return psycopg2.connect( + host=os.environ.get('DB_HOST', 'localhost'), + user=os.environ.get('DB_USER', 'inventory_user'), + password=os.environ.get('DB_PASSWORD', ''), + dbname=os.environ.get('DB_NAME', 'inventory_db'), + port=int(os.environ.get('DB_PORT', 5432)), + ) + + +def execute_query(conn, sql, params=None): + """Execute a query and return a DataFrame.""" + import warnings + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", message=".*pandas only supports SQLAlchemy.*") + return pd.read_sql_query(sql, conn, params=params) + + +def cleanup_stale_runs(conn): + """Mark any runs stuck in 'running' status as failed (e.g. 
# ---------------------------------------------------------------------------
# Decay curve model: sales(t) = A * exp(-λt) + C
# ---------------------------------------------------------------------------
def decay_model(t, amplitude, decay_rate, baseline):
    """Exponential decay toward a constant floor: A * exp(-λt) + C."""
    return amplitude * np.exp(-decay_rate * t) + baseline


def fit_decay_curve(weekly_medians):
    """
    Fit the exponential-decay model to median weekly sales.

    Args:
        weekly_medians: sequence of median sales per week (index = week number)

    Returns:
        (amplitude, decay_rate, baseline, r_squared) tuple, or None when the
        series is too short, all-zero, or the optimizer fails to converge.
    """
    y = np.array(weekly_medians, dtype=float)
    weeks = np.arange(len(y), dtype=float)

    # Nothing to fit: too few points, or a flat-zero series
    if len(y) < 3 or np.max(y) == 0:
        return None

    # Starting point: peak as amplitude, second-half minimum as the floor,
    # and a moderate decay rate.
    peak = float(np.max(y))
    tail_floor = float(np.min(y[len(y) // 2:]))
    initial_guess = [peak, 0.3, tail_floor]

    try:
        popt, _ = curve_fit(
            decay_model, weeks, y,
            p0=initial_guess,
            bounds=([0, 0.01, 0], [peak * 5, 5.0, peak]),
            maxfev=5000,
        )

        # Goodness of fit (coefficient of determination)
        fitted = decay_model(weeks, *popt)
        ss_res = np.sum((y - fitted) ** 2)
        ss_tot = np.sum((y - np.mean(y)) ** 2)
        r_sq = 1 - (ss_res / ss_tot) if ss_tot > 0 else 0.0

        amplitude, decay_rate, baseline = popt
        return float(amplitude), float(decay_rate), float(baseline), float(r_sq)
    except (RuntimeError, ValueError) as e:
        log.debug(f"Curve fit failed: {e}")
        return None
# ---------------------------------------------------------------------------
# Day-of-week indices
# ---------------------------------------------------------------------------
def compute_dow_indices(conn):
    """
    Derive day-of-week revenue multipliers from recent order history.

    Returns {ISO weekday 1=Mon ... 7=Sun: multiplier}. Each index is that
    day's revenue divided by the average day's revenue, so the seven values
    sum to 7.0 (average 1.0): applying them reshapes a week's daily
    distribution without changing the weekly total.
    """
    sql = """
        SELECT
            EXTRACT(ISODOW FROM o.date)::int AS dow,
            SUM(o.price * o.quantity) AS revenue
        FROM orders o
        WHERE o.canceled IS DISTINCT FROM TRUE
          AND o.date >= CURRENT_DATE - INTERVAL '1 day' * %s
        GROUP BY 1
        ORDER BY 1
    """

    df = execute_query(conn, sql, [DOW_LOOKBACK_DAYS])

    if df.empty or len(df) < 7:
        log.warning("Insufficient order data for DOW indices, using flat distribution")
        return {d: 1.0 for d in range(1, 8)}

    daily_avg = df['revenue'].sum() / 7.0
    indices = {
        int(rec['dow']): round(float(rec['revenue']) / daily_avg, 4) if daily_avg > 0 else 1.0
        for _, rec in df.iterrows()
    }

    # Any weekday absent from the window stays neutral
    for d in range(1, 8):
        indices.setdefault(d, 1.0)

    log.info(f"DOW indices: Mon={indices[1]:.3f} Tue={indices[2]:.3f} Wed={indices[3]:.3f} "
             f"Thu={indices[4]:.3f} Fri={indices[5]:.3f} Sat={indices[6]:.3f} Sun={indices[7]:.3f}")
    return indices
+ """ + sql = """ + SELECT + EXTRACT(MONTH FROM o.date)::int AS month, + SUM(o.price * o.quantity) AS revenue + FROM orders o + WHERE o.canceled IS DISTINCT FROM TRUE + AND o.date >= CURRENT_DATE - INTERVAL '1 day' * %s + GROUP BY 1 + ORDER BY 1 + """ + + df = execute_query(conn, sql, [SEASONAL_LOOKBACK_DAYS]) + + if df.empty or len(df) < 6: + log.warning("Insufficient data for seasonal indices, using flat distribution") + return {m: 1.0 for m in range(1, 13)} + + total = df['revenue'].sum() + n_months = len(df) + avg = total / n_months + + indices = {} + for _, row in df.iterrows(): + month = int(row['month']) + idx = float(row['revenue']) / avg if avg > 0 else 1.0 + indices[month] = round(idx, 4) + + # Fill any missing months with 1.0 + for m in range(1, 13): + if m not in indices: + indices[m] = 1.0 + + present = [f"{m}={indices[m]:.3f}" for m in range(1, 13)] + log.info(f"Monthly seasonal indices: {', '.join(present)}") + return indices + + +# --------------------------------------------------------------------------- +# Phase 1: Build brand-category reference curves +# --------------------------------------------------------------------------- +DEAL_CATEGORIES = frozenset([ + 'Deals', 'Black Friday', 'Week 1', 'Week 2', 'Week 3', + '28 Off', '5 Dollar Deals', '10 Dollar Deals', 'Fall Sale', +]) + + +def build_reference_curves(conn): + """ + Build decay curves for each brand (and brand x category at every hierarchy level). + + For category curves, we load each product's full set of category assignments + (across all hierarchy levels), then fit brand×cat_id curves wherever we have + enough products. This gives granular curves like "49 and Market × 12x12 Paper Pads" + alongside coarser fallbacks like "49 and Market × Paper". + + Returns DataFrame of curves written to brand_lifecycle_curves. 
+ """ + log.info("Building reference curves from historical launches...") + + # Get daily sales aligned by days-since-first-received for recent launches + # (no category join here — we attach categories separately) + sales_sql = """ + WITH recent_launches AS ( + SELECT pm.pid, p.brand + FROM product_metrics pm + JOIN products p ON p.pid = pm.pid + WHERE p.visible = true + AND p.brand IS NOT NULL + AND pm.date_first_received >= NOW() - INTERVAL '1 day' * %s + AND pm.date_first_received < NOW() - INTERVAL '14 days' + ), + daily_sales AS ( + SELECT + rl.pid, rl.brand, + dps.snapshot_date, + COALESCE(dps.units_sold, 0) AS units_sold, + (dps.snapshot_date - pm.date_first_received::date) AS day_offset + FROM recent_launches rl + JOIN product_metrics pm ON pm.pid = rl.pid + JOIN daily_product_snapshots dps ON dps.pid = rl.pid + WHERE dps.snapshot_date >= pm.date_first_received::date + AND dps.snapshot_date < pm.date_first_received::date + INTERVAL '1 week' * %s + ) + SELECT brand, pid, + FLOOR(day_offset / 7)::int AS week_num, + SUM(units_sold) AS weekly_sales + FROM daily_sales + WHERE day_offset >= 0 + GROUP BY brand, pid, week_num + ORDER BY brand, pid, week_num + """ + + df = execute_query(conn, sales_sql, [CURVE_HISTORY_DAYS, CURVE_WINDOW_WEEKS]) + if df.empty: + log.warning("No launch data found for reference curves") + return pd.DataFrame() + + log.info(f"Loaded {len(df)} weekly sales records from {df['pid'].nunique()} products across {df['brand'].nunique()} brands") + + # Load all category assignments for these products (every hierarchy level) + launch_pids = df['pid'].unique().tolist() + cat_sql = """ + SELECT pc.pid, ch.cat_id, ch.name AS cat_name, ch.level AS cat_level + FROM product_categories pc + JOIN category_hierarchy ch ON ch.cat_id = pc.cat_id + WHERE pc.pid = ANY(%s) + AND ch.name NOT IN %s + ORDER BY pc.pid, ch.level DESC + """ + cat_df = execute_query(conn, cat_sql, [launch_pids, tuple(DEAL_CATEGORIES)]) + # Build pid -> list of (cat_id, cat_name, 
cat_level) + pid_cats = {} + for _, row in cat_df.iterrows(): + pid = int(row['pid']) + if pid not in pid_cats: + pid_cats[pid] = [] + pid_cats[pid].append((int(row['cat_id']), row['cat_name'], int(row['cat_level']))) + + # Also get pre-order stats per brand (median pre-order sales AND accumulation window). + # Uses de-facto preorders: any product that had orders before date_first_received, + # regardless of the preorder_count flag. This gives us 6000+ completed cycles vs ~19 + # from the explicit flag alone. + preorder_sql = """ + WITH preorder_stats AS ( + SELECT p.pid, p.brand, + COALESCE((SELECT SUM(o.quantity) FROM orders o + WHERE o.pid = p.pid AND o.canceled IS DISTINCT FROM TRUE + AND o.date < pm.date_first_received), 0) AS preorder_units, + GREATEST(EXTRACT(DAY FROM pm.date_first_received - MIN(o.date)), 1) AS preorder_days + FROM products p + JOIN product_metrics pm ON pm.pid = p.pid + LEFT JOIN orders o ON o.pid = p.pid AND o.canceled IS DISTINCT FROM TRUE + AND o.date < pm.date_first_received + WHERE p.visible = true AND p.brand IS NOT NULL + AND pm.date_first_received IS NOT NULL + AND pm.date_first_received >= NOW() - INTERVAL '1 day' * %s + GROUP BY p.pid, p.brand, pm.date_first_received + ) + SELECT brand, + PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY preorder_units) AS median_preorder_sales, + PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY preorder_days) AS median_preorder_days + FROM preorder_stats + WHERE preorder_units > 0 + GROUP BY brand + HAVING COUNT(*) >= 3 + """ + + preorder_df = execute_query(conn, preorder_sql, [CURVE_HISTORY_DAYS]) + preorder_map = dict(zip(preorder_df['brand'], preorder_df['median_preorder_sales'])) if not preorder_df.empty else {} + preorder_days_map = dict(zip(preorder_df['brand'], preorder_df['median_preorder_days'])) if not preorder_df.empty else {} + + curves = [] + + def _fit_and_append(group_df, brand, cat_id=None, cat_name=None, cat_level=None): + """Helper: fit a decay curve for a group and append to curves 
list.""" + product_count = group_df['pid'].nunique() + min_products = MIN_PRODUCTS_FOR_CURVE if cat_id is None else MIN_PRODUCTS_FOR_BRAND_CAT + + if product_count < min_products: + return False + + weekly = group_df.groupby('week_num')['weekly_sales'].median() + if len(weekly) < 4: + return False + + full_weeks = weekly.reindex(range(CURVE_WINDOW_WEEKS), fill_value=0.0) + weekly_arr = full_weeks.values[:CURVE_WINDOW_WEEKS] + + result = fit_decay_curve(weekly_arr) + if result is None: + return False + + amplitude, decay_rate, baseline, r_sq = result + + # Quality gate: only store curves above the reliability threshold + if r_sq < MIN_R_SQUARED: + return False + + first_week = group_df[group_df['week_num'] == 0].groupby('pid')['weekly_sales'].sum() + median_fw = float(first_week.median()) if len(first_week) > 0 else 0.0 + + curves.append({ + 'brand': brand, + 'root_category': cat_name, # kept for readability; cat_id is the real key + 'cat_id': cat_id, + 'category_level': cat_level, + 'amplitude': amplitude, + 'decay_rate': decay_rate, + 'baseline': baseline, + 'r_squared': r_sq, + 'sample_size': product_count, + 'median_first_week_sales': median_fw, + 'median_preorder_sales': preorder_map.get(brand), + 'median_preorder_days': preorder_days_map.get(brand), + }) + return True + + # 1. Fit brand-level curves (aggregate across all categories) + for brand, brand_df in df.groupby('brand'): + _fit_and_append(brand_df, brand) + + # 2. 
Fit brand × category curves at every hierarchy level + # Build a mapping of (brand, cat_id) -> list of pids + brand_cat_pids = {} + for pid, cats in pid_cats.items(): + brand_rows = df[df['pid'] == pid] + if brand_rows.empty: + continue + brand = brand_rows.iloc[0]['brand'] + for cat_id, cat_name, cat_level in cats: + key = (brand, cat_id) + if key not in brand_cat_pids: + brand_cat_pids[key] = {'cat_name': cat_name, 'cat_level': cat_level, 'pids': set()} + brand_cat_pids[key]['pids'].add(pid) + + cat_curves_fitted = 0 + for (brand, cat_id), info in brand_cat_pids.items(): + group_df = df[(df['brand'] == brand) & (df['pid'].isin(info['pids']))] + if _fit_and_append(group_df, brand, cat_id=cat_id, + cat_name=info['cat_name'], cat_level=info['cat_level']): + cat_curves_fitted += 1 + + curves_df = pd.DataFrame(curves) + if curves_df.empty: + log.warning("No curves could be fitted") + return curves_df + + # Write to database + with conn.cursor() as cur: + cur.execute("TRUNCATE brand_lifecycle_curves") + for _, row in curves_df.iterrows(): + cur.execute(""" + INSERT INTO brand_lifecycle_curves + (brand, root_category, cat_id, category_level, + amplitude, decay_rate, baseline, + r_squared, sample_size, median_first_week_sales, + median_preorder_sales, median_preorder_days, computed_at) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, NOW()) + """, ( + row['brand'], + None if pd.isna(row.get('root_category')) else row.get('root_category'), + None if pd.isna(row.get('cat_id')) else int(row['cat_id']), + None if pd.isna(row.get('category_level')) else int(row['category_level']), + row['amplitude'], row['decay_rate'], row['baseline'], + row['r_squared'], row['sample_size'], + row['median_first_week_sales'], + None if pd.isna(row.get('median_preorder_sales')) else row.get('median_preorder_sales'), + None if pd.isna(row.get('median_preorder_days')) else row.get('median_preorder_days'), + )) + conn.commit() + + brand_only = 
def load_products(conn):
    """
    Load all visible products with their metrics for classification.

    Also loads each product's full category ancestry (all hierarchy levels),
    stored as a list of cat_ids ordered deepest-first for hierarchical curve lookup.

    Args:
        conn: open psycopg2 connection

    Returns:
        DataFrame with one row per visible product; the added 'cat_ids'
        column holds a Python list of cat_ids, deepest hierarchy level first.
    """
    sql = """
        SELECT
            pm.pid,
            p.brand,
            pm.current_price,
            pm.current_stock,
            pm.sales_velocity_daily,
            pm.sales_30d,
            pm.date_first_received,
            pm.date_last_sold,
            p.preorder_count,
            COALESCE(p.baskets, 0) AS baskets,
            EXTRACT(DAY FROM NOW() - pm.date_first_received) AS age_days
        FROM product_metrics pm
        JOIN products p ON p.pid = pm.pid
        WHERE p.visible = true
    """
    products = execute_query(conn, sql)

    # Load category assignments for all products (every hierarchy level, deepest first)
    cat_sql = """
        SELECT pc.pid, ch.cat_id, ch.level AS cat_level
        FROM product_categories pc
        JOIN category_hierarchy ch ON ch.cat_id = pc.cat_id
        WHERE ch.name NOT IN %s
        ORDER BY pc.pid, ch.level DESC
    """
    cat_df = execute_query(conn, cat_sql, [tuple(DEAL_CATEGORIES)])

    # Build pid -> [cat_id, ...] ordered deepest-first; the SQL ORDER BY
    # guarantees the per-pid append order
    pid_cat_ids = {}
    for _, row in cat_df.iterrows():
        pid = int(row['pid'])
        if pid not in pid_cat_ids:
            pid_cat_ids[pid] = []
        pid_cat_ids[pid].append(int(row['cat_id']))

    # Attach category list to each product row as a Python object column
    products['cat_ids'] = products['pid'].apply(lambda p: pid_cat_ids.get(int(p), []))

    log.info(f"Loaded {len(products)} products, "
             f"{sum(1 for c in products['cat_ids'] if len(c) > 0)}/{len(products)} with category data")
    return products
def classify_phase(row):
    """
    Classify a product into its lifecycle phase.

    Args:
        row: mapping or pandas Series with keys preorder_count, age_days,
            sales_velocity_daily and date_first_received.

    Returns:
        One of 'preorder', 'launch', 'decay', 'mature', 'slow_mover', 'dormant'.
    """
    preorder = (row.get('preorder_count') or 0) > 0
    age = row.get('age_days')
    velocity = row.get('sales_velocity_daily') or 0
    first_received = row.get('date_first_received')

    # BUGFIX: rows arrive via DataFrame.apply, so SQL NULLs surface as
    # NaT/NaN — never None. The bare `is None` checks below would miss them,
    # and in particular a preorder product that was never received (NaT
    # first_received, NaN age) fell through to 'dormant' instead of
    # 'preorder'. Normalize pandas missing values to None first.
    if first_received is not None and pd.isna(first_received):
        first_received = None
    if age is not None and pd.isna(age):
        age = None

    # Pre-order: has preorder_count and either not received or very recently received
    if preorder and (first_received is None or (age is not None and age <= LAUNCH_AGE_DAYS)):
        return 'preorder'

    # No first_received date — can't determine lifecycle; fall back to velocity
    if first_received is None or age is None:
        if velocity > MATURE_VELOCITY_THRESHOLD:
            return 'mature'
        if velocity > 0:
            return 'slow_mover'
        return 'dormant'

    if age <= LAUNCH_AGE_DAYS:
        return 'launch'
    elif age <= DECAY_AGE_DAYS:
        if velocity > 0:
            return 'decay'
        return 'dormant'
    else:
        if velocity > MATURE_VELOCITY_THRESHOLD:
            return 'mature'
        if velocity > 0:
            return 'slow_mover'
        return 'dormant'
def get_curve_for_product(product, curves_df):
    """
    Find the best matching reference curve for a product.

    Hierarchical fallback: the product's deepest category is tried first,
    then progressively coarser ancestors, and finally the brand-only curve —
    so "49 and Market × 12x12 Paper Pads" wins over "49 and Market × Paper
    Crafts" whenever both exist. Curves with R² below MIN_R_SQUARED are
    excluded up front as unreliable fits.

    Returns:
        (amplitude, decay_rate, baseline, median_first_week, median_preorder,
        median_preorder_days) tuple, or None if the brand has no usable curve.
    """
    brand = product.get('brand')
    if brand is None or curves_df.empty:
        return None

    # Restrict once to this brand's reliable curves
    reliable = curves_df[
        (curves_df['brand'] == brand)
        & (curves_df['r_squared'] >= MIN_R_SQUARED)
    ]
    if reliable.empty:
        return None

    def _as_tuple(row):
        preorder_sales = row.get('median_preorder_sales')
        preorder_days = row.get('median_preorder_days')
        return (
            float(row['amplitude']),
            float(row['decay_rate']),
            float(row['baseline']),
            float(row['median_first_week_sales'] or 1),
            float(preorder_sales) if pd.notna(preorder_sales) else None,
            float(preorder_days) if pd.notna(preorder_days) else None,
        )

    # Walk the category list deepest-first
    for cat_id in (product.get('cat_ids') or []):
        candidates = reliable[reliable['cat_id'] == cat_id]
        if not candidates.empty:
            return _as_tuple(candidates.iloc[0])

    # Fall back to the brand-only curve (cat_id is NaN/None)
    fallback = reliable[reliable['cat_id'].isna()]
    if fallback.empty:
        return None
    return _as_tuple(fallback.iloc[0])
def forecast_from_curve(curve_params, scale_factor, age_days, horizon_days):
    """
    Generate daily forecast from a scaled decay curve.

    The scale factor is applied only to the decay envelope, NOT the baseline.
    This prevents hot products from getting inflated long-tail forecasts.

    Formula: daily_value = (A/7) * exp(-λ * t_weeks) * scale + (C/7)

    Args:
        curve_params: (amplitude, decay_rate, baseline, ...) — trailing
            elements (brand medians etc.) are ignored
        scale_factor: multiplier for the decay envelope
        age_days: current product age in days
        horizon_days: how many days to forecast

    Returns:
        numpy array of `horizon_days` non-negative daily forecast values
    """
    amplitude, decay_rate, baseline = curve_params[:3]
    # The curve is in weekly units; convert to daily
    daily_amp = amplitude / 7.0
    daily_baseline = baseline / 7.0

    # Vectorized over the horizon: one C-level evaluation instead of a
    # Python loop of `horizon_days` exp() calls — same values, O(n) in C.
    t_weeks = (age_days + np.arange(horizon_days, dtype=float)) / 7.0
    daily_values = daily_amp * np.exp(-decay_rate * t_weeks) * scale_factor + daily_baseline
    # Clamp defensively: forecasts must never go negative
    return np.maximum(daily_values, 0.0)
def batch_load_product_data(conn, products):
    """
    Batch-load all per-product data needed for forecasting in a few queries
    instead of one query per product.

    Args:
        conn: open psycopg2 connection
        products: products DataFrame; must already carry the 'phase' column
            assigned by classify_phase

    Returns dict with keys:
        preorder_sales: {pid: units} — pre-order units (before first received)
        preorder_days:  {pid: days}  — days those pre-orders accumulated over
        launch_sales:   {pid: units} — first 14 days of sales
        decay_velocity: {pid: avg}   — recent 30-day daily average
        mature_history: {pid: DataFrame} — daily sales history for SES
    """
    data = {
        'preorder_sales': {},
        'preorder_days': {},
        'launch_sales': {},
        'decay_velocity': {},
        'mature_history': {},
    }

    # Pre-order sales: orders placed BEFORE first received date
    # Also compute the number of days pre-orders accumulated over (for daily-rate normalization)
    preorder_pids = products[products['phase'] == 'preorder']['pid'].tolist()
    if preorder_pids:
        sql = """
            SELECT o.pid,
                   COALESCE(SUM(o.quantity), 0) AS preorder_units,
                   GREATEST(EXTRACT(DAY FROM NOW() - MIN(o.date)), 1) AS preorder_days
            FROM orders o
            LEFT JOIN product_metrics pm ON pm.pid = o.pid
            WHERE o.pid = ANY(%s)
              AND o.canceled IS DISTINCT FROM TRUE
              AND (pm.date_first_received IS NULL OR o.date < pm.date_first_received)
            GROUP BY o.pid
        """
        df = execute_query(conn, sql, [preorder_pids])
        for _, row in df.iterrows():
            data['preorder_sales'][int(row['pid'])] = float(row['preorder_units'])
            data['preorder_days'][int(row['pid'])] = float(row['preorder_days'])
        log.info(f"Batch loaded pre-order sales for {len(data['preorder_sales'])}/{len(preorder_pids)} preorder products")

    # Launch sales: first 14 days after first received
    launch_pids = products[products['phase'] == 'launch']['pid'].tolist()
    if launch_pids:
        sql = """
            SELECT dps.pid, COALESCE(SUM(dps.units_sold), 0) AS total_sold
            FROM daily_product_snapshots dps
            JOIN product_metrics pm ON pm.pid = dps.pid
            WHERE dps.pid = ANY(%s)
              AND dps.snapshot_date >= pm.date_first_received::date
              AND dps.snapshot_date < pm.date_first_received::date + INTERVAL '14 days'
            GROUP BY dps.pid
        """
        df = execute_query(conn, sql, [launch_pids])
        for _, row in df.iterrows():
            data['launch_sales'][int(row['pid'])] = float(row['total_sold'])
        log.info(f"Batch loaded launch sales for {len(data['launch_sales'])}/{len(launch_pids)} launch products")

    # Decay recent velocity: average daily sales over last 30 days
    decay_pids = products[products['phase'] == 'decay']['pid'].tolist()
    if decay_pids:
        sql = """
            SELECT dps.pid, AVG(COALESCE(dps.units_sold, 0)) AS avg_daily
            FROM daily_product_snapshots dps
            WHERE dps.pid = ANY(%s)
              AND dps.snapshot_date >= CURRENT_DATE - INTERVAL '30 days'
            GROUP BY dps.pid
        """
        df = execute_query(conn, sql, [decay_pids])
        for _, row in df.iterrows():
            data['decay_velocity'][int(row['pid'])] = float(row['avg_daily'])
        log.info(f"Batch loaded decay velocity for {len(data['decay_velocity'])}/{len(decay_pids)} decay products")

    # Mature daily history: full time series for exponential smoothing
    mature_pids = products[products['phase'] == 'mature']['pid'].tolist()
    if mature_pids:
        sql = """
            SELECT dps.pid, dps.snapshot_date, COALESCE(dps.units_sold, 0) AS units_sold
            FROM daily_product_snapshots dps
            WHERE dps.pid = ANY(%s)
              AND dps.snapshot_date >= CURRENT_DATE - INTERVAL '1 day' * %s
            ORDER BY dps.pid, dps.snapshot_date
        """
        df = execute_query(conn, sql, [mature_pids, EXP_SMOOTHING_WINDOW])
        for pid, group in df.groupby('pid'):
            data['mature_history'][int(pid)] = group.copy()
        log.info(f"Batch loaded history for {len(data['mature_history'])}/{len(mature_pids)} mature products")

    return data
# ---------------------------------------------------------------------------
# Per-product scale factor computation
# ---------------------------------------------------------------------------
def compute_scale_factor(phase, product, curve_info, batch_data):
    """
    Derive how strongly this product deviates from its brand curve.

    The returned multiplier captures how much more/less this product sells
    versus the brand average; it is applied to the decay envelope only
    (never the baseline). Returns 1.0 when no curve is available.
    """
    if curve_info is None:
        return 1.0

    pid = int(product['pid'])
    amplitude, decay_rate, baseline, median_fw, median_preorder, med_preorder_days = curve_info

    scale = 1.0

    if phase == 'preorder':
        preorder_units = batch_data['preorder_sales'].get(pid, 0)
        preorder_days = batch_data['preorder_days'].get(pid, 1)
        baskets = product.get('baskets') or 0

        # Too few days of accumulation → noisy signal, use brand average
        if preorder_days < MIN_PREORDER_DAYS and preorder_units > 0:
            return 1.0

        # Order units are the primary demand signal; baskets are the fallback
        if preorder_units > 0:
            demand_signal, signal_days = preorder_units, preorder_days
        else:
            demand_signal, signal_days = baskets, max(preorder_days, 14)

        # Compare daily rates. The product's signal is normalized by its own
        # window, but the brand median by the brand's stored median pre-order
        # window (not this product's) to avoid systematic bias.
        demand_daily = demand_signal / max(signal_days, 1)
        if median_preorder and median_preorder > 0:
            brand_preorder_window = max(med_preorder_days or signal_days, 1)
            median_preorder_daily = median_preorder / brand_preorder_window
            scale = demand_daily / median_preorder_daily
        elif median_fw > 0 and demand_daily > 0:
            median_fw_daily = median_fw / 7.0
            scale = demand_daily / median_fw_daily

    elif phase == 'launch':
        actual_sold = batch_data['launch_sales'].get(pid, 0)
        age = max(0, product.get('age_days') or 0)
        if median_fw > 0 and actual_sold > 0:
            days_observed = min(age, 14)
            if days_observed > 0:
                projected_first_week = (actual_sold / days_observed) * 7
                scale = projected_first_week / median_fw

    elif phase == 'decay':
        actual_velocity = batch_data['decay_velocity'].get(pid, 0)
        age = max(0, product.get('age_days') or 0)
        t_weeks = age / 7.0
        # Invert the curve at the product's current age:
        #   actual = (A/7)*exp(-λt)*scale + C/7  →  scale = (actual - C/7) / ((A/7)*exp(-λt))
        decay_part = (amplitude / 7.0) * np.exp(-decay_rate * t_weeks)
        # Guard the denominator: late in life the envelope is nearly zero and
        # the division would explode. Floor at 10% of the week-1 daily value.
        min_decay = max(0.01, amplitude / 70.0)
        if decay_part > min_decay and actual_velocity > 0:
            scale = (actual_velocity - baseline / 7.0) / decay_part

    # Clamp to avoid extreme values — tighter for preorder since the signal
    # is noisier (pre-orders accumulate differently than post-launch sales)
    max_scale = 5.0 if phase == 'preorder' else 8.0
    return max(0.1, min(scale, max_scale))
# ---------------------------------------------------------------------------
def forecast_mature(product, history_df):
    """
    Forecast a mature/evergreen product from its recent daily sales.

    Primary model is Holt's damped linear-trend exponential smoothing; the
    trend term lets the forecast relax back down after a sales spike instead
    of carrying the inflated level forward. Falls back to simple exponential
    smoothing, and finally to the product's flat daily velocity.
    """
    pid = int(product['pid'])
    velocity = product.get('sales_velocity_daily') or 0
    flat_forecast = np.full(FORECAST_HORIZON_DAYS, velocity)

    # Not enough observations to smooth — flat velocity
    if history_df is None or history_df.empty or len(history_df) < 7:
        return flat_forecast

    # Regularize into a contiguous daily series; a day without a snapshot
    # means zero sales that day.
    daily = history_df.copy()
    daily['snapshot_date'] = pd.to_datetime(daily['snapshot_date'])
    daily = daily.set_index('snapshot_date').resample('D').sum().fillna(0)
    series = daily['units_sold'].values.astype(float)

    # Smoothing needs at least 2 non-zero values
    if np.count_nonzero(series) < 2:
        return flat_forecast

    try:
        # Damped trend (phi < 1) keeps long-horizon forecasts converging to
        # a level instead of extrapolating a straight line for 90 days.
        fitted = Holt(series, initialization_method='estimated',
                      damped_trend=True).fit(optimized=True)
        return np.maximum(fitted.forecast(FORECAST_HORIZON_DAYS), 0)
    except Exception:
        pass

    # Holt's failed (e.g. insufficient data points) — try plain SES
    try:
        fitted = SimpleExpSmoothing(series, initialization_method='estimated').fit(optimized=True)
        return np.maximum(fitted.forecast(FORECAST_HORIZON_DAYS), 0)
    except Exception as e:
        log.debug(f"ExpSmoothing failed for pid {pid}: {e}")
        return flat_forecast
def forecast_dormant():
    """Dormant products get near-zero forecast."""
    # Zero everywhere: dormant items have no sales signal to extrapolate
    return np.zeros(FORECAST_HORIZON_DAYS)


# ---------------------------------------------------------------------------
# Accuracy-driven confidence margins
# ---------------------------------------------------------------------------
# Fallback ±margin (fraction of the forecast value) per lifecycle phase, used
# until load_accuracy_margins can supply measured WMAPE-based values.
DEFAULT_MARGINS = {
    'preorder': 0.4,
    'launch': 0.35,
    'decay': 0.3,
    'mature': 0.35,
    'slow_mover': 0.5,
    'dormant': 0.5,
}
MIN_MARGIN = 0.15  # intervals shouldn't be tighter than ±15%
MAX_MARGIN = 1.0   # intervals shouldn't exceed ±100%
+ """ + margins = dict(DEFAULT_MARGINS) + try: + df = execute_query(conn, """ + SELECT fa.dimension_value AS phase, fa.wmape + FROM forecast_accuracy fa + JOIN forecast_runs fr ON fr.id = fa.run_id + WHERE fa.metric_type = 'by_phase' + AND fr.status IN ('completed', 'backfill') + AND fa.wmape IS NOT NULL + ORDER BY fr.finished_at DESC + """) + if df.empty: + log.info("No accuracy data available, using default confidence margins") + return margins + + # Take the most recent run's values (they appear first due to ORDER BY) + seen = set() + for _, row in df.iterrows(): + phase = row['phase'] + if phase not in seen: + wmape = float(row['wmape']) + margins[phase] = max(MIN_MARGIN, min(wmape, MAX_MARGIN)) + seen.add(phase) + + log.info(f"Loaded accuracy-based margins: {', '.join(f'{k}={v:.2f}' for k, v in margins.items())}") + except Exception as e: + log.warning(f"Could not load accuracy margins, using defaults: {e}") + + return margins + + +# --------------------------------------------------------------------------- +# Main orchestration +# --------------------------------------------------------------------------- +FLUSH_EVERY_PRODUCTS = 5000 # Flush forecast rows to DB every N products + +def generate_all_forecasts(conn, curves_df, dow_indices, monthly_indices=None, + accuracy_margins=None): + """Classify all products, batch-load data, generate and stream-write forecasts. + + Writes forecast rows to product_forecasts in chunks to avoid accumulating + millions of rows in memory (37K products × 90 days = 3.3M rows). 
+ """ + if monthly_indices is None: + monthly_indices = {m: 1.0 for m in range(1, 13)} + if accuracy_margins is None: + accuracy_margins = dict(DEFAULT_MARGINS) + log.info("Loading products for classification...") + products = load_products(conn) + log.info(f"Loaded {len(products)} visible products") + + # Classify each product + products['phase'] = products.apply(classify_phase, axis=1) + phase_counts = products['phase'].value_counts().to_dict() + log.info(f"Phase distribution: {phase_counts}") + + # Batch-load per-product data (replaces per-product queries) + log.info("Batch loading product data...") + batch_data = batch_load_product_data(conn, products) + + today = date.today() + forecast_dates = [today + timedelta(days=i) for i in range(FORECAST_HORIZON_DAYS)] + + # Pre-compute DOW and seasonal multipliers for each forecast date + dow_multipliers = [dow_indices.get(d.isoweekday(), 1.0) for d in forecast_dates] + seasonal_multipliers = [monthly_indices.get(d.month, 1.0) for d in forecast_dates] + + # TRUNCATE before streaming writes + with conn.cursor() as cur: + cur.execute("TRUNCATE product_forecasts") + conn.commit() + + buffer = [] + methods = {} + processed = 0 + errors = 0 + total_rows = 0 + + insert_sql = """ + INSERT INTO product_forecasts + (pid, forecast_date, forecast_units, forecast_revenue, + lifecycle_phase, forecast_method, confidence_lower, + confidence_upper) + VALUES %s + """ + + def flush_buffer(): + nonlocal buffer, total_rows + if not buffer: + return + with conn.cursor() as cur: + psycopg2.extras.execute_values( + cur, insert_sql, buffer, + template="(%s, %s, %s, %s, %s, %s, %s, %s)", + page_size=BATCH_SIZE, + ) + conn.commit() + total_rows += len(buffer) + buffer = [] + + for _, product in products.iterrows(): + pid = int(product['pid']) + phase = product['phase'] + price = float(product['current_price'] or 0) + age = max(0, product.get('age_days') or 0) + + try: + curve_info = get_curve_for_product(product, curves_df) + + if phase in 
('preorder', 'launch'): + if curve_info: + scale = compute_scale_factor(phase, product, curve_info, batch_data) + forecasts = forecast_from_curve(curve_info, scale, age, FORECAST_HORIZON_DAYS) + method = 'lifecycle_curve' + else: + # No reliable curve — fall back to velocity if available + velocity = product.get('sales_velocity_daily') or 0 + if velocity > 0: + forecasts = np.full(FORECAST_HORIZON_DAYS, velocity) + method = 'velocity' + else: + forecasts = forecast_dormant() + method = 'zero' + + elif phase == 'decay': + if curve_info: + scale = compute_scale_factor(phase, product, curve_info, batch_data) + forecasts = forecast_from_curve(curve_info, scale, age, FORECAST_HORIZON_DAYS) + method = 'lifecycle_curve' + else: + velocity = product.get('sales_velocity_daily') or 0 + forecasts = np.full(FORECAST_HORIZON_DAYS, velocity) + method = 'velocity' + + elif phase == 'mature': + history = batch_data['mature_history'].get(pid) + forecasts = forecast_mature(product, history) + method = 'exp_smoothing' + + elif phase == 'slow_mover': + velocity = product.get('sales_velocity_daily') or 0 + forecasts = np.full(FORECAST_HORIZON_DAYS, velocity) + method = 'velocity' + + else: # dormant + forecasts = forecast_dormant() + method = 'zero' + + # Confidence interval: use accuracy-calibrated margins per phase + base_margin = accuracy_margins.get(phase, 0.5) + + for i, d in enumerate(forecast_dates): + base_units = float(forecasts[i]) if i < len(forecasts) else 0.0 + # Apply day-of-week and seasonal adjustments + units = base_units * dow_multipliers[i] * seasonal_multipliers[i] + # Widen confidence interval as horizon grows: day 0 = base, day 89 ≈ +50% wider + horizon_factor = 1.0 + 0.5 * (i / max(FORECAST_HORIZON_DAYS - 1, 1)) + margin = base_margin * horizon_factor + buffer.append(( + pid, d, + round(units, 2), + round(units * price, 4), + phase, method, + round(units * max(1 - margin, 0), 2), + round(units * (1 + margin), 2), + )) + + methods[method] = methods.get(method, 0) 
+ 1 + + except Exception as e: + log.warning(f"Error forecasting pid {pid}: {e}") + errors += 1 + # Write zero forecast so we have complete coverage + for d in forecast_dates: + buffer.append((pid, d, 0, 0, phase, 'zero', 0, 0)) + + processed += 1 + if processed % FLUSH_EVERY_PRODUCTS == 0: + flush_buffer() + log.info(f" Processed {processed}/{len(products)} products ({total_rows} rows written)...") + + # Final flush + flush_buffer() + + log.info(f"Forecast generation complete. {processed} products, {errors} errors, {total_rows} rows") + log.info(f"Method distribution: {methods}") + + return total_rows, processed, phase_counts + + +def archive_forecasts(conn, run_id): + """ + Copy current product_forecasts into history before they get replaced. + Only archives forecast rows for dates that have already passed, + so we can later compare them against actuals. + """ + with conn.cursor() as cur: + # Ensure history table exists + cur.execute(""" + CREATE TABLE IF NOT EXISTS product_forecasts_history ( + run_id INT NOT NULL, + pid BIGINT NOT NULL, + forecast_date DATE NOT NULL, + forecast_units NUMERIC(10,2), + forecast_revenue NUMERIC(14,4), + lifecycle_phase TEXT, + forecast_method TEXT, + confidence_lower NUMERIC(10,2), + confidence_upper NUMERIC(10,2), + generated_at TIMESTAMP, + PRIMARY KEY (run_id, pid, forecast_date) + ) + """) + cur.execute("CREATE INDEX IF NOT EXISTS idx_pfh_date ON product_forecasts_history(forecast_date)") + cur.execute("CREATE INDEX IF NOT EXISTS idx_pfh_pid_date ON product_forecasts_history(pid, forecast_date)") + + # Find the previous completed run (whose forecasts are still in product_forecasts) + cur.execute(""" + SELECT id FROM forecast_runs + WHERE status = 'completed' + ORDER BY finished_at DESC + LIMIT 1 + """) + prev_run = cur.fetchone() + if prev_run is None: + log.info("No previous completed run found, skipping archive") + conn.commit() + return 0 + + prev_run_id = prev_run[0] + + # Archive only past-date forecasts (where actuals 
now exist) + cur.execute(""" + INSERT INTO product_forecasts_history + (run_id, pid, forecast_date, forecast_units, forecast_revenue, + lifecycle_phase, forecast_method, confidence_lower, confidence_upper, generated_at) + SELECT %s, pid, forecast_date, forecast_units, forecast_revenue, + lifecycle_phase, forecast_method, confidence_lower, confidence_upper, generated_at + FROM product_forecasts + WHERE forecast_date < CURRENT_DATE + ON CONFLICT (run_id, pid, forecast_date) DO NOTHING + """, (prev_run_id,)) + + archived = cur.rowcount + conn.commit() + + if archived > 0: + log.info(f"Archived {archived} historical forecast rows from run {prev_run_id}") + else: + log.info("No past-date forecasts to archive") + + # Prune old history (keep 90 days for accuracy analysis) + cur.execute("DELETE FROM product_forecasts_history WHERE forecast_date < CURRENT_DATE - INTERVAL '90 days'") + pruned = cur.rowcount + if pruned > 0: + log.info(f"Pruned {pruned} old history rows (>90 days)") + conn.commit() + + return archived + + +def compute_accuracy(conn, run_id): + """ + Compute forecast accuracy metrics from archived history vs. actual sales. + + Joins product_forecasts_history with daily_product_snapshots on + (pid, forecast_date = snapshot_date) to compare forecasted vs. actual units. 
+ + Stores results in forecast_accuracy table, broken down by: + - overall: single aggregate row + - by_phase: per lifecycle phase + - by_lead_time: bucketed by how far ahead the forecast was + - by_method: per forecast method + - daily: per forecast_date (for trend charts) + """ + with conn.cursor() as cur: + # Ensure accuracy table exists + cur.execute(""" + CREATE TABLE IF NOT EXISTS forecast_accuracy ( + run_id INT NOT NULL, + metric_type TEXT NOT NULL, + dimension_value TEXT NOT NULL, + sample_size INT, + total_actual_units NUMERIC(12,2), + total_forecast_units NUMERIC(12,2), + mae NUMERIC(10,4), + wmape NUMERIC(10,4), + bias NUMERIC(10,4), + rmse NUMERIC(10,4), + computed_at TIMESTAMP NOT NULL DEFAULT NOW(), + PRIMARY KEY (run_id, metric_type, dimension_value) + ) + """) + conn.commit() + + # Check if we have any history to analyze + cur.execute("SELECT COUNT(*) FROM product_forecasts_history") + history_count = cur.fetchone()[0] + if history_count == 0: + log.info("No forecast history available for accuracy computation") + return + + # For each (pid, forecast_date) pair, keep only the most recent run's + # forecast row. This prevents double-counting when multiple runs have + # archived forecasts for the same product×date combination. 
+ accuracy_cte = """ + WITH ranked_history AS ( + SELECT + pfh.*, + fr.started_at, + ROW_NUMBER() OVER ( + PARTITION BY pfh.pid, pfh.forecast_date + ORDER BY fr.started_at DESC + ) AS rn + FROM product_forecasts_history pfh + JOIN forecast_runs fr ON fr.id = pfh.run_id + ), + accuracy AS ( + SELECT + rh.lifecycle_phase, + rh.forecast_method, + rh.forecast_date, + (rh.forecast_date - rh.started_at::date) AS lead_days, + rh.forecast_units, + COALESCE(dps.units_sold, 0) AS actual_units, + (rh.forecast_units - COALESCE(dps.units_sold, 0)) AS error, + ABS(rh.forecast_units - COALESCE(dps.units_sold, 0)) AS abs_error + FROM ranked_history rh + LEFT JOIN daily_product_snapshots dps + ON dps.pid = rh.pid AND dps.snapshot_date = rh.forecast_date + WHERE rh.rn = 1 + AND NOT (rh.forecast_units = 0 AND COALESCE(dps.units_sold, 0) = 0) + ) + """ + + # Compute and insert metrics for each dimension + dimensions = { + 'overall': "SELECT 'all' AS dim", + 'by_phase': "SELECT DISTINCT lifecycle_phase AS dim FROM accuracy", + 'by_lead_time': """ + SELECT DISTINCT + CASE + WHEN lead_days BETWEEN 0 AND 6 THEN '1-7d' + WHEN lead_days BETWEEN 7 AND 13 THEN '8-14d' + WHEN lead_days BETWEEN 14 AND 29 THEN '15-30d' + WHEN lead_days BETWEEN 30 AND 59 THEN '31-60d' + ELSE '61-90d' + END AS dim + FROM accuracy + """, + 'by_method': "SELECT DISTINCT forecast_method AS dim FROM accuracy", + 'daily': "SELECT DISTINCT forecast_date::text AS dim FROM accuracy", + } + + filter_clauses = { + 'overall': "lifecycle_phase != 'dormant'", + 'by_phase': "lifecycle_phase = dims.dim", + 'by_lead_time': """ + CASE + WHEN lead_days BETWEEN 0 AND 6 THEN '1-7d' + WHEN lead_days BETWEEN 7 AND 13 THEN '8-14d' + WHEN lead_days BETWEEN 14 AND 29 THEN '15-30d' + WHEN lead_days BETWEEN 30 AND 59 THEN '31-60d' + ELSE '61-90d' + END = dims.dim + """, + 'by_method': "forecast_method = dims.dim", + 'daily': "forecast_date::text = dims.dim", + } + + total_inserted = 0 + + for metric_type, dim_query in dimensions.items(): + 
filter_clause = filter_clauses[metric_type] + + sql = f""" + {accuracy_cte}, + dims AS ({dim_query}) + SELECT + dims.dim, + COUNT(*) AS sample_size, + COALESCE(SUM(a.actual_units), 0) AS total_actual, + COALESCE(SUM(a.forecast_units), 0) AS total_forecast, + AVG(a.abs_error) AS mae, + CASE WHEN SUM(a.actual_units) > 0 + THEN SUM(a.abs_error) / SUM(a.actual_units) + ELSE NULL END AS wmape, + AVG(a.error) AS bias, + SQRT(AVG(POWER(a.error, 2))) AS rmse + FROM dims + CROSS JOIN accuracy a + WHERE {filter_clause} + GROUP BY dims.dim + """ + + cur.execute(sql) + rows = cur.fetchall() + + for row in rows: + dim_val, sample_size, total_actual, total_forecast, mae, wmape, bias, rmse = row + cur.execute(""" + INSERT INTO forecast_accuracy + (run_id, metric_type, dimension_value, sample_size, + total_actual_units, total_forecast_units, mae, wmape, bias, rmse) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) + ON CONFLICT (run_id, metric_type, dimension_value) + DO UPDATE SET + sample_size = EXCLUDED.sample_size, + total_actual_units = EXCLUDED.total_actual_units, + total_forecast_units = EXCLUDED.total_forecast_units, + mae = EXCLUDED.mae, wmape = EXCLUDED.wmape, + bias = EXCLUDED.bias, rmse = EXCLUDED.rmse, + computed_at = NOW() + """, (run_id, metric_type, dim_val, sample_size, + float(total_actual), float(total_forecast), + float(mae) if mae is not None else None, + float(wmape) if wmape is not None else None, + float(bias) if bias is not None else None, + float(rmse) if rmse is not None else None)) + total_inserted += 1 + + conn.commit() + + # Prune old accuracy data (keep 90 days of runs + any in-progress run) + cur.execute(""" + DELETE FROM forecast_accuracy + WHERE run_id NOT IN ( + SELECT id FROM forecast_runs + WHERE finished_at >= NOW() - INTERVAL '90 days' + OR finished_at IS NULL + ) + """) + pruned = cur.rowcount + conn.commit() + + log.info(f"Accuracy metrics: {total_inserted} rows computed" + + (f", {pruned} old rows pruned" if pruned > 0 else "")) + + +def 
backfill_accuracy_data(conn, backfill_days=30): + """ + Generate retroactive forecast data for the past N days to bootstrap + accuracy metrics. Uses the current brand curves with per-product scaling + to approximate what the model would have predicted for each past day, + then stores results in product_forecasts_history for comparison against + actual snapshots. + + This is a model backtest (in-sample), not true out-of-sample accuracy, + but provides much better initial estimates than unscaled brand curves. + """ + backfill_start_time = time.time() + log.info(f"Backfilling {backfill_days} days of accuracy data with per-product scaling...") + + # Load DOW indices + dow_indices = compute_dow_indices(conn) + + # Load brand curves (already fitted) + curves_df = execute_query(conn, """ + SELECT brand, root_category, cat_id, category_level, + amplitude, decay_rate, baseline, + r_squared, median_first_week_sales, median_preorder_sales, + median_preorder_days + FROM brand_lifecycle_curves + """) + + # Load products + products = load_products(conn) + products['phase'] = products.apply(classify_phase, axis=1) + + # Skip dormant — they forecast 0 and are filtered from accuracy anyway + active = products[products['phase'] != 'dormant'].copy() + log.info(f"Backfilling for {len(active)} non-dormant products " + f"(skipping {len(products) - len(active)} dormant)") + + # Batch load product data for per-product scaling + batch_data = batch_load_product_data(conn, active) + + today = date.today() + backfill_start = today - timedelta(days=backfill_days) + + # Create a synthetic run entry + with conn.cursor() as cur: + cur.execute(""" + INSERT INTO forecast_runs + (started_at, finished_at, status, products_forecast, + phase_counts, error_message) + VALUES (%s, NOW(), 'backfill', %s, %s, %s) + RETURNING id + """, ( + backfill_start, + len(active), + json.dumps({'backfill_days': backfill_days}), + f'Model backtest: {backfill_days} days with per-product scaling', + )) + backfill_run_id = 
cur.fetchone()[0] + conn.commit() + log.info(f"Created backfill run {backfill_run_id} " + f"(simulated start: {backfill_start})") + + # Generate retroactive forecasts + all_rows = [] + backfill_dates = [backfill_start + timedelta(days=i) + for i in range(backfill_days)] + + for _, product in active.iterrows(): + pid = int(product['pid']) + price = float(product['current_price'] or 0) + current_age = product.get('age_days') + velocity = float(product.get('sales_velocity_daily') or 0) + phase = product['phase'] + + curve_info = get_curve_for_product(product, curves_df) + + # Compute per-product scale factor (same logic as main forecast) + scale = compute_scale_factor(phase, product, curve_info, batch_data) + + for forecast_date in backfill_dates: + # How many days ago was this date? + days_ago = (today - forecast_date).days + # Product's age on that date + past_age = (current_age - days_ago) if current_age is not None else None + + if past_age is not None and past_age < 0: + # Product didn't exist yet on this date + continue + + # Determine what phase the product was likely in + if past_age is not None: + if past_age <= LAUNCH_AGE_DAYS: + past_phase = 'launch' + elif past_age <= DECAY_AGE_DAYS: + past_phase = 'decay' + else: + past_phase = phase # use current classification + else: + past_phase = phase + + # Compute forecast value for this date + if past_phase in ('launch', 'decay', 'preorder') and curve_info: + amplitude, decay_rate, baseline = curve_info[:3] + age_for_calc = max(0, past_age or 0) + t_weeks = age_for_calc / 7.0 + # Use corrected formula: scale only the decay envelope, not the baseline + daily_value = (amplitude / 7.0) * np.exp(-decay_rate * t_weeks) * scale + (baseline / 7.0) + units = max(0.0, float(daily_value)) + method = 'lifecycle_curve' + elif past_phase == 'mature' and velocity > 0: + units = velocity + method = 'exp_smoothing' + else: + units = velocity if velocity > 0 else 0.0 + method = 'velocity' if velocity > 0 else 'zero' + + # Apply 
DOW multiplier + dow_mult = dow_indices.get(forecast_date.isoweekday(), 1.0) + units *= dow_mult + + if units == 0 and method == 'zero': + continue # skip zero-zero rows + + revenue = units * price + margin = 0.3 if method == 'lifecycle_curve' else 0.4 + + all_rows.append(( + backfill_run_id, pid, forecast_date, + round(float(units), 2), + round(float(revenue), 4), + past_phase, method, + round(float(units * (1 - margin)), 2), + round(float(units * (1 + margin)), 2), + backfill_start, # generated_at + )) + + log.info(f"Generated {len(all_rows)} backfill forecast rows") + + # Write to history table + if all_rows: + with conn.cursor() as cur: + sql = """ + INSERT INTO product_forecasts_history + (run_id, pid, forecast_date, forecast_units, forecast_revenue, + lifecycle_phase, forecast_method, confidence_lower, + confidence_upper, generated_at) + VALUES %s + ON CONFLICT (run_id, pid, forecast_date) DO NOTHING + """ + psycopg2.extras.execute_values( + cur, sql, all_rows, + template="(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", + page_size=BATCH_SIZE, + ) + conn.commit() + log.info(f"Wrote {len(all_rows)} rows to product_forecasts_history") + + # Now compute accuracy on the backfilled data + compute_accuracy(conn, backfill_run_id) + + # Mark the backfill run as completed + backfill_duration = time.time() - backfill_start_time + with conn.cursor() as cur: + cur.execute(""" + UPDATE forecast_runs + SET finished_at = NOW(), status = 'backfill', + duration_seconds = %s + WHERE id = %s + """, (round(backfill_duration, 2), backfill_run_id)) + conn.commit() + + log.info(f"Backfill complete in {backfill_duration:.1f}s") + return backfill_run_id + + +def main(): + start_time = time.time() + + conn = get_connection() + + # Clean up any stale "running" entries from prior crashes + cleanup_stale_runs(conn) + + # Check for --backfill flag + if '--backfill' in sys.argv: + idx = sys.argv.index('--backfill') + days = int(sys.argv[idx + 1]) if idx + 1 < len(sys.argv) else 30 + 
log.info("=" * 60) + log.info(f"Backfill mode: {days} days") + log.info("=" * 60) + try: + backfill_accuracy_data(conn, days) + finally: + conn.close() + return + + log.info("=" * 60) + log.info("Forecast Engine starting") + log.info("=" * 60) + + run_id = None + + try: + # Record run start + with conn.cursor() as cur: + cur.execute( + "INSERT INTO forecast_runs (started_at, status) VALUES (NOW(), 'running') RETURNING id" + ) + run_id = cur.fetchone()[0] + conn.commit() + + # Phase 0: Compute day-of-week and monthly seasonal indices + dow_indices = compute_dow_indices(conn) + monthly_indices = compute_monthly_seasonal_indices(conn) + + # Phase 1: Build reference curves + curves_df = build_reference_curves(conn) + + # Phase 2: Archive historical forecasts (before TRUNCATE in generation) + archive_forecasts(conn, run_id) + + # Phase 3: Compute accuracy from archived history vs actuals + compute_accuracy(conn, run_id) + + # Phase 3b: Load accuracy-calibrated confidence margins + accuracy_margins = load_accuracy_margins(conn) + + # Phase 4: Generate and stream-write forecasts (TRUNCATE + chunked INSERT) + total_rows, products_forecast, phase_counts = generate_all_forecasts( + conn, curves_df, dow_indices, monthly_indices, accuracy_margins + ) + + duration = time.time() - start_time + + # Record run completion (include DOW indices in metadata) + with conn.cursor() as cur: + cur.execute(""" + UPDATE forecast_runs + SET finished_at = NOW(), status = 'completed', + products_forecast = %s, phase_counts = %s, + curve_count = %s, duration_seconds = %s + WHERE id = %s + """, ( + products_forecast, + json.dumps({ + **phase_counts, + '_dow_indices': {str(k): v for k, v in dow_indices.items()}, + '_seasonal_indices': {str(k): v for k, v in monthly_indices.items()}, + }), + len(curves_df) if not curves_df.empty else 0, + round(duration, 2), + run_id, + )) + conn.commit() + + log.info("=" * 60) + log.info(f"Forecast complete in {duration:.1f}s") + log.info(f" Products: 
{products_forecast}") + log.info(f" Curves: {len(curves_df) if not curves_df.empty else 0}") + log.info(f" Phases: {phase_counts}") + log.info(f" Rows written: {total_rows}") + log.info("=" * 60) + + except Exception as e: + duration = time.time() - start_time + log.error(f"Forecast engine failed: {e}", exc_info=True) + + if run_id: + try: + with conn.cursor() as cur: + cur.execute(""" + UPDATE forecast_runs + SET finished_at = NOW(), status = 'failed', + error_message = %s, duration_seconds = %s + WHERE id = %s + """, (str(e), round(duration, 2), run_id)) + conn.commit() + except Exception: + pass + + sys.exit(1) + finally: + conn.close() + + +if __name__ == '__main__': + main() diff --git a/inventory-server/scripts/forecast/requirements.txt b/inventory-server/scripts/forecast/requirements.txt new file mode 100644 index 0000000..d1990d8 --- /dev/null +++ b/inventory-server/scripts/forecast/requirements.txt @@ -0,0 +1,5 @@ +numpy>=1.24 +scipy>=1.10 +pandas>=2.0 +psycopg2-binary>=2.9 +statsmodels>=0.14 diff --git a/inventory-server/scripts/forecast/run_forecast.js b/inventory-server/scripts/forecast/run_forecast.js new file mode 100644 index 0000000..3ea3d32 --- /dev/null +++ b/inventory-server/scripts/forecast/run_forecast.js @@ -0,0 +1,128 @@ +#!/usr/bin/env node +/** + * Forecast Pipeline Orchestrator + * + * Spawns the Python forecast engine with database credentials from the + * environment. Can be run manually, via cron, or integrated into the + * existing metrics pipeline. + * + * Usage: + * node run_forecast.js + * + * Environment: + * Reads DB_HOST, DB_USER, DB_PASSWORD, DB_NAME, DB_PORT from + * /var/www/html/inventory/.env (or current process env). 
+ */ + +const { spawn } = require('child_process'); +const path = require('path'); +const fs = require('fs'); + +// Load .env file if it exists (production path) +const envPaths = [ + '/var/www/html/inventory/.env', + path.join(__dirname, '../../.env'), +]; + +for (const envPath of envPaths) { + if (fs.existsSync(envPath)) { + const envContent = fs.readFileSync(envPath, 'utf-8'); + for (const line of envContent.split('\n')) { + const trimmed = line.trim(); + if (!trimmed || trimmed.startsWith('#')) continue; + const eqIndex = trimmed.indexOf('='); + if (eqIndex === -1) continue; + const key = trimmed.slice(0, eqIndex); + const value = trimmed.slice(eqIndex + 1); + if (!process.env[key]) { + process.env[key] = value; + } + } + console.log(`Loaded env from ${envPath}`); + break; + } +} + +// Verify required env vars +const required = ['DB_HOST', 'DB_USER', 'DB_PASSWORD', 'DB_NAME']; +const missing = required.filter(k => !process.env[k]); +if (missing.length > 0) { + console.error(`Missing required environment variables: ${missing.join(', ')}`); + process.exit(1); +} + +const SCRIPT_DIR = __dirname; +const PYTHON_SCRIPT = path.join(SCRIPT_DIR, 'forecast_engine.py'); +const VENV_DIR = path.join(SCRIPT_DIR, 'venv'); +const REQUIREMENTS = path.join(SCRIPT_DIR, 'requirements.txt'); + +// Determine python binary (prefer venv if it exists) +function getPythonBin() { + const venvPython = path.join(VENV_DIR, 'bin', 'python'); + if (fs.existsSync(venvPython)) return venvPython; + + // Fall back to system python + return 'python3'; +} + +// Ensure venv and dependencies are installed +async function ensureDependencies() { + if (!fs.existsSync(path.join(VENV_DIR, 'bin', 'python'))) { + console.log('Creating virtual environment...'); + await runCommand('python3', ['-m', 'venv', VENV_DIR]); + } + + // Always run pip install — idempotent, fast when packages already present + console.log('Checking dependencies...'); + const python = path.join(VENV_DIR, 'bin', 'python'); + await 
runCommand(python, ['-m', 'pip', 'install', '--quiet', '-r', REQUIREMENTS]); +} + +function runCommand(cmd, args, options = {}) { + return new Promise((resolve, reject) => { + const proc = spawn(cmd, args, { + stdio: 'inherit', + ...options, + }); + proc.on('close', code => { + if (code === 0) resolve(); + else reject(new Error(`${cmd} exited with code ${code}`)); + }); + proc.on('error', reject); + }); +} + +async function main() { + const startTime = Date.now(); + console.log('='.repeat(60)); + console.log(`Forecast Pipeline - ${new Date().toISOString()}`); + console.log('='.repeat(60)); + + try { + await ensureDependencies(); + + const pythonBin = getPythonBin(); + console.log(`Using Python: ${pythonBin}`); + console.log(`Running: ${PYTHON_SCRIPT}`); + console.log(''); + + await runCommand(pythonBin, [PYTHON_SCRIPT], { + env: { + ...process.env, + PYTHONUNBUFFERED: '1', // Real-time output + }, + }); + + const duration = ((Date.now() - startTime) / 1000).toFixed(1); + console.log(''); + console.log('='.repeat(60)); + console.log(`Forecast pipeline completed in ${duration}s`); + console.log('='.repeat(60)); + } catch (err) { + const duration = ((Date.now() - startTime) / 1000).toFixed(1); + console.error(`Forecast pipeline FAILED after ${duration}s:`, err.message); + process.exit(1); + } +} + +main(); diff --git a/inventory-server/scripts/forecast/sql/create_tables.sql b/inventory-server/scripts/forecast/sql/create_tables.sql new file mode 100644 index 0000000..fcc6cd2 --- /dev/null +++ b/inventory-server/scripts/forecast/sql/create_tables.sql @@ -0,0 +1,51 @@ +-- Forecasting Pipeline Tables +-- Run once to create the schema. Safe to re-run (IF NOT EXISTS). 
+
+-- Precomputed reference decay curves per brand (or brand x category at any hierarchy level)
+CREATE TABLE IF NOT EXISTS brand_lifecycle_curves (
+    id SERIAL PRIMARY KEY,
+    brand TEXT NOT NULL,
+    root_category TEXT,          -- NULL = brand-level fallback curve, else category name
+    cat_id BIGINT,               -- NULL = brand-only; else category_hierarchy.cat_id for precise matching
+    category_level SMALLINT,     -- NULL = brand-only; 0-3 = hierarchy depth
+    amplitude NUMERIC(10,4),     -- A in: sales(t) = A * exp(-λt) + C
+    decay_rate NUMERIC(10,6),    -- λ (higher = faster decay)
+    baseline NUMERIC(10,4),      -- C (long-tail steady-state daily sales)
+    r_squared NUMERIC(6,4),      -- goodness of fit
+    sample_size INT,             -- number of products that informed this curve
+    median_first_week_sales NUMERIC(10,2),  -- for scaling new launches
+    median_preorder_sales NUMERIC(10,2),    -- for scaling pre-order products
+    median_preorder_days NUMERIC(10,2),     -- median pre-order accumulation window (days)
+    computed_at TIMESTAMP DEFAULT NOW(),
+    -- NOTE(review): Postgres treats NULLs as distinct in UNIQUE constraints,
+    -- so this does NOT prevent multiple brand-level rows (cat_id IS NULL) for
+    -- the same brand. If one-row-per-brand is required, consider
+    -- UNIQUE NULLS NOT DISTINCT (PG15+) or a partial unique index — confirm
+    -- against how the curve writer inserts/upserts these rows.
+    UNIQUE(brand, cat_id)
+);
+
+-- Per-product daily forecasts (next 90 days, regenerated each run)
+CREATE TABLE IF NOT EXISTS product_forecasts (
+    pid BIGINT NOT NULL,
+    forecast_date DATE NOT NULL,
+    forecast_units NUMERIC(10,2),
+    forecast_revenue NUMERIC(14,4),
+    lifecycle_phase TEXT,        -- preorder, launch, decay, mature, slow_mover, dormant
+    forecast_method TEXT,        -- lifecycle_curve, exp_smoothing, velocity, zero
+    confidence_lower NUMERIC(10,2),
+    confidence_upper NUMERIC(10,2),
+    generated_at TIMESTAMP DEFAULT NOW(),
+    PRIMARY KEY (pid, forecast_date)
+);
+
+CREATE INDEX IF NOT EXISTS idx_pf_date ON product_forecasts(forecast_date);
+CREATE INDEX IF NOT EXISTS idx_pf_phase ON product_forecasts(lifecycle_phase);
+
+-- Forecast run history (for monitoring)
+CREATE TABLE IF NOT EXISTS forecast_runs (
+    id SERIAL PRIMARY KEY,
+    started_at TIMESTAMP NOT NULL,
+    finished_at TIMESTAMP,
+    status TEXT DEFAULT 'running',  -- running, completed, failed
+    products_forecast INT,
+
phase_counts JSONB, -- {"launch": 50, "decay": 200, ...} + curve_count INT, -- brand curves computed + error_message TEXT, + duration_seconds NUMERIC(10,2) +); diff --git a/inventory-server/scripts/import-from-prod.js b/inventory-server/scripts/import-from-prod.js index c8ffa99..cd1ef50 100644 --- a/inventory-server/scripts/import-from-prod.js +++ b/inventory-server/scripts/import-from-prod.js @@ -40,7 +40,7 @@ const sshConfig = { password: process.env.PROD_DB_PASSWORD, database: process.env.PROD_DB_NAME, port: process.env.PROD_DB_PORT || 3306, - timezone: '-05:00', // Production DB always stores times in EST (UTC-5) regardless of DST + timezone: '-05:00', // mysql2 driver timezone — corrected at runtime via adjustDateForMySQL() in utils.js }, localDbConfig: { // PostgreSQL config for local diff --git a/inventory-server/scripts/import/orders.js b/inventory-server/scripts/import/orders.js index ecdc523..74a7c50 100644 --- a/inventory-server/scripts/import/orders.js +++ b/inventory-server/scripts/import/orders.js @@ -58,8 +58,12 @@ async function importOrders(prodConnection, localConnection, incrementalUpdate = "SELECT last_sync_timestamp FROM sync_status WHERE table_name = 'orders'" ); const lastSyncTime = syncInfo?.rows?.[0]?.last_sync_timestamp || '1970-01-01'; + // Adjust for mysql2 driver timezone vs MySQL server timezone mismatch + const mysqlSyncTime = prodConnection.adjustDateForMySQL + ? prodConnection.adjustDateForMySQL(lastSyncTime) + : lastSyncTime; - console.log('Orders: Using last sync time:', lastSyncTime); + console.log('Orders: Using last sync time:', lastSyncTime, '(adjusted:', mysqlSyncTime, ')'); // First get count of order items - Keep MySQL compatible for production const [[{ total }]] = await prodConnection.query(` @@ -82,7 +86,7 @@ async function importOrders(prodConnection, localConnection, incrementalUpdate = ) ) ` : ''} - `, incrementalUpdate ? [lastSyncTime, lastSyncTime, lastSyncTime] : []); + `, incrementalUpdate ? 
[mysqlSyncTime, mysqlSyncTime, mysqlSyncTime] : []); totalOrderItems = total; console.log('Orders: Found changes:', totalOrderItems); @@ -116,7 +120,7 @@ async function importOrders(prodConnection, localConnection, incrementalUpdate = ) ) ` : ''} - `, incrementalUpdate ? [lastSyncTime, lastSyncTime, lastSyncTime] : []); + `, incrementalUpdate ? [mysqlSyncTime, mysqlSyncTime, mysqlSyncTime] : []); console.log('Orders: Found', orderItems.length, 'order items to process'); diff --git a/inventory-server/scripts/import/products.js b/inventory-server/scripts/import/products.js index ac66ccf..aa191a8 100644 --- a/inventory-server/scripts/import/products.js +++ b/inventory-server/scripts/import/products.js @@ -669,8 +669,13 @@ async function importProducts(prodConnection, localConnection, incrementalUpdate // Setup temporary tables await setupTemporaryTables(localConnection); + // Adjust sync time for mysql2 driver timezone vs MySQL server timezone mismatch + const mysqlSyncTime = prodConnection.adjustDateForMySQL + ? 
prodConnection.adjustDateForMySQL(lastSyncTime) + : lastSyncTime; + // Materialize calculations into temp table - const materializeResult = await materializeCalculations(prodConnection, localConnection, incrementalUpdate, lastSyncTime, startTime); + const materializeResult = await materializeCalculations(prodConnection, localConnection, incrementalUpdate, mysqlSyncTime, startTime); // Get the list of products that need updating const [products] = await localConnection.query(` diff --git a/inventory-server/scripts/import/purchase-orders.js b/inventory-server/scripts/import/purchase-orders.js index 39eee9e..0b882bc 100644 --- a/inventory-server/scripts/import/purchase-orders.js +++ b/inventory-server/scripts/import/purchase-orders.js @@ -65,8 +65,12 @@ async function importPurchaseOrders(prodConnection, localConnection, incremental "SELECT last_sync_timestamp FROM sync_status WHERE table_name = 'purchase_orders'" ); const lastSyncTime = syncInfo?.rows?.[0]?.last_sync_timestamp || '1970-01-01'; + // Adjust for mysql2 driver timezone vs MySQL server timezone mismatch + const mysqlSyncTime = prodConnection.adjustDateForMySQL + ? prodConnection.adjustDateForMySQL(lastSyncTime) + : lastSyncTime; - console.log('Purchase Orders: Using last sync time:', lastSyncTime); + console.log('Purchase Orders: Using last sync time:', lastSyncTime, '(adjusted:', mysqlSyncTime, ')'); // Create temp tables for processing await localConnection.query(` @@ -254,7 +258,7 @@ async function importPurchaseOrders(prodConnection, localConnection, incremental OR p.date_estin > ? ) ` : ''} - `, incrementalUpdate ? [lastSyncTime, lastSyncTime, lastSyncTime] : []); + `, incrementalUpdate ? 
[mysqlSyncTime, mysqlSyncTime, mysqlSyncTime] : []); const totalPOs = poCount[0].total; console.log(`Found ${totalPOs} relevant purchase orders`); @@ -291,7 +295,7 @@ async function importPurchaseOrders(prodConnection, localConnection, incremental ` : ''} ORDER BY p.po_id LIMIT ${PO_BATCH_SIZE} OFFSET ${offset} - `, incrementalUpdate ? [lastSyncTime, lastSyncTime, lastSyncTime] : []); + `, incrementalUpdate ? [mysqlSyncTime, mysqlSyncTime, mysqlSyncTime] : []); if (poList.length === 0) { allPOsProcessed = true; @@ -426,7 +430,7 @@ async function importPurchaseOrders(prodConnection, localConnection, incremental OR r.date_created > ? ) ` : ''} - `, incrementalUpdate ? [lastSyncTime, lastSyncTime] : []); + `, incrementalUpdate ? [mysqlSyncTime, mysqlSyncTime] : []); const totalReceivings = receivingCount[0].total; console.log(`Found ${totalReceivings} relevant receivings`); @@ -463,7 +467,7 @@ async function importPurchaseOrders(prodConnection, localConnection, incremental ` : ''} ORDER BY r.receiving_id LIMIT ${PO_BATCH_SIZE} OFFSET ${offset} - `, incrementalUpdate ? [lastSyncTime, lastSyncTime] : []); + `, incrementalUpdate ? [mysqlSyncTime, mysqlSyncTime] : []); if (receivingList.length === 0) { allReceivingsProcessed = true; diff --git a/inventory-server/scripts/import/utils.js b/inventory-server/scripts/import/utils.js index b7d888d..564b86a 100644 --- a/inventory-server/scripts/import/utils.js +++ b/inventory-server/scripts/import/utils.js @@ -48,6 +48,37 @@ async function setupConnections(sshConfig) { stream: tunnel.stream, }); + // Detect MySQL server timezone and calculate correction for the driver timezone mismatch. + // The mysql2 driver is configured with timezone: '-05:00' (EST), but the MySQL server + // may be in a different timezone (e.g., America/Chicago = CST/CDT). When the driver + // formats a JS Date as EST and MySQL interprets it in its own timezone, DATETIME + // comparisons can be off. 
This correction adjusts Date objects before they're passed + // to MySQL queries so the formatted string matches the server's local time. + const [[{ utcDiffSec }]] = await prodConnection.query( + "SELECT TIMESTAMPDIFF(SECOND, NOW(), UTC_TIMESTAMP()) as utcDiffSec" + ); + const mysqlOffsetMs = -utcDiffSec * 1000; // MySQL UTC offset in ms (e.g., -21600000 for CST) + const driverOffsetMs = -5 * 3600 * 1000; // Driver's -05:00 in ms (-18000000) + const tzCorrectionMs = driverOffsetMs - mysqlOffsetMs; + // CST (winter): -18000000 - (-21600000) = +3600000 (1 hour correction needed) + // CDT (summer): -18000000 - (-18000000) = 0 (no correction needed) + + if (tzCorrectionMs !== 0) { + console.log(`MySQL timezone correction: ${tzCorrectionMs / 1000}s (server offset: ${utcDiffSec}s from UTC)`); + } + + /** + * Adjusts a Date/timestamp for the mysql2 driver timezone mismatch before + * passing it as a query parameter to MySQL. This ensures that the string + * mysql2 generates matches the timezone that DATETIME values are stored in. + */ + function adjustDateForMySQL(date) { + if (!date || tzCorrectionMs === 0) return date; + const d = date instanceof Date ? date : new Date(date); + return new Date(d.getTime() - tzCorrectionMs); + } + prodConnection.adjustDateForMySQL = adjustDateForMySQL; + // Setup PostgreSQL connection pool for local const localPool = new Pool(sshConfig.localDbConfig); diff --git a/inventory-server/scripts/metrics-new/update_daily_snapshots.sql b/inventory-server/scripts/metrics-new/update_daily_snapshots.sql index af37a00..3c69104 100644 --- a/inventory-server/scripts/metrics-new/update_daily_snapshots.sql +++ b/inventory-server/scripts/metrics-new/update_daily_snapshots.sql @@ -1,6 +1,7 @@ -- Description: Calculates and updates daily aggregated product data. --- Self-healing: automatically detects and fills gaps in snapshot history. --- Always reprocesses recent days to pick up new orders and data corrections. 
+-- Self-healing: detects gaps (missing snapshots), stale data (snapshot +-- aggregates that don't match source tables after backfills), and always +-- reprocesses recent days to pick up new orders and data corrections. -- Dependencies: Core import tables (products, orders, purchase_orders), calculate_status table. -- Frequency: Hourly (Run ~5-10 minutes after hourly data import completes). @@ -18,28 +19,26 @@ DECLARE BEGIN RAISE NOTICE 'Running % script. Start Time: %', _module_name, _start_time; - -- Find the latest existing snapshot date to determine where gaps begin + -- Find the latest existing snapshot date (for logging only) SELECT MAX(snapshot_date) INTO _latest_snapshot FROM public.daily_product_snapshots; - -- Determine how far back to look for gaps, capped at _max_backfill_days - _backfill_start := GREATEST( - COALESCE(_latest_snapshot + 1, CURRENT_DATE - _max_backfill_days), - CURRENT_DATE - _max_backfill_days - ); + -- Always scan the full backfill window to catch holes in the middle, + -- not just gaps at the end. The gap fill and stale detection queries + -- need to see the entire range to find missing or outdated snapshots. + _backfill_start := CURRENT_DATE - _max_backfill_days; IF _latest_snapshot IS NULL THEN RAISE NOTICE 'No existing snapshots found. Backfilling up to % days.', _max_backfill_days; - ELSIF _backfill_start > _latest_snapshot + 1 THEN - RAISE NOTICE 'Latest snapshot: %. Gap exceeds % day cap — backfilling from %. Use rebuild script for full history.', - _latest_snapshot, _max_backfill_days, _backfill_start; ELSE - RAISE NOTICE 'Latest snapshot: %. Checking for gaps from %.', _latest_snapshot, _backfill_start; + RAISE NOTICE 'Latest snapshot: %. Scanning from % for gaps and stale data.', _latest_snapshot, _backfill_start; END IF; -- Process all dates that need snapshots: -- 1. Gap fill: dates with orders/receivings but no snapshots (older than recent window) - -- 2. 
Recent recheck: last N days always reprocessed (picks up new orders, corrections) + -- 2. Stale detection: existing snapshots where aggregates don't match source data + -- (catches backfilled imports that arrived after snapshot was calculated) + -- 3. Recent recheck: last N days always reprocessed (picks up new orders, corrections) FOR _target_date IN SELECT d FROM ( -- Gap fill: find dates with activity but missing snapshots @@ -55,6 +54,36 @@ BEGIN SELECT 1 FROM public.daily_product_snapshots dps WHERE dps.snapshot_date = activity_dates.d ) UNION + -- Stale detection: compare snapshot aggregates against source tables + SELECT snap_agg.snapshot_date AS d + FROM ( + SELECT snapshot_date, + COALESCE(SUM(units_received), 0)::bigint AS snap_received, + COALESCE(SUM(units_sold), 0)::bigint AS snap_sold + FROM public.daily_product_snapshots + WHERE snapshot_date >= _backfill_start + AND snapshot_date < CURRENT_DATE - _recent_recheck_days + GROUP BY snapshot_date + ) snap_agg + LEFT JOIN ( + SELECT received_date::date AS d, SUM(qty_each)::bigint AS actual_received + FROM public.receivings + WHERE received_date::date >= _backfill_start + AND received_date::date < CURRENT_DATE - _recent_recheck_days + GROUP BY received_date::date + ) recv_agg ON snap_agg.snapshot_date = recv_agg.d + LEFT JOIN ( + SELECT date::date AS d, + SUM(CASE WHEN quantity > 0 AND COALESCE(status, 'pending') NOT IN ('canceled', 'returned') + THEN quantity ELSE 0 END)::bigint AS actual_sold + FROM public.orders + WHERE date::date >= _backfill_start + AND date::date < CURRENT_DATE - _recent_recheck_days + GROUP BY date::date + ) orders_agg ON snap_agg.snapshot_date = orders_agg.d + WHERE snap_agg.snap_received != COALESCE(recv_agg.actual_received, 0) + OR snap_agg.snap_sold != COALESCE(orders_agg.actual_sold, 0) + UNION -- Recent days: always reprocess SELECT d::date FROM generate_series( @@ -66,11 +95,18 @@ BEGIN ORDER BY d LOOP _days_processed := _days_processed + 1; - RAISE NOTICE 'Processing date: % 
[%/%]', _target_date, _days_processed, - _days_processed; -- count not known ahead of time, but shows progress - + + -- Classify why this date is being processed (for logging) + IF _target_date >= CURRENT_DATE - _recent_recheck_days THEN + RAISE NOTICE 'Processing date: % [recent recheck]', _target_date; + ELSIF NOT EXISTS (SELECT 1 FROM public.daily_product_snapshots WHERE snapshot_date = _target_date) THEN + RAISE NOTICE 'Processing date: % [gap fill — no existing snapshot]', _target_date; + ELSE + RAISE NOTICE 'Processing date: % [stale data — snapshot aggregates mismatch source]', _target_date; + END IF; + -- IMPORTANT: First delete any existing data for this date to prevent duplication - DELETE FROM public.daily_product_snapshots + DELETE FROM public.daily_product_snapshots WHERE snapshot_date = _target_date; -- Proceed with calculating daily metrics only for products with actual activity diff --git a/inventory-server/scripts/metrics-new/update_lifecycle_forecasts.sql b/inventory-server/scripts/metrics-new/update_lifecycle_forecasts.sql new file mode 100644 index 0000000..868ef7d --- /dev/null +++ b/inventory-server/scripts/metrics-new/update_lifecycle_forecasts.sql @@ -0,0 +1,131 @@ +-- Description: Populates lifecycle forecast columns on product_metrics from product_forecasts. +-- Runs AFTER update_product_metrics.sql so that lead time / days of stock settings are available. +-- Dependencies: product_metrics (fully populated), product_forecasts, settings tables. +-- Frequency: After each metrics run and/or after forecast engine runs. + +DO $$ +DECLARE + _module_name TEXT := 'lifecycle_forecasts'; + _start_time TIMESTAMPTZ := clock_timestamp(); + _updated INT; +BEGIN + RAISE NOTICE 'Running % module. 
Start Time: %', _module_name, _start_time; + + -- Step 1: Set lifecycle_phase from product_forecasts (one phase per product) + UPDATE product_metrics pm + SET lifecycle_phase = sub.lifecycle_phase + FROM ( + SELECT DISTINCT ON (pid) pid, lifecycle_phase + FROM product_forecasts + ORDER BY pid, forecast_date + ) sub + WHERE pm.pid = sub.pid + AND (pm.lifecycle_phase IS DISTINCT FROM sub.lifecycle_phase); + + GET DIAGNOSTICS _updated = ROW_COUNT; + RAISE NOTICE 'Updated lifecycle_phase for % products', _updated; + + -- Step 2: Compute lifecycle-based lead time and planning period forecasts + -- Uses each product's configured lead time and days of stock + WITH forecast_sums AS ( + SELECT + pf.pid, + SUM(pf.forecast_units) FILTER ( + WHERE pf.forecast_date <= CURRENT_DATE + s.effective_lead_time + ) AS lt_forecast, + SUM(pf.forecast_units) FILTER ( + WHERE pf.forecast_date <= CURRENT_DATE + s.effective_lead_time + s.effective_days_of_stock + ) AS pp_forecast + FROM product_forecasts pf + JOIN ( + SELECT + p.pid, + COALESCE(sp.lead_time_days, sv.default_lead_time_days, + (SELECT setting_value::int FROM settings_global WHERE setting_key = 'default_lead_time_days'), 14 + ) AS effective_lead_time, + COALESCE(sp.days_of_stock, sv.default_days_of_stock, + (SELECT setting_value::int FROM settings_global WHERE setting_key = 'default_days_of_stock'), 30 + ) AS effective_days_of_stock + FROM products p + LEFT JOIN settings_product sp ON p.pid = sp.pid + LEFT JOIN settings_vendor sv ON p.vendor = sv.vendor + ) s ON s.pid = pf.pid + WHERE pf.forecast_date >= CURRENT_DATE + GROUP BY pf.pid + ) + UPDATE product_metrics pm + SET + lifecycle_lead_time_forecast = COALESCE(fs.lt_forecast, 0), + lifecycle_planning_period_forecast = COALESCE(fs.pp_forecast, 0) + FROM forecast_sums fs + WHERE pm.pid = fs.pid + AND (pm.lifecycle_lead_time_forecast IS DISTINCT FROM COALESCE(fs.lt_forecast, 0) + OR pm.lifecycle_planning_period_forecast IS DISTINCT FROM COALESCE(fs.pp_forecast, 0)); + + GET 
DIAGNOSTICS _updated = ROW_COUNT; + RAISE NOTICE 'Updated lifecycle forecasts for % products', _updated; + + -- Step 3: Reclassify demand_pattern using residual CV (de-trended) + -- For launch/decay products, raw CV is high because of expected lifecycle decay. + -- We subtract the expected brand curve value to get residuals, then compute CV on those. + -- Products that track their brand curve closely → low residual CV → "stable" + -- Products with erratic deviations from curve → higher residual CV → "variable"/"sporadic" + WITH product_curve AS ( + -- Get each product's brand curve and age + SELECT + pm.pid, + pm.lifecycle_phase, + pm.date_first_received, + blc.amplitude, + blc.decay_rate, + blc.baseline + FROM product_metrics pm + JOIN products p ON p.pid = pm.pid + LEFT JOIN brand_lifecycle_curves blc + ON blc.brand = pm.brand + AND blc.root_category IS NULL -- brand-only curve + WHERE pm.lifecycle_phase IN ('launch', 'decay') + AND pm.date_first_received IS NOT NULL + AND blc.amplitude IS NOT NULL + ), + daily_residuals AS ( + -- Compute residual = actual - expected for each snapshot day + -- Curve params are in WEEKLY units; divide by 7 to get daily expected + SELECT + dps.pid, + dps.units_sold, + (pc.amplitude * EXP(-pc.decay_rate * (dps.snapshot_date - pc.date_first_received)::numeric / 7.0) + pc.baseline) / 7.0 AS expected, + dps.units_sold - (pc.amplitude * EXP(-pc.decay_rate * (dps.snapshot_date - pc.date_first_received)::numeric / 7.0) + pc.baseline) / 7.0 AS residual + FROM daily_product_snapshots dps + JOIN product_curve pc ON pc.pid = dps.pid + WHERE dps.snapshot_date >= CURRENT_DATE - INTERVAL '29 days' + AND dps.snapshot_date <= CURRENT_DATE + ), + residual_cv AS ( + SELECT + pid, + AVG(units_sold) AS avg_sales, + CASE WHEN COUNT(*) >= 7 AND AVG(ABS(expected)) > 0.01 THEN + STDDEV_POP(residual) / GREATEST(AVG(ABS(expected)), 0.1) + END AS res_cv + FROM daily_residuals + GROUP BY pid + ) + UPDATE product_metrics pm + SET demand_pattern = 
classify_demand_pattern(rc.avg_sales, rc.res_cv) + FROM residual_cv rc + WHERE pm.pid = rc.pid + AND rc.res_cv IS NOT NULL + AND pm.demand_pattern IS DISTINCT FROM classify_demand_pattern(rc.avg_sales, rc.res_cv); + + GET DIAGNOSTICS _updated = ROW_COUNT; + RAISE NOTICE 'Reclassified demand_pattern for % launch/decay products', _updated; + + -- Update tracking + INSERT INTO public.calculate_status (module_name, last_calculation_timestamp) + VALUES (_module_name, clock_timestamp()) + ON CONFLICT (module_name) DO UPDATE SET + last_calculation_timestamp = EXCLUDED.last_calculation_timestamp; + + RAISE NOTICE '% module complete. Duration: %', _module_name, clock_timestamp() - _start_time; +END $$; diff --git a/inventory-server/src/routes/dashboard.js b/inventory-server/src/routes/dashboard.js index 7a9c341..df9dbb5 100644 --- a/inventory-server/src/routes/dashboard.js +++ b/inventory-server/src/routes/dashboard.js @@ -67,6 +67,23 @@ router.get('/stock/metrics', async (req, res) => { ORDER BY CASE WHEN brand = 'Other' THEN 1 ELSE 0 END, stock_cost DESC `); + // Stock breakdown by lifecycle phase (lifecycle_phase populated by update_lifecycle_forecasts.sql) + const { rows: phaseStock } = await executeQuery(` + SELECT + COALESCE(pm.lifecycle_phase, 'unknown') AS phase, + COUNT(DISTINCT pm.pid)::integer AS products, + COALESCE(SUM(pm.current_stock), 0)::integer AS units, + ROUND(COALESCE(SUM(pm.current_stock_cost), 0)::numeric, 2) AS cost, + ROUND(COALESCE(SUM(pm.current_stock_retail), 0)::numeric, 2) AS retail + FROM product_metrics pm + WHERE pm.is_visible = true AND pm.current_stock > 0 + AND COALESCE(pm.preorder_count, 0) = 0 + GROUP BY pm.lifecycle_phase + ORDER BY cost DESC + `); + + const phaseTotalCost = phaseStock.reduce((s, r) => s + (parseFloat(r.cost) || 0), 0); + // Format the response with explicit type conversion const response = { totalProducts: parseInt(stockMetrics.total_products) || 0, @@ -80,7 +97,17 @@ router.get('/stock/metrics', async (req, res) => { 
units: parseInt(v.stock_units) || 0, cost: parseFloat(v.stock_cost) || 0, retail: parseFloat(v.stock_retail) || 0 - })) + })), + phaseStock: phaseStock.filter(r => parseFloat(r.cost) > 0).map(r => ({ + phase: r.phase, + products: parseInt(r.products) || 0, + units: parseInt(r.units) || 0, + cost: parseFloat(r.cost) || 0, + retail: parseFloat(r.retail) || 0, + percentage: phaseTotalCost > 0 + ? parseFloat(((parseFloat(r.cost) / phaseTotalCost) * 100).toFixed(1)) + : 0, + })), }; res.json(response); @@ -208,12 +235,39 @@ router.get('/replenishment/metrics', async (req, res) => { LIMIT 5 `); + // Replenishment breakdown by lifecycle phase (lifecycle_phase on product_metrics) + const { rows: phaseReplenish } = await executeQuery(` + SELECT + COALESCE(pm.lifecycle_phase, 'unknown') AS phase, + COUNT(DISTINCT pm.pid)::integer AS products, + COALESCE(SUM(pm.replenishment_units), 0)::integer AS units, + ROUND(COALESCE(SUM(pm.replenishment_cost), 0)::numeric, 2) AS cost + FROM product_metrics pm + WHERE pm.is_visible = true + AND pm.is_replenishable = true + AND (pm.status IN ('Critical', 'Reorder') OR pm.current_stock < 0) + AND pm.replenishment_units > 0 + GROUP BY pm.lifecycle_phase + ORDER BY cost DESC + `); + + const replenishTotalCost = phaseReplenish.reduce((s, r) => s + (parseFloat(r.cost) || 0), 0); + // Format response const response = { productsToReplenish: parseInt(metrics.products_to_replenish) || 0, unitsToReplenish: parseInt(metrics.total_units_needed) || 0, replenishmentCost: parseFloat(metrics.total_cost) || 0, replenishmentRetail: parseFloat(metrics.total_retail) || 0, + phaseBreakdown: phaseReplenish.filter(r => parseFloat(r.cost) > 0).map(r => ({ + phase: r.phase, + products: parseInt(r.products) || 0, + units: parseInt(r.units) || 0, + cost: parseFloat(r.cost) || 0, + percentage: replenishTotalCost > 0 + ? 
parseFloat(((parseFloat(r.cost) / replenishTotalCost) * 100).toFixed(1)) + : 0, + })), topVariants: variants.map(v => ({ id: v.pid, title: v.title, @@ -234,165 +288,499 @@ router.get('/replenishment/metrics', async (req, res) => { }); // GET /dashboard/forecast/metrics -// Returns sales forecasts for specified period +// Reads from product_forecasts table (lifecycle-aware forecasting pipeline). +// Falls back to velocity-based projection if forecast table is empty. router.get('/forecast/metrics', async (req, res) => { - // Default to last 30 days if no date range provided const today = new Date(); - const thirtyDaysAgo = new Date(today); - thirtyDaysAgo.setDate(today.getDate() - 30); - - const startDate = req.query.startDate || thirtyDaysAgo.toISOString(); - const endDate = req.query.endDate || today.toISOString(); - + const thirtyDaysOut = new Date(today); + thirtyDaysOut.setDate(today.getDate() + 30); + + const startDate = req.query.startDate ? new Date(req.query.startDate) : today; + const endDate = req.query.endDate ? 
new Date(req.query.endDate) : thirtyDaysOut; + const startISO = startDate.toISOString().split('T')[0]; + const endISO = endDate.toISOString().split('T')[0]; + const days = Math.max(1, Math.round((endDate - startDate) / (1000 * 60 * 60 * 24))); + try { - // Check if sales_forecasts table exists and has data - const { rows: tableCheck } = await executeQuery(` - SELECT EXISTS ( - SELECT FROM information_schema.tables - WHERE table_schema = 'public' - AND table_name = 'sales_forecasts' - ) as table_exists - `); - - const tableExists = tableCheck[0].table_exists; - - if (!tableExists) { - console.log('sales_forecasts table does not exist, returning dummy data'); - - // Generate dummy data for forecast - const days = 30; - const dummyData = []; - const startDateObj = new Date(startDate); - - for (let i = 0; i < days; i++) { - const currentDate = new Date(startDateObj); - currentDate.setDate(startDateObj.getDate() + i); - - // Use sales data with slight randomization - const baseValue = 500 + Math.random() * 200; - dummyData.push({ - date: currentDate.toISOString().split('T')[0], - revenue: parseFloat((baseValue + Math.random() * 100).toFixed(2)), - confidence: parseFloat((0.7 + Math.random() * 0.2).toFixed(2)) - }); + // Check if product_forecasts has data + const { rows: [countRow] } = await executeQuery( + `SELECT COUNT(*) AS cnt FROM product_forecasts WHERE forecast_date >= $1 LIMIT 1`, + [startISO] + ); + const hasForecastData = parseInt(countRow.cnt) > 0; + + if (hasForecastData) { + // --- Read from lifecycle-aware forecast pipeline --- + + // Find the last date covered by product_forecasts + const { rows: [horizonRow] } = await executeQuery( + `SELECT MAX(forecast_date) AS max_date FROM product_forecasts` + ); + const forecastHorizonISO = horizonRow.max_date instanceof Date + ? 
horizonRow.max_date.toISOString().split('T')[0] + : horizonRow.max_date; + const forecastHorizon = new Date(forecastHorizonISO + 'T00:00:00'); + const clampedEndISO = endISO <= forecastHorizonISO ? endISO : forecastHorizonISO; + const needsExtrapolation = endISO > forecastHorizonISO; + + // Totals from actual forecast data (clamped to horizon) + const { rows: [totals] } = await executeQuery(` + SELECT + COALESCE(SUM(pf.forecast_units), 0) AS total_units, + COALESCE(SUM(pf.forecast_revenue), 0) AS total_revenue, + COUNT(DISTINCT pf.pid) FILTER ( + WHERE pf.lifecycle_phase IN ('launch','decay','mature','preorder','slow_mover') + ) AS active_products, + COUNT(DISTINCT pf.pid) FILTER ( + WHERE pf.forecast_method = 'lifecycle_curve' + ) AS curve_products + FROM product_forecasts pf + JOIN product_metrics pm ON pm.pid = pf.pid + WHERE pm.is_visible = true + AND pf.forecast_date BETWEEN $1 AND $2 + `, [startISO, clampedEndISO]); + + const active = parseInt(totals.active_products) || 1; + const curveProducts = parseInt(totals.curve_products) || 0; + const confidenceLevel = parseFloat((curveProducts / active).toFixed(2)); + + // Daily series from actual forecast + const { rows: dailyRows } = await executeQuery(` + SELECT pf.forecast_date AS date, + SUM(pf.forecast_units) AS units, + SUM(pf.forecast_revenue) AS revenue + FROM product_forecasts pf + JOIN product_metrics pm ON pm.pid = pf.pid + WHERE pm.is_visible = true + AND pf.forecast_date BETWEEN $1 AND $2 + GROUP BY pf.forecast_date + ORDER BY pf.forecast_date + `, [startISO, clampedEndISO]); + + const dailyForecasts = dailyRows.map(d => ({ + date: d.date instanceof Date ? 
d.date.toISOString().split('T')[0] : d.date, + units: parseFloat(d.units) || 0, + revenue: parseFloat(d.revenue) || 0, + confidence: confidenceLevel, + })); + + // Daily forecast broken down by lifecycle phase (for stacked chart) + const { rows: dailyPhaseRows } = await executeQuery(` + SELECT pf.forecast_date AS date, + COALESCE(SUM(pf.forecast_revenue) FILTER (WHERE pf.lifecycle_phase = 'preorder'), 0) AS preorder, + COALESCE(SUM(pf.forecast_revenue) FILTER (WHERE pf.lifecycle_phase = 'launch'), 0) AS launch, + COALESCE(SUM(pf.forecast_revenue) FILTER (WHERE pf.lifecycle_phase = 'decay'), 0) AS decay, + COALESCE(SUM(pf.forecast_revenue) FILTER (WHERE pf.lifecycle_phase = 'mature'), 0) AS mature, + COALESCE(SUM(pf.forecast_revenue) FILTER (WHERE pf.lifecycle_phase = 'slow_mover'), 0) AS slow_mover, + COALESCE(SUM(pf.forecast_revenue) FILTER (WHERE pf.lifecycle_phase = 'dormant'), 0) AS dormant + FROM product_forecasts pf + JOIN product_metrics pm ON pm.pid = pf.pid + WHERE pm.is_visible = true + AND pf.forecast_date BETWEEN $1 AND $2 + GROUP BY pf.forecast_date + ORDER BY pf.forecast_date + `, [startISO, clampedEndISO]); + + // --- New product pipeline contribution --- + // Average daily revenue from new product introductions (last 12 months). + // Only used for EXTRAPOLATED days beyond the forecast horizon — within the + // 90-day horizon, preorder/launch products are already forecast by lifecycle curves. 
+ const { rows: [pipeline] } = await executeQuery(` + SELECT + COALESCE(AVG(monthly_revenue), 0) AS avg_monthly_revenue + FROM ( + SELECT DATE_TRUNC('month', pm.date_first_received) AS month, + COUNT(*) AS monthly_products, + SUM(pm.first_30_days_revenue) AS monthly_revenue + FROM product_metrics pm + WHERE pm.is_visible = true + AND pm.date_first_received >= NOW() - INTERVAL '12 months' + AND pm.date_first_received < DATE_TRUNC('month', NOW()) + GROUP BY 1 + ) sub + `); + // Compute average product price for converting revenue to unit estimates + const { rows: [priceRow] } = await executeQuery(` + SELECT COALESCE(AVG(current_price) FILTER (WHERE current_price > 0 AND sales_30d > 0), 7) AS avg_price + FROM product_metrics + WHERE is_visible = true + `); + const avgPrice = parseFloat(priceRow.avg_price) || 7; + + // Daily new-product revenue = (avg products/month × avg 30d revenue/product) / 30 + const avgMonthlyRevenue = parseFloat(pipeline.avg_monthly_revenue) || 0; + const newProductDailyRevenue = avgMonthlyRevenue / 30; + const newProductDailyUnits = newProductDailyRevenue / avgPrice; + + let totalRevenue = dailyForecasts.reduce((sum, d) => sum + d.revenue, 0); + let totalUnits = dailyForecasts.reduce((sum, d) => sum + d.units, 0); + + // --- Extrapolation beyond forecast horizon (rest-of-year) --- + if (needsExtrapolation) { + // Monthly seasonal indices from last 12 months of actual revenue + const { rows: seasonalRows } = await executeQuery(` + SELECT EXTRACT(MONTH FROM o.date)::int AS month, + SUM(o.quantity * o.price) AS revenue + FROM orders o + WHERE o.canceled IS DISTINCT FROM TRUE + AND o.date >= NOW() - INTERVAL '12 months' + GROUP BY 1 + `); + const monthlyRevenue = {}; + let totalMonthlyRev = 0; + for (const r of seasonalRows) { + monthlyRevenue[r.month] = parseFloat(r.revenue) || 0; + totalMonthlyRev += monthlyRevenue[r.month]; + } + const avgMonthRev = totalMonthlyRev / Math.max(Object.keys(monthlyRevenue).length, 1); + const seasonalIndex = {}; + 
for (let m = 1; m <= 12; m++) { + seasonalIndex[m] = monthlyRevenue[m] ? monthlyRevenue[m] / avgMonthRev : 1.0; + } + + // Baseline: avg daily revenue from last 7 days of forecast (mature tail) + const tailDays = dailyForecasts.slice(-7); + const baselineDaily = tailDays.length > 0 + ? tailDays.reduce((s, d) => s + d.revenue, 0) / tailDays.length + : 0; + + // Generate estimated days beyond horizon + const extraStart = new Date(forecastHorizon); + extraStart.setDate(extraStart.getDate() + 1); + const extraEnd = new Date(endISO + 'T00:00:00'); + + for (let d = new Date(extraStart); d <= extraEnd; d.setDate(d.getDate() + 1)) { + const month = d.getMonth() + 1; + const seasonal = seasonalIndex[month] || 1.0; + // Beyond horizon: existing product tail + new product pipeline + const estRevenue = baselineDaily * seasonal + newProductDailyRevenue; + const estUnits = (baselineDaily * seasonal) / avgPrice + newProductDailyUnits; + + dailyForecasts.push({ + date: d.toISOString().split('T')[0], + units: parseFloat(estUnits.toFixed(1)), + revenue: parseFloat(estRevenue.toFixed(2)), + confidence: 0, // lower confidence for extrapolated data + estimated: true, + }); + totalRevenue += estRevenue; + totalUnits += estUnits; + } } - - // Return dummy response - const response = { - forecastSales: 500, - forecastRevenue: 25000, - confidenceLevel: 0.85, - dailyForecasts: dummyData, - categoryForecasts: [ - { category: "Electronics", units: 120, revenue: 6000, confidence: 0.9 }, - { category: "Clothing", units: 80, revenue: 4000, confidence: 0.8 }, - { category: "Home Goods", units: 150, revenue: 7500, confidence: 0.75 }, - { category: "Others", units: 150, revenue: 7500, confidence: 0.7 } - ] - }; - - return res.json(response); - } - - // If the table exists, try to query it with proper error handling - try { - // Get summary metrics - const { rows: metrics } = await executeQuery(` - SELECT - COALESCE(SUM(forecast_units), 0) as total_forecast_units, - COALESCE(SUM(forecast_revenue), 
0) as total_forecast_revenue, - COALESCE(AVG(confidence_level), 0) as overall_confidence - FROM sales_forecasts - WHERE forecast_date BETWEEN $1 AND $2 - `, [startDate, endDate]); - // Get daily forecasts - const { rows: dailyForecasts } = await executeQuery(` - SELECT - DATE(forecast_date) as date, - COALESCE(SUM(forecast_revenue), 0) as revenue, - COALESCE(AVG(confidence_level), 0) as confidence - FROM sales_forecasts - WHERE forecast_date BETWEEN $1 AND $2 - GROUP BY DATE(forecast_date) - ORDER BY date - `, [startDate, endDate]); - - // Get category forecasts - const { rows: categoryForecasts } = await executeQuery(` - SELECT - c.name as category, - COALESCE(SUM(cf.forecast_units), 0) as units, - COALESCE(SUM(cf.forecast_revenue), 0) as revenue, - COALESCE(AVG(cf.confidence_level), 0) as confidence - FROM category_forecasts cf - JOIN categories c ON cf.category_id = c.cat_id - WHERE cf.forecast_date BETWEEN $1 AND $2 - GROUP BY c.cat_id, c.name + // Lifecycle phase breakdown (from actual forecast data only) + const { rows: phaseRows } = await executeQuery(` + SELECT pf.lifecycle_phase AS phase, + COUNT(DISTINCT pf.pid) AS products, + COALESCE(SUM(pf.forecast_units), 0) AS units, + COALESCE(SUM(pf.forecast_revenue), 0) AS revenue + FROM product_forecasts pf + JOIN product_metrics pm ON pm.pid = pf.pid + WHERE pm.is_visible = true + AND pf.forecast_date BETWEEN $1 AND $2 + GROUP BY pf.lifecycle_phase ORDER BY revenue DESC - `, [startDate, endDate]); + `, [startISO, clampedEndISO]); - // Format response - const response = { - forecastSales: parseInt(metrics[0]?.total_forecast_units) || 0, - forecastRevenue: parseFloat(metrics[0]?.total_forecast_revenue) || 0, - confidenceLevel: parseFloat(metrics[0]?.overall_confidence) || 0, - dailyForecasts: dailyForecasts.map(d => ({ - date: d.date, - revenue: parseFloat(d.revenue) || 0, - confidence: parseFloat(d.confidence) || 0 - })), - categoryForecasts: categoryForecasts.map(c => ({ - category: c.category, - units: 
parseInt(c.units) || 0, - revenue: parseFloat(c.revenue) || 0, - confidence: parseFloat(c.confidence) || 0 - })) - }; + const phaseTotal = phaseRows.reduce((s, r) => s + (parseFloat(r.revenue) || 0), 0); + const phaseBreakdown = phaseRows + .filter(r => parseFloat(r.revenue) > 0) + .map(r => ({ + phase: r.phase, + products: parseInt(r.products) || 0, + units: Math.round(parseFloat(r.units) || 0), + revenue: parseFloat(parseFloat(r.revenue).toFixed(2)), + percentage: phaseTotal > 0 + ? parseFloat(((parseFloat(r.revenue) / phaseTotal) * 100).toFixed(1)) + : 0, + })); - res.json(response); - } catch (err) { - console.error('Error with forecast tables structure, returning dummy data:', err); - - // Generate dummy data for forecast as fallback - const days = 30; - const dummyData = []; - const startDateObj = new Date(startDate); - - for (let i = 0; i < days; i++) { - const currentDate = new Date(startDateObj); - currentDate.setDate(startDateObj.getDate() + i); - - const baseValue = 500 + Math.random() * 200; - dummyData.push({ - date: currentDate.toISOString().split('T')[0], - revenue: parseFloat((baseValue + Math.random() * 100).toFixed(2)), - confidence: parseFloat((0.7 + Math.random() * 0.2).toFixed(2)) - }); + // Category breakdown (from actual forecast data only) + const { rows: categoryRows } = await executeQuery(` + WITH product_root_category AS ( + SELECT DISTINCT ON (pf.pid) + pf.pid, ch.name AS category + FROM product_forecasts pf + JOIN product_metrics pm ON pm.pid = pf.pid + JOIN product_categories pc ON pc.pid = pf.pid + JOIN category_hierarchy ch ON ch.cat_id = pc.cat_id AND ch.level = 0 + WHERE pm.is_visible = true + AND ch.name NOT IN ('Deals', 'Black Friday') + AND pf.forecast_date BETWEEN $1 AND $2 + ORDER BY pf.pid, ch.name + ) + SELECT prc.category, + SUM(pf.forecast_units) AS units, + SUM(pf.forecast_revenue) AS revenue + FROM product_forecasts pf + JOIN product_root_category prc ON prc.pid = pf.pid + WHERE pf.forecast_date BETWEEN $1 AND $2 + GROUP 
BY prc.category + ORDER BY revenue DESC + LIMIT 8 + `, [startISO, clampedEndISO]); + + const dailyForecastsByPhase = dailyPhaseRows.map(d => ({ + date: d.date instanceof Date ? d.date.toISOString().split('T')[0] : d.date, + preorder: parseFloat(d.preorder) || 0, + launch: parseFloat(d.launch) || 0, + decay: parseFloat(d.decay) || 0, + mature: parseFloat(d.mature) || 0, + slow_mover: parseFloat(d.slow_mover) || 0, + dormant: parseFloat(d.dormant) || 0, + })); + + // Add extrapolated days to phase series (distribute proportionally using last phase ratios) + if (needsExtrapolation && dailyForecastsByPhase.length > 0) { + const lastPhaseDay = dailyForecastsByPhase[dailyForecastsByPhase.length - 1]; + const phases = ['preorder', 'launch', 'decay', 'mature', 'slow_mover', 'dormant']; + const lastTotal = phases.reduce((s, p) => s + lastPhaseDay[p], 0); + const phaseRatios = {}; + for (const p of phases) { + phaseRatios[p] = lastTotal > 0 ? lastPhaseDay[p] / lastTotal : 1 / phases.length; + } + // Match extrapolated days from dailyForecasts + for (let i = dailyForecastsByPhase.length; i < dailyForecasts.length; i++) { + const dayRev = dailyForecasts[i].revenue; + const entry = { date: dailyForecasts[i].date }; + for (const p of phases) { + entry[p] = parseFloat((dayRev * phaseRatios[p]).toFixed(2)); + } + dailyForecastsByPhase.push(entry); + } } - - // Return dummy response - const response = { - forecastSales: 500, - forecastRevenue: 25000, - confidenceLevel: 0.85, - dailyForecasts: dummyData, - categoryForecasts: [ - { category: "Electronics", units: 120, revenue: 6000, confidence: 0.9 }, - { category: "Clothing", units: 80, revenue: 4000, confidence: 0.8 }, - { category: "Home Goods", units: 150, revenue: 7500, confidence: 0.75 }, - { category: "Others", units: 150, revenue: 7500, confidence: 0.7 } - ] - }; - - res.json(response); + + return res.json({ + forecastSales: Math.round(totalUnits), + forecastRevenue: totalRevenue.toFixed(2), + confidenceLevel, + 
dailyForecasts, + dailyForecastsByPhase, + phaseBreakdown, + categoryForecasts: categoryRows.map(c => ({ + category: c.category, + units: Math.round(parseFloat(c.units)), + revenue: parseFloat(parseFloat(c.revenue).toFixed(2)), + })), + }); } + + // --- Fallback: velocity-based projection (no forecast data yet) --- + const { rows: [totals] } = await executeQuery(` + SELECT + COALESCE(SUM(sales_velocity_daily), 0) AS daily_units, + COALESCE(SUM(sales_velocity_daily * current_price), 0) AS daily_revenue, + COUNT(*) FILTER (WHERE sales_velocity_daily > 0) AS active_products + FROM product_metrics + WHERE is_visible = true AND sales_velocity_daily > 0 + `); + + const dailyUnits = parseFloat(totals.daily_units) || 0; + const dailyRevenue = parseFloat(totals.daily_revenue) || 0; + + const dailyForecasts = []; + for (let i = 0; i < days; i++) { + const d = new Date(startDate); + d.setDate(startDate.getDate() + i); + dailyForecasts.push({ + date: d.toISOString().split('T')[0], + units: parseFloat(dailyUnits.toFixed(1)), + revenue: parseFloat(dailyRevenue.toFixed(2)), + confidence: 0, + }); + } + + const { rows: categoryRows } = await executeQuery(` + WITH product_root_category AS ( + SELECT DISTINCT ON (pm.pid) pm.pid, + pm.sales_velocity_daily, pm.current_price, + ch.name AS category + FROM product_metrics pm + JOIN product_categories pc ON pc.pid = pm.pid + JOIN category_hierarchy ch ON ch.cat_id = pc.cat_id AND ch.level = 0 + WHERE pm.is_visible = true AND pm.sales_velocity_daily > 0 + AND ch.name NOT IN ('Deals', 'Black Friday') + ORDER BY pm.pid, ch.name + ) + SELECT category, + ROUND(SUM(sales_velocity_daily)::numeric, 1) AS daily_units, + ROUND(SUM(sales_velocity_daily * current_price)::numeric, 2) AS daily_revenue + FROM product_root_category + GROUP BY category ORDER BY daily_revenue DESC LIMIT 8 + `); + + res.json({ + forecastSales: Math.round(dailyUnits * days), + forecastRevenue: (dailyRevenue * days).toFixed(2), + confidenceLevel: 0, + dailyForecasts, + 
categoryForecasts: categoryRows.map(c => ({ + category: c.category, + units: Math.round(parseFloat(c.daily_units) * days), + revenue: parseFloat((parseFloat(c.daily_revenue) * days).toFixed(2)), + })), + }); } catch (err) { console.error('Error fetching forecast metrics:', err); res.status(500).json({ error: 'Failed to fetch forecast metrics' }); } }); +// GET /dashboard/forecast/accuracy +// Returns forecast accuracy metrics computed by the forecast engine. +// Reads from forecast_accuracy table (populated after each forecast run). +router.get('/forecast/accuracy', async (req, res) => { + try { + // Check if forecast_accuracy table exists and has data + const { rows: [tableCheck] } = await executeQuery(` + SELECT EXISTS ( + SELECT 1 FROM information_schema.tables + WHERE table_name = 'forecast_accuracy' + ) AS exists + `); + + if (!tableCheck.exists) { + return res.json({ hasData: false, message: 'Accuracy data not yet available' }); + } + + // Get the latest run that has accuracy data + const { rows: runRows } = await executeQuery(` + SELECT DISTINCT fa.run_id, fr.finished_at + FROM forecast_accuracy fa + JOIN forecast_runs fr ON fr.id = fa.run_id + ORDER BY fr.finished_at DESC + LIMIT 1 + `); + + if (runRows.length === 0) { + return res.json({ hasData: false, message: 'No accuracy data computed yet' }); + } + + const latestRunId = runRows[0].run_id; + const computedAt = runRows[0].finished_at; + + // Count days of history available + const { rows: [historyInfo] } = await executeQuery(` + SELECT + COUNT(DISTINCT forecast_date) AS days_of_history, + MIN(forecast_date) AS earliest_date, + MAX(forecast_date) AS latest_date + FROM product_forecasts_history + `); + + // Fetch all accuracy metrics for the latest run + const { rows: metrics } = await executeQuery(` + SELECT metric_type, dimension_value, sample_size, + total_actual_units, total_forecast_units, + mae, wmape, bias, rmse + FROM forecast_accuracy + WHERE run_id = $1 + ORDER BY metric_type, dimension_value + 
`, [latestRunId]); + + // Organize into response structure + const overall = metrics.find(m => m.metric_type === 'overall'); + const byPhase = metrics + .filter(m => m.metric_type === 'by_phase') + .map(m => ({ + phase: m.dimension_value, + sampleSize: parseInt(m.sample_size), + totalActual: parseFloat(m.total_actual_units) || 0, + totalForecast: parseFloat(m.total_forecast_units) || 0, + mae: m.mae != null ? parseFloat(parseFloat(m.mae).toFixed(4)) : null, + wmape: m.wmape != null ? parseFloat((parseFloat(m.wmape) * 100).toFixed(1)) : null, + bias: m.bias != null ? parseFloat(parseFloat(m.bias).toFixed(4)) : null, + rmse: m.rmse != null ? parseFloat(parseFloat(m.rmse).toFixed(4)) : null, + })) + .sort((a, b) => (b.totalActual || 0) - (a.totalActual || 0)); + + const byLeadTime = metrics + .filter(m => m.metric_type === 'by_lead_time') + .map(m => ({ + bucket: m.dimension_value, + sampleSize: parseInt(m.sample_size), + mae: m.mae != null ? parseFloat(parseFloat(m.mae).toFixed(4)) : null, + wmape: m.wmape != null ? parseFloat((parseFloat(m.wmape) * 100).toFixed(1)) : null, + bias: m.bias != null ? parseFloat(parseFloat(m.bias).toFixed(4)) : null, + rmse: m.rmse != null ? parseFloat(parseFloat(m.rmse).toFixed(4)) : null, + })) + .sort((a, b) => { + const order = { '1-7d': 0, '8-14d': 1, '15-30d': 2, '31-60d': 3, '61-90d': 4 }; + return (order[a.bucket] ?? 99) - (order[b.bucket] ?? 99); + }); + + const byMethod = metrics + .filter(m => m.metric_type === 'by_method') + .map(m => ({ + method: m.dimension_value, + sampleSize: parseInt(m.sample_size), + mae: m.mae != null ? parseFloat(parseFloat(m.mae).toFixed(4)) : null, + wmape: m.wmape != null ? parseFloat((parseFloat(m.wmape) * 100).toFixed(1)) : null, + bias: m.bias != null ? parseFloat(parseFloat(m.bias).toFixed(4)) : null, + })); + + const dailyTrend = metrics + .filter(m => m.metric_type === 'daily') + .map(m => ({ + date: m.dimension_value, + mae: m.mae != null ? 
parseFloat(parseFloat(m.mae).toFixed(4)) : null, + wmape: m.wmape != null ? parseFloat((parseFloat(m.wmape) * 100).toFixed(1)) : null, + bias: m.bias != null ? parseFloat(parseFloat(m.bias).toFixed(4)) : null, + })) + .sort((a, b) => a.date.localeCompare(b.date)); + + // Historical accuracy trend (across runs) + const { rows: trendRows } = await executeQuery(` + SELECT fa.run_id, fr.finished_at::date AS run_date, + fa.mae, fa.wmape, fa.bias, fa.rmse, fa.sample_size + FROM forecast_accuracy fa + JOIN forecast_runs fr ON fr.id = fa.run_id + WHERE fa.metric_type = 'overall' + AND fa.dimension_value = 'all' + ORDER BY fr.finished_at + `); + + const accuracyTrend = trendRows.map(r => ({ + date: r.run_date instanceof Date ? r.run_date.toISOString().split('T')[0] : r.run_date, + mae: r.mae != null ? parseFloat(parseFloat(r.mae).toFixed(4)) : null, + wmape: r.wmape != null ? parseFloat((parseFloat(r.wmape) * 100).toFixed(1)) : null, + bias: r.bias != null ? parseFloat(parseFloat(r.bias).toFixed(4)) : null, + sampleSize: parseInt(r.sample_size), + })); + + res.json({ + hasData: true, + computedAt, + daysOfHistory: parseInt(historyInfo.days_of_history) || 0, + historyRange: { + from: historyInfo.earliest_date instanceof Date + ? historyInfo.earliest_date.toISOString().split('T')[0] + : historyInfo.earliest_date, + to: historyInfo.latest_date instanceof Date + ? historyInfo.latest_date.toISOString().split('T')[0] + : historyInfo.latest_date, + }, + overall: overall ? { + sampleSize: parseInt(overall.sample_size), + totalActual: parseFloat(overall.total_actual_units) || 0, + totalForecast: parseFloat(overall.total_forecast_units) || 0, + mae: overall.mae != null ? parseFloat(parseFloat(overall.mae).toFixed(4)) : null, + wmape: overall.wmape != null ? parseFloat((parseFloat(overall.wmape) * 100).toFixed(1)) : null, + bias: overall.bias != null ? parseFloat(parseFloat(overall.bias).toFixed(4)) : null, + rmse: overall.rmse != null ? 
parseFloat(parseFloat(overall.rmse).toFixed(4)) : null, + } : null, + byPhase, + byLeadTime, + byMethod, + dailyTrend, + accuracyTrend, + }); + } catch (err) { + console.error('Error fetching forecast accuracy:', err); + res.status(500).json({ error: 'Failed to fetch forecast accuracy' }); + } +}); + // GET /dashboard/overstock/metrics // Returns overstock metrics by category router.get('/overstock/metrics', async (req, res) => { @@ -427,7 +815,7 @@ router.get('/overstock/metrics', async (req, res) => { // Get category breakdowns separately const { rows: categoryData } = await executeQuery(` - SELECT + SELECT c.name as category_name, COUNT(DISTINCT pm.pid)::integer as overstocked_products, SUM(pm.overstocked_units)::integer as total_excess_units, @@ -443,6 +831,22 @@ router.get('/overstock/metrics', async (req, res) => { LIMIT 8 `); + // Overstock breakdown by lifecycle phase + const { rows: phaseOverstock } = await executeQuery(` + SELECT + COALESCE(pm.lifecycle_phase, 'unknown') AS phase, + COUNT(DISTINCT pm.pid)::integer AS products, + COALESCE(SUM(pm.overstocked_units), 0)::integer AS units, + ROUND(COALESCE(SUM(pm.overstocked_cost), 0)::numeric, 2) AS cost, + ROUND(COALESCE(SUM(pm.overstocked_retail), 0)::numeric, 2) AS retail + FROM product_metrics pm + WHERE pm.status = 'Overstock' AND pm.is_visible = true + AND COALESCE(pm.preorder_count, 0) = 0 + GROUP BY pm.lifecycle_phase + ORDER BY cost DESC + `); + const overstockPhaseTotalCost = phaseOverstock.reduce((s, r) => s + (parseFloat(r.cost) || 0), 0); + // Format response with explicit type conversion const response = { overstockedProducts: parseInt(summaryMetrics.total_overstocked) || 0, @@ -455,7 +859,17 @@ router.get('/overstock/metrics', async (req, res) => { units: parseInt(cat.total_excess_units) || 0, cost: parseFloat(cat.total_excess_cost) || 0, retail: parseFloat(cat.total_excess_retail) || 0 - })) + })), + phaseBreakdown: phaseOverstock.filter(r => parseFloat(r.cost) > 0).map(r => ({ + phase: 
r.phase, + products: parseInt(r.products) || 0, + units: parseInt(r.units) || 0, + cost: parseFloat(r.cost) || 0, + retail: parseFloat(r.retail) || 0, + percentage: overstockPhaseTotalCost > 0 + ? parseFloat(((parseFloat(r.cost) / overstockPhaseTotalCost) * 100).toFixed(1)) + : 0, + })), }; res.json(response); @@ -600,7 +1014,7 @@ router.get('/sales/metrics', async (req, res) => { // Get overall metrics for the period const { rows: [metrics] } = await executeQuery(` - SELECT + SELECT COUNT(DISTINCT order_number) as total_orders, SUM(quantity) as total_units, SUM(price * quantity) as total_revenue, @@ -610,6 +1024,40 @@ router.get('/sales/metrics', async (req, res) => { AND canceled = false `, [startDate, endDate]); + // Sales breakdown by lifecycle phase + const { rows: phaseSales } = await executeQuery(` + SELECT + COALESCE(pm.lifecycle_phase, 'unknown') AS phase, + COUNT(DISTINCT o.order_number)::integer AS orders, + COALESCE(SUM(o.quantity), 0)::integer AS units, + ROUND(COALESCE(SUM(o.price * o.quantity), 0)::numeric, 2) AS revenue, + ROUND(COALESCE(SUM(o.costeach * o.quantity), 0)::numeric, 2) AS cogs + FROM orders o + LEFT JOIN product_metrics pm ON o.pid = pm.pid + WHERE o.date BETWEEN $1 AND $2 AND o.canceled = false + GROUP BY pm.lifecycle_phase + ORDER BY revenue DESC + `, [startDate, endDate]); + const salePhaseTotalRev = phaseSales.reduce((s, r) => s + (parseFloat(r.revenue) || 0), 0); + + // Daily sales broken down by lifecycle phase (for stacked chart) + const { rows: dailyPhaseRows } = await executeQuery(` + SELECT + DATE(o.date) AS sale_date, + COALESCE(SUM(o.price * o.quantity) FILTER (WHERE COALESCE(pm.lifecycle_phase, 'unknown') = 'preorder'), 0) AS preorder, + COALESCE(SUM(o.price * o.quantity) FILTER (WHERE COALESCE(pm.lifecycle_phase, 'unknown') = 'launch'), 0) AS launch, + COALESCE(SUM(o.price * o.quantity) FILTER (WHERE COALESCE(pm.lifecycle_phase, 'unknown') = 'decay'), 0) AS decay, + COALESCE(SUM(o.price * o.quantity) FILTER (WHERE 
COALESCE(pm.lifecycle_phase, 'unknown') = 'mature'), 0) AS mature, + COALESCE(SUM(o.price * o.quantity) FILTER (WHERE COALESCE(pm.lifecycle_phase, 'unknown') = 'slow_mover'), 0) AS slow_mover, + COALESCE(SUM(o.price * o.quantity) FILTER (WHERE COALESCE(pm.lifecycle_phase, 'unknown') = 'dormant'), 0) AS dormant, + COALESCE(SUM(o.price * o.quantity) FILTER (WHERE pm.lifecycle_phase IS NULL), 0) AS unknown + FROM orders o + LEFT JOIN product_metrics pm ON o.pid = pm.pid + WHERE o.date BETWEEN $1 AND $2 AND o.canceled = false + GROUP BY DATE(o.date) + ORDER BY sale_date + `, [startDate, endDate]); + const response = { totalOrders: parseInt(metrics?.total_orders) || 0, totalUnitsSold: parseInt(metrics?.total_units) || 0, @@ -620,7 +1068,27 @@ router.get('/sales/metrics', async (req, res) => { units: parseInt(day.total_units) || 0, revenue: parseFloat(day.total_revenue) || 0, cogs: parseFloat(day.total_cogs) || 0 - })) + })), + dailySalesByPhase: dailyPhaseRows.map(d => ({ + date: d.sale_date, + preorder: parseFloat(d.preorder) || 0, + launch: parseFloat(d.launch) || 0, + decay: parseFloat(d.decay) || 0, + mature: parseFloat(d.mature) || 0, + slow_mover: parseFloat(d.slow_mover) || 0, + dormant: parseFloat(d.dormant) || 0, + unknown: parseFloat(d.unknown) || 0, + })), + phaseBreakdown: phaseSales.filter(r => parseFloat(r.revenue) > 0).map(r => ({ + phase: r.phase, + orders: parseInt(r.orders) || 0, + units: parseInt(r.units) || 0, + revenue: parseFloat(r.revenue) || 0, + cogs: parseFloat(r.cogs) || 0, + percentage: salePhaseTotalRev > 0 + ? 
parseFloat(((parseFloat(r.revenue) / salePhaseTotalRev) * 100).toFixed(1)) + : 0, + })), }; res.json(response); diff --git a/inventory-server/src/routes/products.js b/inventory-server/src/routes/products.js index 6397c2c..72f3b5e 100644 --- a/inventory-server/src/routes/products.js +++ b/inventory-server/src/routes/products.js @@ -782,4 +782,49 @@ router.get('/:id/time-series', async (req, res) => { } }); +// GET /products/:id/forecast +// Returns the 90-day daily forecast for a single product from product_forecasts +router.get('/:id/forecast', async (req, res) => { + const { id } = req.params; + try { + const pool = req.app.locals.pool; + + const { rows } = await pool.query(` + SELECT + forecast_date AS date, + forecast_units AS units, + forecast_revenue AS revenue, + lifecycle_phase AS phase, + forecast_method AS method, + confidence_lower, + confidence_upper + FROM product_forecasts + WHERE pid = $1 + ORDER BY forecast_date + `, [id]); + + if (rows.length === 0) { + return res.json({ forecast: [], phase: null, method: null }); + } + + const phase = rows[0].phase; + const method = rows[0].method; + + res.json({ + phase, + method, + forecast: rows.map(r => ({ + date: r.date instanceof Date ? 
r.date.toISOString().split('T')[0] : r.date, + units: parseFloat(r.units) || 0, + revenue: parseFloat(r.revenue) || 0, + confidenceLower: parseFloat(r.confidence_lower) || 0, + confidenceUpper: parseFloat(r.confidence_upper) || 0, + })), + }); + } catch (error) { + console.error('Error fetching product forecast:', error); + res.status(500).json({ error: 'Failed to fetch product forecast' }); + } +}); + module.exports = router; diff --git a/inventory/src/components/overview/BestSellers.tsx b/inventory/src/components/overview/BestSellers.tsx index c45b4e9..ba905d3 100644 --- a/inventory/src/components/overview/BestSellers.tsx +++ b/inventory/src/components/overview/BestSellers.tsx @@ -79,7 +79,7 @@ export function BestSellers() { ) : ( <> - + diff --git a/inventory/src/components/overview/ForecastAccuracy.tsx b/inventory/src/components/overview/ForecastAccuracy.tsx new file mode 100644 index 0000000..f3a9a18 --- /dev/null +++ b/inventory/src/components/overview/ForecastAccuracy.tsx @@ -0,0 +1,294 @@ +import { useQuery } from "@tanstack/react-query" +import { BarChart, Bar, ResponsiveContainer, XAxis, YAxis, Tooltip as RechartsTooltip, Cell, LineChart, Line } from "recharts" +import config from "@/config" +import { Target, TrendingDown, ArrowUpDown } from "lucide-react" +import { Tooltip as UITooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip" +import { PHASE_CONFIG } from "@/utils/lifecyclePhases" + +interface OverallMetrics { + sampleSize: number + totalActual: number + totalForecast: number + mae: number | null + wmape: number | null + bias: number | null + rmse: number | null +} + +interface PhaseAccuracy { + phase: string + sampleSize: number + totalActual: number + totalForecast: number + mae: number | null + wmape: number | null + bias: number | null + rmse: number | null +} + +interface LeadTimeAccuracy { + bucket: string + sampleSize: number + mae: number | null + wmape: number | null + bias: number | null + rmse: number 
| null +} + +interface AccuracyTrendPoint { + date: string + mae: number | null + wmape: number | null + bias: number | null + sampleSize: number +} + +interface AccuracyData { + hasData: boolean + message?: string + computedAt?: string + daysOfHistory?: number + historyRange?: { from: string; to: string } + overall?: OverallMetrics + byPhase?: PhaseAccuracy[] + byLeadTime?: LeadTimeAccuracy[] + byMethod?: { method: string; sampleSize: number; mae: number | null; wmape: number | null; bias: number | null }[] + dailyTrend?: { date: string; mae: number | null; wmape: number | null; bias: number | null }[] + accuracyTrend?: AccuracyTrendPoint[] +} + +function MetricSkeleton() { + return
; +} + +function formatWmape(wmape: number | null): string { + if (wmape === null) return "N/A" + return `${wmape.toFixed(1)}%` +} + +function formatBias(bias: number | null): string { + if (bias === null) return "N/A" + const sign = bias > 0 ? "+" : "" + return `${sign}${bias.toFixed(3)}` +} + +function getAccuracyColor(wmape: number | null): string { + if (wmape === null) return "text-muted-foreground" + if (wmape <= 30) return "text-green-600" + if (wmape <= 50) return "text-yellow-600" + return "text-red-600" +} + +export function ForecastAccuracy() { + const { data, error, isLoading } = useQuery({ + queryKey: ["forecast-accuracy"], + queryFn: async () => { + const response = await fetch(`${config.apiUrl}/dashboard/forecast/accuracy`) + if (!response.ok) { + throw new Error("Failed to fetch forecast accuracy") + } + return response.json() + }, + refetchInterval: 5 * 60 * 1000, + }) + + if (error) { + return ( +
+

Forecast Accuracy

+

Failed to load accuracy data

+
+ ) + } + + if (!isLoading && data && !data.hasData) { + return ( +
+

Forecast Accuracy

+

+ Accuracy data will be available after the forecast engine has run for at least 2 days, + building up historical comparisons between predictions and actual sales. +

+
+ ) + } + + const phaseChartData = (data?.byPhase || []) + .filter(p => p.wmape !== null && p.phase !== 'dormant') + .map(p => ({ + phase: PHASE_CONFIG[p.phase]?.label || p.phase, + rawPhase: p.phase, + wmape: p.wmape, + mae: p.mae, + bias: p.bias, + sampleSize: p.sampleSize, + })) + .sort((a, b) => (a.wmape ?? 100) - (b.wmape ?? 100)) + + const leadTimeData = (data?.byLeadTime || []).map(lt => ({ + bucket: lt.bucket, + wmape: lt.wmape, + mae: lt.mae, + sampleSize: lt.sampleSize, + })) + + return ( +
+

Forecast Accuracy

+ {isLoading ? ( +
+ + +
+ ) : ( + <> + {/* Headline metrics */} +
+
+
+ +

WMAPE

+
+

+ {formatWmape(data?.overall?.wmape ?? null)} +

+
+
+
+ +

MAE

+
+

+ {data?.overall?.mae !== null ? data?.overall?.mae?.toFixed(2) : "N/A"} + units +

+
+
+
+ +

Bias

+
+

+ {formatBias(data?.overall?.bias ?? null)} + + {(data?.overall?.bias ?? 0) > 0 ? "over" : (data?.overall?.bias ?? 0) < 0 ? "under" : ""} + +

+
+
+ + {/* Phase accuracy bar */} + {phaseChartData.length > 0 && ( +
+

WMAPE by Lifecycle Phase

+ +
+ {phaseChartData.map((p) => { + const cfg = PHASE_CONFIG[p.rawPhase] || { label: p.phase, color: "#94A3B8" } + const maxWmape = Math.max(...phaseChartData.map(d => d.wmape ?? 0), 1) + const barWidth = ((p.wmape ?? 0) / maxWmape) * 100 + return ( + + +
+ {cfg.label} +
+
0 ? 4 : 0, + }} + /> +
+ + {formatWmape(p.wmape)} + +
+ + +
{cfg.label}
+
WMAPE: {formatWmape(p.wmape)}
+
MAE: {p.mae?.toFixed(3) ?? "N/A"} units
+
Bias: {formatBias(p.bias)}
+
{p.sampleSize.toLocaleString()} samples
+
+ + ) + })} +
+ +
+ )} + + {/* Lead time accuracy chart */} + {leadTimeData.length > 0 && ( +
+

Accuracy by Lead Time

+
+ + + + `${v}%`} + /> + [`${value?.toFixed(1)}%`, "WMAPE"]} + /> + + {leadTimeData.map((entry, index) => ( + + ))} + + + +
+
+ )} + + {/* Accuracy trend sparkline */} + {data?.accuracyTrend && data.accuracyTrend.length > 1 && ( +
+

Accuracy Trend (WMAPE)

+
+ + + + + + +
+
+ )} + + {/* Footer info */} + {data?.daysOfHistory !== undefined && ( +

+ Based on {data.daysOfHistory} day{data.daysOfHistory !== 1 ? "s" : ""} of history + {data.overall?.sampleSize ? ` (${data.overall.sampleSize.toLocaleString()} samples)` : ""} +

+ )} + + )} +
+ ) +} diff --git a/inventory/src/components/overview/ForecastMetrics.tsx b/inventory/src/components/overview/ForecastMetrics.tsx index 8669792..fb4a9e8 100644 --- a/inventory/src/components/overview/ForecastMetrics.tsx +++ b/inventory/src/components/overview/ForecastMetrics.tsx @@ -1,13 +1,46 @@ import { useQuery } from "@tanstack/react-query" import { CardHeader, CardTitle, CardContent } from "@/components/ui/card" -import { AreaChart, Area, ResponsiveContainer, XAxis, YAxis, Tooltip } from "recharts" +import { AreaChart, Area, ResponsiveContainer, XAxis, YAxis, Tooltip as RechartsTooltip } from "recharts" import { useState } from "react" import config from "@/config" import { formatCurrency } from "@/utils/formatCurrency" -import { TrendingUp, DollarSign } from "lucide-react" -import { DateRange } from "react-day-picker" +import { TrendingUp, DollarSign, Target } from "lucide-react" +import { Tooltip as UITooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip" +import { Popover, PopoverContent, PopoverTrigger } from "@/components/ui/popover" +import { Tabs, TabsList, TabsTrigger } from "@/components/ui/tabs" +import { Button } from "@/components/ui/button" +import { ForecastAccuracy } from "@/components/overview/ForecastAccuracy" import { addDays, format } from "date-fns" -import { DateRangePicker } from "@/components/ui/date-range-picker-narrow" +import { PHASE_CONFIG, PHASE_KEYS } from "@/utils/lifecyclePhases" + +function MetricSkeleton() { + return
; +} + +type Period = 30 | 90 | 'year'; + +function getEndDate(period: Period): Date { + if (period === 'year') return new Date(new Date().getFullYear(), 11, 31); + return addDays(new Date(), period); +} + +interface PhaseData { + phase: string + products: number + units: number + revenue: number + percentage: number +} + +interface DailyPhaseData { + date: string + preorder: number + launch: number + decay: number + mature: number + slow_mover: number + dormant: number +} interface ForecastData { forecastSales: number @@ -19,6 +52,8 @@ interface ForecastData { revenue: string confidence: number }[] + dailyForecastsByPhase?: DailyPhaseData[] + phaseBreakdown?: PhaseData[] categoryForecasts: { category: string units: number @@ -28,17 +63,14 @@ interface ForecastData { } export function ForecastMetrics() { - const [dateRange, setDateRange] = useState({ - from: new Date(), - to: addDays(new Date(), 30), - }); + const [period, setPeriod] = useState(30); const { data, error, isLoading } = useQuery({ - queryKey: ["forecast-metrics", dateRange], + queryKey: ["forecast-metrics", period], queryFn: async () => { const params = new URLSearchParams({ - startDate: dateRange.from?.toISOString() || "", - endDate: dateRange.to?.toISOString() || "", + startDate: new Date().toISOString(), + endDate: getEndDate(period).toISOString(), }); const response = await fetch(`${config.apiUrl}/dashboard/forecast/metrics?${params}`) if (!response.ok) { @@ -50,25 +82,35 @@ export function ForecastMetrics() { }, }) + const hasPhaseData = data?.dailyForecastsByPhase && data.dailyForecastsByPhase.length > 0 + return ( <> Forecast -
- { - if (range) setDateRange(range); - }} - future={true} - /> +
+ + + + + + + + + setPeriod(v === 'year' ? 'year' : Number(v) as Period)}> + + 30D + 90D + EOY + +
{error ? (
Error: {error.message}
- ) : isLoading ? ( -
Loading forecast metrics...
) : ( <>
@@ -77,52 +119,125 @@ export function ForecastMetrics() {

Forecast Sales

-

{data?.forecastSales.toLocaleString() || 0}

+ {isLoading || !data ? : ( +

{data.forecastSales.toLocaleString()}

+ )}

Forecast Revenue

-

{formatCurrency(Number(data?.forecastRevenue) || 0)}

+ {isLoading || !data ? : ( +

{formatCurrency(Number(data.forecastRevenue) || 0)}

+ )}
+ {isLoading ? ( +
+

Forecast Revenue By Lifecycle Phase

+
+
+ ) : data?.phaseBreakdown && data.phaseBreakdown.length > 0 && ( +
+

Forecast Revenue By Lifecycle Phase

+ +
+ {data.phaseBreakdown.map((p) => { + const cfg = PHASE_CONFIG[p.phase] || { label: p.phase, color: "#94A3B8" } + return ( + + +
0 ? 4 : 0, + }} + /> + + +
+
+ {cfg.label} + {p.percentage}% +
+
{formatCurrency(p.revenue)}
+
{p.products.toLocaleString()} products
+ + + ) + })} +
+ +
+ )} +
- - - - - [formatCurrency(Number(value)), "Revenue"]} - labelFormatter={(date) => format(new Date(date), 'MMM d, yyyy')} - /> - - - + {isLoading ? ( +
+
+
+ ) : ( + + + + + { + const cfg = PHASE_CONFIG[name] + return [formatCurrency(value), cfg?.label || name] + }} + labelFormatter={(date) => format(new Date(date + 'T00:00:00'), 'MMM d, yyyy')} + itemSorter={(item) => -(item.value as number || 0)} + /> + {hasPhaseData ? ( + PHASE_KEYS.map((phase) => { + const cfg = PHASE_CONFIG[phase] + return ( + + ) + }) + ) : ( + + )} + + + )}
)} ) -} \ No newline at end of file +} diff --git a/inventory/src/components/overview/OverstockMetrics.tsx b/inventory/src/components/overview/OverstockMetrics.tsx index ec2d21f..e537957 100644 --- a/inventory/src/components/overview/OverstockMetrics.tsx +++ b/inventory/src/components/overview/OverstockMetrics.tsx @@ -2,7 +2,18 @@ import { useQuery } from "@tanstack/react-query" import { CardHeader, CardTitle, CardContent } from "@/components/ui/card" import config from "@/config" import { formatCurrency } from "@/utils/formatCurrency" -import { Package, Layers, DollarSign, ShoppingCart } from "lucide-react" +import { AlertTriangle, Layers, DollarSign, Tag } from "lucide-react" +import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip" +import { PHASE_CONFIG } from "@/utils/lifecyclePhases" + +interface PhaseBreakdown { + phase: string + products: number + units: number + cost: number + retail: number + percentage: number +} interface OverstockMetricsData { overstockedProducts: number @@ -16,6 +27,7 @@ interface OverstockMetricsData { cost: number retail: number }[] + phaseBreakdown?: PhaseBreakdown[] } function MetricSkeleton() { @@ -44,7 +56,7 @@ export function OverstockMetrics() {
- +

Overstocked Products

{isLoading || !data ? : ( @@ -71,13 +83,48 @@ export function OverstockMetrics() {
- +

Overstocked Retail

{isLoading || !data ? : (

{formatCurrency(data.total_excess_retail)}

)}
+ {data?.phaseBreakdown && data.phaseBreakdown.length > 0 && ( +
+

Overstocked Cost By Lifecycle Phase

+ +
+ {data.phaseBreakdown.map((p) => { + const cfg = PHASE_CONFIG[p.phase] || { label: p.phase, color: "#94A3B8" } + return ( + + +
0 ? 3 : 0, + }} + /> + + +
+
+ {cfg.label} + {p.percentage}% +
+
{formatCurrency(p.cost)}
+
{p.products} products · {p.units} units
+ + + ) + })} +
+ +
+ )}
)} diff --git a/inventory/src/components/overview/PurchaseMetrics.tsx b/inventory/src/components/overview/PurchaseMetrics.tsx index 400abfd..981e541 100644 --- a/inventory/src/components/overview/PurchaseMetrics.tsx +++ b/inventory/src/components/overview/PurchaseMetrics.tsx @@ -3,7 +3,7 @@ import { CardHeader, CardTitle, CardContent } from "@/components/ui/card" import { PieChart, Pie, ResponsiveContainer, Cell, Sector } from "recharts" import config from "@/config" import { formatCurrency } from "@/utils/formatCurrency" -import { ClipboardList, AlertCircle, Layers, DollarSign, ShoppingCart } from "lucide-react" +import { ClipboardList, AlertCircle, Truck, DollarSign, Tag } from "lucide-react" import { useState } from "react" interface PurchaseMetricsData { @@ -90,49 +90,49 @@ export function PurchaseMetrics() { {isError ? (

Failed to load purchase metrics

) : ( -
-
-
-
-
- -

Active Purchase Orders

+
+
+
+
+
+ +

Active Purchase Orders

{isLoading || !data ? : (

{data.activePurchaseOrders.toLocaleString()}

)}
-
-
- -

Overdue Purchase Orders

+
+
+ +

Overdue Purchase Orders

{isLoading || !data ? : (

{data.overduePurchaseOrders.toLocaleString()}

)}
-
-
- -

On Order Units

+
+
+ +

On Order Units

{isLoading || !data ? : (

{data.onOrderUnits.toLocaleString()}

)}
-
-
- -

On Order Cost

+
+
+ +

On Order Cost

{isLoading || !data ? : (

{formatCurrency(data.onOrderCost)}

)}
-
-
- -

On Order Retail

+
+
+ +

On Order Retail

{isLoading || !data ? : (

{formatCurrency(data.onOrderRetail)}

@@ -140,9 +140,9 @@ export function PurchaseMetrics() {
-
+
-
Purchase Orders By Vendor
+
PO Costs By Vendor
{isLoading || !data ? (
diff --git a/inventory/src/components/overview/ReplenishmentMetrics.tsx b/inventory/src/components/overview/ReplenishmentMetrics.tsx index 7b31e6f..3e94644 100644 --- a/inventory/src/components/overview/ReplenishmentMetrics.tsx +++ b/inventory/src/components/overview/ReplenishmentMetrics.tsx @@ -2,13 +2,24 @@ import { useQuery } from "@tanstack/react-query" import { CardHeader, CardTitle, CardContent } from "@/components/ui/card" import config from "@/config" import { formatCurrency } from "@/utils/formatCurrency" -import { Package, DollarSign, ShoppingCart } from "lucide-react" +import { PackagePlus, DollarSign, Tag } from "lucide-react" +import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip" +import { PHASE_CONFIG } from "@/utils/lifecyclePhases" + +interface PhaseBreakdown { + phase: string + products: number + units: number + cost: number + percentage: number +} interface ReplenishmentMetricsData { productsToReplenish: number unitsToReplenish: number replenishmentCost: number replenishmentRetail: number + phaseBreakdown?: PhaseBreakdown[] topVariants: { id: number title: string @@ -47,7 +58,7 @@ export function ReplenishmentMetrics() {
- +

Units to Replenish

{isLoading || !data ? : ( @@ -65,13 +76,48 @@ export function ReplenishmentMetrics() {
- +

Replenishment Retail

{isLoading || !data ? : (

{formatCurrency(data.replenishmentRetail)}

)}
+ {data?.phaseBreakdown && data.phaseBreakdown.length > 0 && ( +
+

Replenishment Cost By Lifecycle Phase

+ +
+ {data.phaseBreakdown.map((p) => { + const cfg = PHASE_CONFIG[p.phase] || { label: p.phase, color: "#94A3B8" } + return ( + + +
0 ? 3 : 0, + }} + /> + + +
+
+ {cfg.label} + {p.percentage}% +
+
{formatCurrency(p.cost)}
+
{p.products} products · {p.units} units
+ + + ) + })} +
+ +
+ )}
)} diff --git a/inventory/src/components/overview/SalesMetrics.tsx b/inventory/src/components/overview/SalesMetrics.tsx index d85a2a6..9d6296e 100644 --- a/inventory/src/components/overview/SalesMetrics.tsx +++ b/inventory/src/components/overview/SalesMetrics.tsx @@ -1,13 +1,36 @@ import { useQuery } from "@tanstack/react-query" import { CardHeader, CardTitle, CardContent } from "@/components/ui/card" -import { AreaChart, Area, ResponsiveContainer, XAxis, YAxis, Tooltip } from "recharts" +import { AreaChart, Area, ResponsiveContainer, XAxis, YAxis, Tooltip as RechartsTooltip } from "recharts" import { useState } from "react" import config from "@/config" import { formatCurrency } from "@/utils/formatCurrency" import { ClipboardList, Package, DollarSign, ShoppingCart } from "lucide-react" -import { DateRange } from "react-day-picker" +import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip" +import { Tabs, TabsList, TabsTrigger } from "@/components/ui/tabs" import { addDays, format } from "date-fns" -import { DateRangePicker } from "@/components/ui/date-range-picker-narrow" +import { PHASE_CONFIG, PHASE_KEYS_WITH_UNKNOWN as PHASE_KEYS } from "@/utils/lifecyclePhases" + +type Period = 7 | 30 | 90; + +interface PhaseBreakdown { + phase: string + orders: number + units: number + revenue: number + cogs: number + percentage: number +} + +interface DailyPhaseData { + date: string + preorder: number + launch: number + decay: number + mature: number + slow_mover: number + dormant: number + unknown: number +} interface SalesData { totalOrders: number @@ -20,6 +43,8 @@ interface SalesData { revenue: number cogs: number }[] + dailySalesByPhase?: DailyPhaseData[] + phaseBreakdown?: PhaseBreakdown[] } function MetricSkeleton() { @@ -27,17 +52,14 @@ function MetricSkeleton() { } export function SalesMetrics() { - const [dateRange, setDateRange] = useState({ - from: addDays(new Date(), -30), - to: new Date(), - }); + const [period, 
setPeriod] = useState(30); const { data, isError, isLoading } = useQuery({ - queryKey: ["sales-metrics", dateRange], + queryKey: ["sales-metrics", period], queryFn: async () => { const params = new URLSearchParams({ - startDate: dateRange.from?.toISOString() || "", - endDate: dateRange.to?.toISOString() || "", + startDate: addDays(new Date(), -period).toISOString(), + endDate: new Date().toISOString(), }); const response = await fetch(`${config.apiUrl}/dashboard/sales/metrics?${params}`) if (!response.ok) throw new Error("Failed to fetch sales metrics"); @@ -45,19 +67,19 @@ export function SalesMetrics() { }, }) + const hasPhaseData = data?.dailySalesByPhase && data.dailySalesByPhase.length > 0 + return ( <> Sales -
- { - if (range) setDateRange(range); - }} - future={false} - /> -
+ setPeriod(Number(v) as Period)}> + + 7D + 30D + 90D + +
{isError ? ( @@ -103,6 +125,42 @@ export function SalesMetrics() {
+ {data?.phaseBreakdown && data.phaseBreakdown.length > 0 && ( +
+

Revenue By Lifecycle Phase

+ +
+ {data.phaseBreakdown.map((p) => { + const cfg = PHASE_CONFIG[p.phase] || { label: p.phase, color: "#94A3B8" } + return ( + + +
0 ? 3 : 0, + }} + /> + + +
+
+ {cfg.label} + {p.percentage}% +
+
{formatCurrency(p.revenue)}
+
{p.units.toLocaleString()} units · {p.orders.toLocaleString()} orders
+ + + ) + })} +
+ +
+ )} +
{isLoading ? (
@@ -111,7 +169,7 @@ export function SalesMetrics() { ) : ( - [formatCurrency(Number(value)), "Revenue"]} + { + const cfg = PHASE_CONFIG[name] + return [formatCurrency(value), cfg?.label || name] + }} labelFormatter={(date) => format(new Date(date), 'MMM d, yyyy')} + itemSorter={(item) => -(item.value as number || 0)} /> - + {hasPhaseData ? ( + PHASE_KEYS.map((phase) => { + const cfg = PHASE_CONFIG[phase] + return ( + + ) + }) + ) : ( + + )} )} diff --git a/inventory/src/components/overview/StockMetrics.tsx b/inventory/src/components/overview/StockMetrics.tsx index 71c32ad..c09d0b9 100644 --- a/inventory/src/components/overview/StockMetrics.tsx +++ b/inventory/src/components/overview/StockMetrics.tsx @@ -3,8 +3,18 @@ import { CardHeader, CardTitle, CardContent } from "@/components/ui/card" import { PieChart, Pie, ResponsiveContainer, Cell, Sector } from "recharts" import config from "@/config" import { formatCurrency } from "@/utils/formatCurrency" -import { Package, Layers, DollarSign, ShoppingCart } from "lucide-react" +import { Package, PackageCheck, Layers, DollarSign, Tag } from "lucide-react" import { useState } from "react" +import { PHASE_CONFIG } from "@/utils/lifecyclePhases" + +interface PhaseStock { + phase: string + products: number + units: number + cost: number + retail: number + percentage: number +} interface StockMetricsData { totalProducts: number @@ -19,6 +29,7 @@ interface StockMetricsData { cost: number retail: number }[] + phaseStock?: PhaseStock[] } const COLORS = [ @@ -32,66 +43,54 @@ const COLORS = [ "#FF7C43", ] -const renderActiveShape = (props: any) => { - const { cx, cy, innerRadius, outerRadius, startAngle, endAngle, fill, brand, retail } = props; - - // Split brand name into words and create lines of max 12 chars - const words = brand.split(' '); +function wrapLabel(text: string, maxLen = 12): string[] { + const words = text.split(' '); const lines: string[] = []; - let currentLine = ''; - + let cur = ''; words.forEach((word: string) 
=> { - if ((currentLine + ' ' + word).length <= 12) { - currentLine = currentLine ? `${currentLine} ${word}` : word; + if ((cur + ' ' + word).length <= maxLen) { + cur = cur ? `${cur} ${word}` : word; } else { - if (currentLine) lines.push(currentLine); - currentLine = word; + if (cur) lines.push(cur); + cur = word; } }); - if (currentLine) lines.push(currentLine); + if (cur) lines.push(cur); + return lines; +} + +const renderActiveShape = (props: any) => { + const { cx, cy, innerRadius, outerRadius, startAngle, endAngle, fill, brand, cost } = props; + const lines = wrapLabel(brand); return ( - - + + {lines.map((line, i) => ( - - {line} - + {line} ))} - - {formatCurrency(retail)} + + {formatCurrency(cost)} + + + ); +}; + +const renderPhaseActiveShape = (props: any) => { + const { cx, cy, innerRadius, outerRadius, startAngle, endAngle, fill, phase, cost } = props; + const cfg = PHASE_CONFIG[phase] || { label: phase }; + const lines = wrapLabel(cfg.label); + + return ( + + + + {lines.map((line, i) => ( + {line} + ))} + + {formatCurrency(cost)} ); @@ -103,6 +102,7 @@ function MetricSkeleton() { export function StockMetrics() { const [activeIndex, setActiveIndex] = useState(); + const [activePhaseIndex, setActivePhaseIndex] = useState(); const { data, isError, isLoading } = useQuery({ queryKey: ["stock-metrics"], @@ -122,49 +122,49 @@ export function StockMetrics() { {isError ? (

Failed to load stock metrics

) : ( -
-
-
-
-
- -

Products

+
+
+
+
+
+ +

Products

{isLoading || !data ? : (

{data.totalProducts.toLocaleString()}

)}
-
-
- -

Products In Stock

+
+
+ +

Products In Stock

{isLoading || !data ? : (

{data.productsInStock.toLocaleString()}

)}
-
-
- -

Stock Units

+
+
+ +

Stock Units

{isLoading || !data ? : (

{data.totalStockUnits.toLocaleString()}

)}
-
-
- -

Stock Cost

+
+
+ +

Stock Cost

{isLoading || !data ? : (

{formatCurrency(data.totalStockCost)}

)}
-
-
- -

Stock Retail

+
+
+ +

Stock Retail

{isLoading || !data ? : (

{formatCurrency(data.totalStockRetail)}

@@ -172,9 +172,9 @@ export function StockMetrics() {
-
-
-
Stock Retail By Brand
+
+
+
Stock Cost By Brand
{isLoading || !data ? (
@@ -185,7 +185,7 @@ export function StockMetrics() {
+
+
Stock Cost By Phase
+
+ {isLoading || !data?.phaseStock ? ( +
+
+
+ ) : ( + + + setActivePhaseIndex(index)} + onMouseLeave={() => setActivePhaseIndex(undefined)} + > + {data.phaseStock.map((entry) => { + const cfg = PHASE_CONFIG[entry.phase] || { color: "#94A3B8" } + return ( + + ) + })} + + + + )} +
+
)} diff --git a/inventory/src/components/overview/TopReplenishProducts.tsx b/inventory/src/components/overview/TopReplenishProducts.tsx index 7f3144f..585833f 100644 --- a/inventory/src/components/overview/TopReplenishProducts.tsx +++ b/inventory/src/components/overview/TopReplenishProducts.tsx @@ -46,7 +46,7 @@ export function TopReplenishProducts() { ) : isLoading ? ( ) : ( - +
diff --git a/inventory/src/components/products/ProductDetail.tsx b/inventory/src/components/products/ProductDetail.tsx index c0a405f..c9d0057 100644 --- a/inventory/src/components/products/ProductDetail.tsx +++ b/inventory/src/components/products/ProductDetail.tsx @@ -19,8 +19,9 @@ import { StatusBadge } from "@/components/products/StatusBadge"; import { transformMetricsRow } from "@/utils/transformUtils"; import { cn } from "@/lib/utils"; import config from "@/config"; -import { ResponsiveContainer, LineChart, Line, XAxis, YAxis, Tooltip, CartesianGrid, Legend } from "recharts"; +import { ResponsiveContainer, LineChart, Line, AreaChart, Area, XAxis, YAxis, Tooltip, CartesianGrid, Legend } from "recharts"; import { Badge } from "@/components/ui/badge"; +import { format } from "date-fns"; import { Table, TableHeader, TableRow, TableHead, TableBody, TableCell } from "@/components/ui/table"; // Interfaces for POs and time series data @@ -46,6 +47,26 @@ interface ProductTimeSeries { recentPurchases: ProductPurchaseOrder[]; } +interface ProductForecast { + phase: string | null; + method: string | null; + forecast: { + date: string; + units: number; + revenue: number; + confidenceLower: number; + confidenceUpper: number; + }[]; +} + +const PHASE_LABELS: Record = { + preorder: "Pre-order", + launch: "Launch", + decay: "Active Decay", + mature: "Evergreen", + dormant: "Dormant", +}; + interface ProductDetailProps { productId: number | null; onClose: () => void; @@ -109,6 +130,18 @@ export function ProductDetail({ productId, onClose }: ProductDetailProps) { enabled: !!productId, // Only run query when productId is truthy }); + // Fetch product forecast data + const { data: forecastData, isLoading: isLoadingForecast } = useQuery({ + queryKey: ["productForecast", productId], + queryFn: async () => { + if (!productId) throw new Error("Product ID is required"); + const response = await fetch(`${config.apiUrl}/products/${productId}/forecast`, {credentials: 'include'}); + if 
(!response.ok) throw new Error("Failed to fetch forecast"); + return response.json(); + }, + enabled: !!productId, + }); + // Get PO status display names (DB stores text statuses) const getPOStatusName = (status: string): string => { const statusMap: Record = { @@ -328,6 +361,72 @@ export function ProductDetail({ productId, onClose }: ProductDetailProps) { + {/* Forecast Chart */} + + + 90-Day Forecast + + {forecastData?.phase + ? `${PHASE_LABELS[forecastData.phase] || forecastData.phase} phase \u00b7 ${forecastData.method || 'unknown'} method` + : 'Lifecycle-aware demand forecast'} + + + + {isLoadingForecast ? ( +
+ +
+ ) : forecastData && forecastData.forecast.length > 0 ? ( + + + + format(new Date(d + 'T00:00:00'), 'MMM d')} + interval="preserveStartEnd" + tick={{ fontSize: 11 }} + /> + + + format(new Date(d + 'T00:00:00'), 'MMM d, yyyy')} + formatter={(value: number, name: string) => { + if (name === 'Revenue') return [formatCurrency(value), name]; + return [value.toFixed(1), name]; + }} + /> + + + + + + ) : ( +
+

No forecast data available for this product.

+
+ )} +
+
+ Sales Performance (30 Days) @@ -535,6 +634,8 @@ export function ProductDetail({ productId, onClose }: ProductDetailProps) { Forecasting + + diff --git a/inventory/src/pages/Overview.tsx b/inventory/src/pages/Overview.tsx index 2efe7da..c293119 100644 --- a/inventory/src/pages/Overview.tsx +++ b/inventory/src/pages/Overview.tsx @@ -18,11 +18,11 @@ export function Overview() { {/* First row - Stock and Purchase metrics */} -
- +
+ - +
diff --git a/inventory/src/utils/lifecyclePhases.ts b/inventory/src/utils/lifecyclePhases.ts new file mode 100644 index 0000000..3b495f3 --- /dev/null +++ b/inventory/src/utils/lifecyclePhases.ts @@ -0,0 +1,15 @@ +export const PHASE_CONFIG: Record = { + preorder: { label: "Pre-order", color: "#3B82F6" }, + launch: { label: "Launch", color: "#22C55E" }, + decay: { label: "Active", color: "#F59E0B" }, + mature: { label: "Evergreen", color: "#8B5CF6" }, + slow_mover: { label: "Slow Mover", color: "#14B8A6" }, + dormant: { label: "Dormant", color: "#6B7280" }, + unknown: { label: "Unclassified", color: "#94A3B8" }, +} + +/** Stacking order for phase area/bar charts (bottom to top) */ +export const PHASE_KEYS = ["mature", "slow_mover", "decay", "launch", "preorder", "dormant"] as const + +/** Same as PHASE_KEYS but includes the unknown bucket (for sales data where lifecycle_phase can be NULL) */ +export const PHASE_KEYS_WITH_UNKNOWN = ["mature", "slow_mover", "decay", "launch", "preorder", "dormant", "unknown"] as const diff --git a/inventory/tsconfig.tsbuildinfo b/inventory/tsconfig.tsbuildinfo index 78079bc..b07a951 100644 --- a/inventory/tsconfig.tsbuildinfo +++ b/inventory/tsconfig.tsbuildinfo @@ -1 +1 @@ 
-{"root":["./src/app.tsx","./src/config.ts","./src/main.tsx","./src/vite-env.d.ts","./src/components/config.ts","./src/components/analytics/agingsellthrough.tsx","./src/components/analytics/capitalefficiency.tsx","./src/components/analytics/discountimpact.tsx","./src/components/analytics/growthmomentum.tsx","./src/components/analytics/inventoryflow.tsx","./src/components/analytics/inventorytrends.tsx","./src/components/analytics/inventoryvaluetrend.tsx","./src/components/analytics/portfolioanalysis.tsx","./src/components/analytics/seasonalpatterns.tsx","./src/components/analytics/stockhealth.tsx","./src/components/analytics/stockoutrisk.tsx","./src/components/auth/firstaccessiblepage.tsx","./src/components/auth/protected.tsx","./src/components/auth/requireauth.tsx","./src/components/chat/chatroom.tsx","./src/components/chat/chattest.tsx","./src/components/chat/roomlist.tsx","./src/components/chat/searchresults.tsx","./src/components/dashboard/financialoverview.tsx","./src/components/dashboard/operationsmetrics.tsx","./src/components/dashboard/payrollmetrics.tsx","./src/components/dashboard/periodselectionpopover.tsx","./src/components/dashboard/shared/dashboardbadge.tsx","./src/components/dashboard/shared/dashboardcharttooltip.tsx","./src/components/dashboard/shared/dashboardsectionheader.tsx","./src/components/dashboard/shared/dashboardskeleton.tsx","./src/components/dashboard/shared/dashboardstatcard.tsx","./src/components/dashboard/shared/dashboardstatcardmini.tsx","./src/components/dashboard/shared/dashboardstates.tsx","./src/components/dashboard/shared/dashboardtable.tsx","./src/components/dashboard/shared/index.ts","./src/components/discount-simulator/configpanel.tsx","./src/components/discount-simulator/resultschart.tsx","./src/components/discount-simulator/resultstable.tsx","./src/components/discount-simulator/summarycard.tsx","./src/components/forecasting/daterangepickerquick.tsx","./src/components/forecasting/quickorderbuilder.tsx","./src/components/foreca
sting/columns.tsx","./src/components/layout/appsidebar.tsx","./src/components/layout/mainlayout.tsx","./src/components/layout/navuser.tsx","./src/components/newsletter/campaignhistorydialog.tsx","./src/components/newsletter/newsletterstats.tsx","./src/components/newsletter/recommendationtable.tsx","./src/components/overview/bestsellers.tsx","./src/components/overview/forecastmetrics.tsx","./src/components/overview/overstockmetrics.tsx","./src/components/overview/overview.tsx","./src/components/overview/purchasemetrics.tsx","./src/components/overview/replenishmentmetrics.tsx","./src/components/overview/salesmetrics.tsx","./src/components/overview/stockmetrics.tsx","./src/components/overview/topoverstockedproducts.tsx","./src/components/overview/topreplenishproducts.tsx","./src/components/overview/vendorperformance.tsx","./src/components/product-editor/comboboxfield.tsx","./src/components/product-editor/editablecomboboxfield.tsx","./src/components/product-editor/editableinput.tsx","./src/components/product-editor/editablemultiselect.tsx","./src/components/product-editor/imagemanager.tsx","./src/components/product-editor/producteditform.tsx","./src/components/product-editor/productsearch.tsx","./src/components/product-editor/types.ts","./src/components/product-import/createproductcategorydialog.tsx","./src/components/product-import/reactspreadsheetimport.tsx","./src/components/product-import/config.ts","./src/components/product-import/index.ts","./src/components/product-import/translationsrsiprops.ts","./src/components/product-import/types.ts","./src/components/product-import/components/closeconfirmationdialog.tsx","./src/components/product-import/components/modalwrapper.tsx","./src/components/product-import/components/providers.tsx","./src/components/product-import/components/savesessiondialog.tsx","./src/components/product-import/components/savedsessionslist.tsx","./src/components/product-import/components/table.tsx","./src/components/product-import/hooks/usersi.ts",
"./src/components/product-import/steps/steps.tsx","./src/components/product-import/steps/uploadflow.tsx","./src/components/product-import/steps/imageuploadstep/imageuploadstep.tsx","./src/components/product-import/steps/imageuploadstep/types.ts","./src/components/product-import/steps/imageuploadstep/components/droppablecontainer.tsx","./src/components/product-import/steps/imageuploadstep/components/genericdropzone.tsx","./src/components/product-import/steps/imageuploadstep/components/unassignedimagessection.tsx","./src/components/product-import/steps/imageuploadstep/components/productcard/copybutton.tsx","./src/components/product-import/steps/imageuploadstep/components/productcard/imagedropzone.tsx","./src/components/product-import/steps/imageuploadstep/components/productcard/productcard.tsx","./src/components/product-import/steps/imageuploadstep/components/productcard/sortableimage.tsx","./src/components/product-import/steps/imageuploadstep/components/unassignedimagessection/unassignedimageitem.tsx","./src/components/product-import/steps/imageuploadstep/hooks/usebulkimageupload.ts","./src/components/product-import/steps/imageuploadstep/hooks/usedraganddrop.ts","./src/components/product-import/steps/imageuploadstep/hooks/useproductimageoperations.ts","./src/components/product-import/steps/imageuploadstep/hooks/useproductimagesinit.ts","./src/components/product-import/steps/imageuploadstep/hooks/useurlimageupload.ts","./src/components/product-import/steps/matchcolumnsstep/matchcolumnsstep.tsx","./src/components/product-import/steps/matchcolumnsstep/types.ts","./src/components/product-import/steps/matchcolumnsstep/components/matchicon.tsx","./src/components/product-import/steps/matchcolumnsstep/components/templatecolumn.tsx","./src/components/product-import/steps/matchcolumnsstep/utils/findmatch.ts","./src/components/product-import/steps/matchcolumnsstep/utils/findunmatchedrequiredfields.ts","./src/components/product-import/steps/matchcolumnsstep/utils/getfieldoptions
.ts","./src/components/product-import/steps/matchcolumnsstep/utils/getmatchedcolumns.ts","./src/components/product-import/steps/matchcolumnsstep/utils/normalizecheckboxvalue.ts","./src/components/product-import/steps/matchcolumnsstep/utils/normalizetabledata.ts","./src/components/product-import/steps/matchcolumnsstep/utils/setcolumn.ts","./src/components/product-import/steps/matchcolumnsstep/utils/setignorecolumn.ts","./src/components/product-import/steps/matchcolumnsstep/utils/setsubcolumn.ts","./src/components/product-import/steps/matchcolumnsstep/utils/uniqueentries.ts","./src/components/product-import/steps/selectheaderstep/selectheaderstep.tsx","./src/components/product-import/steps/selectheaderstep/components/selectheadertable.tsx","./src/components/product-import/steps/selectheaderstep/components/columns.tsx","./src/components/product-import/steps/selectsheetstep/selectsheetstep.tsx","./src/components/product-import/steps/uploadstep/uploadstep.tsx","./src/components/product-import/steps/uploadstep/components/dropzone.tsx","./src/components/product-import/steps/uploadstep/components/columns.tsx","./src/components/product-import/steps/uploadstep/utils/readfilesasync.ts","./src/components/product-import/steps/validationstep/index.tsx","./src/components/product-import/steps/validationstep/components/aisuggestionbadge.tsx","./src/components/product-import/steps/validationstep/components/copydownbanner.tsx","./src/components/product-import/steps/validationstep/components/floatingselectionbar.tsx","./src/components/product-import/steps/validationstep/components/initializingoverlay.tsx","./src/components/product-import/steps/validationstep/components/searchabletemplateselect.tsx","./src/components/product-import/steps/validationstep/components/suggestionbadges.tsx","./src/components/product-import/steps/validationstep/components/validationcontainer.tsx","./src/components/product-import/steps/validationstep/components/validationfooter.tsx","./src/components/product-im
port/steps/validationstep/components/validationtable.tsx","./src/components/product-import/steps/validationstep/components/validationtoolbar.tsx","./src/components/product-import/steps/validationstep/components/cells/checkboxcell.tsx","./src/components/product-import/steps/validationstep/components/cells/comboboxcell.tsx","./src/components/product-import/steps/validationstep/components/cells/inputcell.tsx","./src/components/product-import/steps/validationstep/components/cells/multiselectcell.tsx","./src/components/product-import/steps/validationstep/components/cells/multilineinput.tsx","./src/components/product-import/steps/validationstep/components/cells/selectcell.tsx","./src/components/product-import/steps/validationstep/contexts/aisuggestionscontext.tsx","./src/components/product-import/steps/validationstep/dialogs/aidebugdialog.tsx","./src/components/product-import/steps/validationstep/dialogs/aivalidationprogress.tsx","./src/components/product-import/steps/validationstep/dialogs/aivalidationresults.tsx","./src/components/product-import/steps/validationstep/dialogs/sanitycheckdialog.tsx","./src/components/product-import/steps/validationstep/hooks/useautoinlineaivalidation.ts","./src/components/product-import/steps/validationstep/hooks/usecopydownvalidation.ts","./src/components/product-import/steps/validationstep/hooks/usefieldoptions.ts","./src/components/product-import/steps/validationstep/hooks/useinlineaivalidation.ts","./src/components/product-import/steps/validationstep/hooks/useproductlines.ts","./src/components/product-import/steps/validationstep/hooks/usesanitycheck.ts","./src/components/product-import/steps/validationstep/hooks/usetemplatemanagement.ts","./src/components/product-import/steps/validationstep/hooks/useupcvalidation.ts","./src/components/product-import/steps/validationstep/hooks/usevalidationactions.ts","./src/components/product-import/steps/validationstep/hooks/useaivalidation/index.ts","./src/components/product-import/steps/validationst
ep/hooks/useaivalidation/useaiapi.ts","./src/components/product-import/steps/validationstep/hooks/useaivalidation/useaiprogress.ts","./src/components/product-import/steps/validationstep/hooks/useaivalidation/useaitransform.ts","./src/components/product-import/steps/validationstep/store/selectors.ts","./src/components/product-import/steps/validationstep/store/types.ts","./src/components/product-import/steps/validationstep/store/validationstore.ts","./src/components/product-import/steps/validationstep/utils/aivalidationutils.ts","./src/components/product-import/steps/validationstep/utils/countryutils.ts","./src/components/product-import/steps/validationstep/utils/datamutations.ts","./src/components/product-import/steps/validationstep/utils/inlineaipayload.ts","./src/components/product-import/steps/validationstep/utils/priceutils.ts","./src/components/product-import/steps/validationstep/utils/upcutils.ts","./src/components/product-import/steps/validationstepold/index.tsx","./src/components/product-import/steps/validationstepold/types.ts","./src/components/product-import/steps/validationstepold/components/aivalidationdialogs.tsx","./src/components/product-import/steps/validationstepold/components/basecellcontent.tsx","./src/components/product-import/steps/validationstepold/components/initializingvalidation.tsx","./src/components/product-import/steps/validationstepold/components/searchabletemplateselect.tsx","./src/components/product-import/steps/validationstepold/components/upcvalidationtableadapter.tsx","./src/components/product-import/steps/validationstepold/components/validationcell.tsx","./src/components/product-import/steps/validationstepold/components/validationcontainer.tsx","./src/components/product-import/steps/validationstepold/components/validationtable.tsx","./src/components/product-import/steps/validationstepold/components/cells/checkboxcell.tsx","./src/components/product-import/steps/validationstepold/components/cells/inputcell.tsx","./src/components/produ
ct-import/steps/validationstepold/components/cells/multiselectcell.tsx","./src/components/product-import/steps/validationstepold/components/cells/multilineinput.tsx","./src/components/product-import/steps/validationstepold/components/cells/selectcell.tsx","./src/components/product-import/steps/validationstepold/hooks/useaivalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/usefieldvalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/usefiltermanagement.tsx","./src/components/product-import/steps/validationstepold/hooks/useinitialvalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/useproductlinesfetching.tsx","./src/components/product-import/steps/validationstepold/hooks/userowoperations.tsx","./src/components/product-import/steps/validationstepold/hooks/usetemplatemanagement.tsx","./src/components/product-import/steps/validationstepold/hooks/useuniqueitemnumbersvalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/useuniquevalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/useupcvalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/usevalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/usevalidationstate.tsx","./src/components/product-import/steps/validationstepold/hooks/validationtypes.ts","./src/components/product-import/steps/validationstepold/types/index.ts","./src/components/product-import/steps/validationstepold/utils/aivalidationutils.ts","./src/components/product-import/steps/validationstepold/utils/countryutils.ts","./src/components/product-import/steps/validationstepold/utils/datamutations.ts","./src/components/product-import/steps/validationstepold/utils/priceutils.ts","./src/components/product-import/steps/validationstepold/utils/upcutils.ts","./src/components/product-import/utils/exceedsmaxrecords.ts","./src/components/product-import/utils/mapdata.ts","./src/components/pr
oduct-import/utils/mapworkbook.ts","./src/components/product-import/utils/steps.ts","./src/components/products/productdetail.tsx","./src/components/products/productfilters.tsx","./src/components/products/productsummarycards.tsx","./src/components/products/producttable.tsx","./src/components/products/producttableskeleton.tsx","./src/components/products/productviews.tsx","./src/components/products/statusbadge.tsx","./src/components/products/columndefinitions.ts","./src/components/purchase-orders/categorymetricscard.tsx","./src/components/purchase-orders/filtercontrols.tsx","./src/components/purchase-orders/ordermetricscard.tsx","./src/components/purchase-orders/paginationcontrols.tsx","./src/components/purchase-orders/pipelinecard.tsx","./src/components/purchase-orders/purchaseorderaccordion.tsx","./src/components/purchase-orders/purchaseorderstable.tsx","./src/components/purchase-orders/vendormetricscard.tsx","./src/components/settings/datamanagement.tsx","./src/components/settings/globalsettings.tsx","./src/components/settings/permissionselector.tsx","./src/components/settings/productsettings.tsx","./src/components/settings/promptmanagement.tsx","./src/components/settings/reusableimagemanagement.tsx","./src/components/settings/templatemanagement.tsx","./src/components/settings/userform.tsx","./src/components/settings/userlist.tsx","./src/components/settings/usermanagement.tsx","./src/components/settings/vendorsettings.tsx","./src/components/templates/searchproducttemplatedialog.tsx","./src/components/templates/templateform.tsx","./src/components/ui/accordion.tsx","./src/components/ui/alert-dialog.tsx","./src/components/ui/alert.tsx","./src/components/ui/avatar.tsx","./src/components/ui/badge.tsx","./src/components/ui/button.tsx","./src/components/ui/calendar.tsx","./src/components/ui/card.tsx","./src/components/ui/carousel.tsx","./src/components/ui/checkbox.tsx","./src/components/ui/code.tsx","./src/components/ui/collapsible.tsx","./src/components/ui/command.tsx",".
/src/components/ui/date-range-picker-narrow.tsx","./src/components/ui/date-range-picker.tsx","./src/components/ui/dialog.tsx","./src/components/ui/drawer.tsx","./src/components/ui/dropdown-menu.tsx","./src/components/ui/form.tsx","./src/components/ui/input.tsx","./src/components/ui/label.tsx","./src/components/ui/page-loading.tsx","./src/components/ui/pagination.tsx","./src/components/ui/popover.tsx","./src/components/ui/progress.tsx","./src/components/ui/radio-group.tsx","./src/components/ui/scroll-area.tsx","./src/components/ui/select.tsx","./src/components/ui/separator.tsx","./src/components/ui/sheet.tsx","./src/components/ui/sidebar.tsx","./src/components/ui/skeleton.tsx","./src/components/ui/sonner.tsx","./src/components/ui/switch.tsx","./src/components/ui/table.tsx","./src/components/ui/tabs.tsx","./src/components/ui/textarea.tsx","./src/components/ui/toast.tsx","./src/components/ui/toaster.tsx","./src/components/ui/toggle-group.tsx","./src/components/ui/toggle.tsx","./src/components/ui/tooltip.tsx","./src/config/dashboard.ts","./src/contexts/authcontext.tsx","./src/contexts/dashboardscrollcontext.tsx","./src/contexts/importsessioncontext.tsx","./src/hooks/use-mobile.tsx","./src/hooks/use-toast.ts","./src/hooks/usedebounce.ts","./src/hooks/useimportautosave.ts","./src/lib/utils.ts","./src/lib/dashboard/chartconfig.ts","./src/lib/dashboard/designtokens.ts","./src/pages/analytics.tsx","./src/pages/blackfridaydashboard.tsx","./src/pages/brands.tsx","./src/pages/categories.tsx","./src/pages/chat.tsx","./src/pages/dashboard.tsx","./src/pages/discountsimulator.tsx","./src/pages/forecasting.tsx","./src/pages/htslookup.tsx","./src/pages/import.tsx","./src/pages/login.tsx","./src/pages/newsletter.tsx","./src/pages/overview.tsx","./src/pages/producteditor.tsx","./src/pages/products.tsx","./src/pages/purchaseorders.tsx","./src/pages/settings.tsx","./src/pages/smalldashboard.tsx","./src/pages/vendors.tsx","./src/services/apiv2.ts","./src/services/importsessionapi.ts","./s
rc/services/producteditor.ts","./src/types/dashboard-shims.d.ts","./src/types/dashboard.d.ts","./src/types/discount-simulator.ts","./src/types/globals.d.ts","./src/types/importsession.ts","./src/types/products.ts","./src/types/react-data-grid.d.ts","./src/types/status-codes.ts","./src/utils/emojiutils.ts","./src/utils/formatcurrency.ts","./src/utils/naturallanguageperiod.ts","./src/utils/productutils.ts","./src/utils/transformutils.ts"],"version":"5.6.3"} \ No newline at end of file +{"root":["./src/app.tsx","./src/config.ts","./src/main.tsx","./src/vite-env.d.ts","./src/components/config.ts","./src/components/analytics/agingsellthrough.tsx","./src/components/analytics/capitalefficiency.tsx","./src/components/analytics/discountimpact.tsx","./src/components/analytics/growthmomentum.tsx","./src/components/analytics/inventoryflow.tsx","./src/components/analytics/inventorytrends.tsx","./src/components/analytics/inventoryvaluetrend.tsx","./src/components/analytics/portfolioanalysis.tsx","./src/components/analytics/seasonalpatterns.tsx","./src/components/analytics/stockhealth.tsx","./src/components/analytics/stockoutrisk.tsx","./src/components/auth/firstaccessiblepage.tsx","./src/components/auth/protected.tsx","./src/components/auth/requireauth.tsx","./src/components/chat/chatroom.tsx","./src/components/chat/chattest.tsx","./src/components/chat/roomlist.tsx","./src/components/chat/searchresults.tsx","./src/components/dashboard/financialoverview.tsx","./src/components/dashboard/operationsmetrics.tsx","./src/components/dashboard/payrollmetrics.tsx","./src/components/dashboard/periodselectionpopover.tsx","./src/components/dashboard/shared/dashboardbadge.tsx","./src/components/dashboard/shared/dashboardcharttooltip.tsx","./src/components/dashboard/shared/dashboardsectionheader.tsx","./src/components/dashboard/shared/dashboardskeleton.tsx","./src/components/dashboard/shared/dashboardstatcard.tsx","./src/components/dashboard/shared/dashboardstatcardmini.tsx","./src/components/d
ashboard/shared/dashboardstates.tsx","./src/components/dashboard/shared/dashboardtable.tsx","./src/components/dashboard/shared/index.ts","./src/components/discount-simulator/configpanel.tsx","./src/components/discount-simulator/resultschart.tsx","./src/components/discount-simulator/resultstable.tsx","./src/components/discount-simulator/summarycard.tsx","./src/components/forecasting/daterangepickerquick.tsx","./src/components/forecasting/quickorderbuilder.tsx","./src/components/forecasting/columns.tsx","./src/components/layout/appsidebar.tsx","./src/components/layout/mainlayout.tsx","./src/components/layout/navuser.tsx","./src/components/newsletter/campaignhistorydialog.tsx","./src/components/newsletter/newsletterstats.tsx","./src/components/newsletter/recommendationtable.tsx","./src/components/overview/bestsellers.tsx","./src/components/overview/forecastaccuracy.tsx","./src/components/overview/forecastmetrics.tsx","./src/components/overview/overstockmetrics.tsx","./src/components/overview/purchasemetrics.tsx","./src/components/overview/replenishmentmetrics.tsx","./src/components/overview/salesmetrics.tsx","./src/components/overview/stockmetrics.tsx","./src/components/overview/topoverstockedproducts.tsx","./src/components/overview/topreplenishproducts.tsx","./src/components/product-editor/comboboxfield.tsx","./src/components/product-editor/editablecomboboxfield.tsx","./src/components/product-editor/editableinput.tsx","./src/components/product-editor/editablemultiselect.tsx","./src/components/product-editor/imagemanager.tsx","./src/components/product-editor/producteditform.tsx","./src/components/product-editor/productsearch.tsx","./src/components/product-editor/types.ts","./src/components/product-import/createproductcategorydialog.tsx","./src/components/product-import/reactspreadsheetimport.tsx","./src/components/product-import/config.ts","./src/components/product-import/index.ts","./src/components/product-import/translationsrsiprops.ts","./src/components/product-impo
rt/types.ts","./src/components/product-import/components/closeconfirmationdialog.tsx","./src/components/product-import/components/modalwrapper.tsx","./src/components/product-import/components/providers.tsx","./src/components/product-import/components/savesessiondialog.tsx","./src/components/product-import/components/savedsessionslist.tsx","./src/components/product-import/components/table.tsx","./src/components/product-import/hooks/usersi.ts","./src/components/product-import/steps/steps.tsx","./src/components/product-import/steps/uploadflow.tsx","./src/components/product-import/steps/imageuploadstep/imageuploadstep.tsx","./src/components/product-import/steps/imageuploadstep/types.ts","./src/components/product-import/steps/imageuploadstep/components/droppablecontainer.tsx","./src/components/product-import/steps/imageuploadstep/components/genericdropzone.tsx","./src/components/product-import/steps/imageuploadstep/components/unassignedimagessection.tsx","./src/components/product-import/steps/imageuploadstep/components/productcard/copybutton.tsx","./src/components/product-import/steps/imageuploadstep/components/productcard/imagedropzone.tsx","./src/components/product-import/steps/imageuploadstep/components/productcard/productcard.tsx","./src/components/product-import/steps/imageuploadstep/components/productcard/sortableimage.tsx","./src/components/product-import/steps/imageuploadstep/components/unassignedimagessection/unassignedimageitem.tsx","./src/components/product-import/steps/imageuploadstep/hooks/usebulkimageupload.ts","./src/components/product-import/steps/imageuploadstep/hooks/usedraganddrop.ts","./src/components/product-import/steps/imageuploadstep/hooks/useproductimageoperations.ts","./src/components/product-import/steps/imageuploadstep/hooks/useproductimagesinit.ts","./src/components/product-import/steps/imageuploadstep/hooks/useurlimageupload.ts","./src/components/product-import/steps/matchcolumnsstep/matchcolumnsstep.tsx","./src/components/product-import/ste
ps/matchcolumnsstep/types.ts","./src/components/product-import/steps/matchcolumnsstep/components/matchicon.tsx","./src/components/product-import/steps/matchcolumnsstep/components/templatecolumn.tsx","./src/components/product-import/steps/matchcolumnsstep/utils/findmatch.ts","./src/components/product-import/steps/matchcolumnsstep/utils/findunmatchedrequiredfields.ts","./src/components/product-import/steps/matchcolumnsstep/utils/getfieldoptions.ts","./src/components/product-import/steps/matchcolumnsstep/utils/getmatchedcolumns.ts","./src/components/product-import/steps/matchcolumnsstep/utils/normalizecheckboxvalue.ts","./src/components/product-import/steps/matchcolumnsstep/utils/normalizetabledata.ts","./src/components/product-import/steps/matchcolumnsstep/utils/setcolumn.ts","./src/components/product-import/steps/matchcolumnsstep/utils/setignorecolumn.ts","./src/components/product-import/steps/matchcolumnsstep/utils/setsubcolumn.ts","./src/components/product-import/steps/matchcolumnsstep/utils/uniqueentries.ts","./src/components/product-import/steps/selectheaderstep/selectheaderstep.tsx","./src/components/product-import/steps/selectheaderstep/components/selectheadertable.tsx","./src/components/product-import/steps/selectheaderstep/components/columns.tsx","./src/components/product-import/steps/selectsheetstep/selectsheetstep.tsx","./src/components/product-import/steps/uploadstep/uploadstep.tsx","./src/components/product-import/steps/uploadstep/components/dropzone.tsx","./src/components/product-import/steps/uploadstep/components/columns.tsx","./src/components/product-import/steps/uploadstep/utils/readfilesasync.ts","./src/components/product-import/steps/validationstep/index.tsx","./src/components/product-import/steps/validationstep/components/aisuggestionbadge.tsx","./src/components/product-import/steps/validationstep/components/copydownbanner.tsx","./src/components/product-import/steps/validationstep/components/floatingselectionbar.tsx","./src/components/product-impor
t/steps/validationstep/components/initializingoverlay.tsx","./src/components/product-import/steps/validationstep/components/searchabletemplateselect.tsx","./src/components/product-import/steps/validationstep/components/suggestionbadges.tsx","./src/components/product-import/steps/validationstep/components/validationcontainer.tsx","./src/components/product-import/steps/validationstep/components/validationfooter.tsx","./src/components/product-import/steps/validationstep/components/validationtable.tsx","./src/components/product-import/steps/validationstep/components/validationtoolbar.tsx","./src/components/product-import/steps/validationstep/components/cells/checkboxcell.tsx","./src/components/product-import/steps/validationstep/components/cells/comboboxcell.tsx","./src/components/product-import/steps/validationstep/components/cells/inputcell.tsx","./src/components/product-import/steps/validationstep/components/cells/multiselectcell.tsx","./src/components/product-import/steps/validationstep/components/cells/multilineinput.tsx","./src/components/product-import/steps/validationstep/components/cells/selectcell.tsx","./src/components/product-import/steps/validationstep/contexts/aisuggestionscontext.tsx","./src/components/product-import/steps/validationstep/dialogs/aidebugdialog.tsx","./src/components/product-import/steps/validationstep/dialogs/aivalidationprogress.tsx","./src/components/product-import/steps/validationstep/dialogs/aivalidationresults.tsx","./src/components/product-import/steps/validationstep/dialogs/sanitycheckdialog.tsx","./src/components/product-import/steps/validationstep/hooks/useautoinlineaivalidation.ts","./src/components/product-import/steps/validationstep/hooks/usecopydownvalidation.ts","./src/components/product-import/steps/validationstep/hooks/usefieldoptions.ts","./src/components/product-import/steps/validationstep/hooks/useinlineaivalidation.ts","./src/components/product-import/steps/validationstep/hooks/useproductlines.ts","./src/components/prod
uct-import/steps/validationstep/hooks/usesanitycheck.ts","./src/components/product-import/steps/validationstep/hooks/usetemplatemanagement.ts","./src/components/product-import/steps/validationstep/hooks/useupcvalidation.ts","./src/components/product-import/steps/validationstep/hooks/usevalidationactions.ts","./src/components/product-import/steps/validationstep/hooks/useaivalidation/index.ts","./src/components/product-import/steps/validationstep/hooks/useaivalidation/useaiapi.ts","./src/components/product-import/steps/validationstep/hooks/useaivalidation/useaiprogress.ts","./src/components/product-import/steps/validationstep/hooks/useaivalidation/useaitransform.ts","./src/components/product-import/steps/validationstep/store/selectors.ts","./src/components/product-import/steps/validationstep/store/types.ts","./src/components/product-import/steps/validationstep/store/validationstore.ts","./src/components/product-import/steps/validationstep/utils/aivalidationutils.ts","./src/components/product-import/steps/validationstep/utils/countryutils.ts","./src/components/product-import/steps/validationstep/utils/datamutations.ts","./src/components/product-import/steps/validationstep/utils/inlineaipayload.ts","./src/components/product-import/steps/validationstep/utils/priceutils.ts","./src/components/product-import/steps/validationstep/utils/upcutils.ts","./src/components/product-import/steps/validationstepold/index.tsx","./src/components/product-import/steps/validationstepold/types.ts","./src/components/product-import/steps/validationstepold/components/aivalidationdialogs.tsx","./src/components/product-import/steps/validationstepold/components/basecellcontent.tsx","./src/components/product-import/steps/validationstepold/components/initializingvalidation.tsx","./src/components/product-import/steps/validationstepold/components/searchabletemplateselect.tsx","./src/components/product-import/steps/validationstepold/components/upcvalidationtableadapter.tsx","./src/components/product-im
port/steps/validationstepold/components/validationcell.tsx","./src/components/product-import/steps/validationstepold/components/validationcontainer.tsx","./src/components/product-import/steps/validationstepold/components/validationtable.tsx","./src/components/product-import/steps/validationstepold/components/cells/checkboxcell.tsx","./src/components/product-import/steps/validationstepold/components/cells/inputcell.tsx","./src/components/product-import/steps/validationstepold/components/cells/multiselectcell.tsx","./src/components/product-import/steps/validationstepold/components/cells/multilineinput.tsx","./src/components/product-import/steps/validationstepold/components/cells/selectcell.tsx","./src/components/product-import/steps/validationstepold/hooks/useaivalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/usefieldvalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/usefiltermanagement.tsx","./src/components/product-import/steps/validationstepold/hooks/useinitialvalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/useproductlinesfetching.tsx","./src/components/product-import/steps/validationstepold/hooks/userowoperations.tsx","./src/components/product-import/steps/validationstepold/hooks/usetemplatemanagement.tsx","./src/components/product-import/steps/validationstepold/hooks/useuniqueitemnumbersvalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/useuniquevalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/useupcvalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/usevalidation.tsx","./src/components/product-import/steps/validationstepold/hooks/usevalidationstate.tsx","./src/components/product-import/steps/validationstepold/hooks/validationtypes.ts","./src/components/product-import/steps/validationstepold/types/index.ts","./src/components/product-import/steps/validationstepold/utils/aivalidationutils.ts","
./src/components/product-import/steps/validationstepold/utils/countryutils.ts","./src/components/product-import/steps/validationstepold/utils/datamutations.ts","./src/components/product-import/steps/validationstepold/utils/priceutils.ts","./src/components/product-import/steps/validationstepold/utils/upcutils.ts","./src/components/product-import/utils/exceedsmaxrecords.ts","./src/components/product-import/utils/mapdata.ts","./src/components/product-import/utils/mapworkbook.ts","./src/components/product-import/utils/steps.ts","./src/components/products/productdetail.tsx","./src/components/products/productfilters.tsx","./src/components/products/productsummarycards.tsx","./src/components/products/producttable.tsx","./src/components/products/producttableskeleton.tsx","./src/components/products/productviews.tsx","./src/components/products/statusbadge.tsx","./src/components/products/columndefinitions.ts","./src/components/purchase-orders/categorymetricscard.tsx","./src/components/purchase-orders/filtercontrols.tsx","./src/components/purchase-orders/ordermetricscard.tsx","./src/components/purchase-orders/paginationcontrols.tsx","./src/components/purchase-orders/pipelinecard.tsx","./src/components/purchase-orders/purchaseorderaccordion.tsx","./src/components/purchase-orders/purchaseorderstable.tsx","./src/components/purchase-orders/vendormetricscard.tsx","./src/components/settings/datamanagement.tsx","./src/components/settings/globalsettings.tsx","./src/components/settings/permissionselector.tsx","./src/components/settings/productsettings.tsx","./src/components/settings/promptmanagement.tsx","./src/components/settings/reusableimagemanagement.tsx","./src/components/settings/templatemanagement.tsx","./src/components/settings/userform.tsx","./src/components/settings/userlist.tsx","./src/components/settings/usermanagement.tsx","./src/components/settings/vendorsettings.tsx","./src/components/templates/searchproducttemplatedialog.tsx","./src/components/templates/templateform.tsx",
"./src/components/ui/accordion.tsx","./src/components/ui/alert-dialog.tsx","./src/components/ui/alert.tsx","./src/components/ui/avatar.tsx","./src/components/ui/badge.tsx","./src/components/ui/button.tsx","./src/components/ui/calendar.tsx","./src/components/ui/card.tsx","./src/components/ui/carousel.tsx","./src/components/ui/checkbox.tsx","./src/components/ui/code.tsx","./src/components/ui/collapsible.tsx","./src/components/ui/command.tsx","./src/components/ui/date-range-picker-narrow.tsx","./src/components/ui/date-range-picker.tsx","./src/components/ui/dialog.tsx","./src/components/ui/drawer.tsx","./src/components/ui/dropdown-menu.tsx","./src/components/ui/form.tsx","./src/components/ui/input.tsx","./src/components/ui/label.tsx","./src/components/ui/page-loading.tsx","./src/components/ui/pagination.tsx","./src/components/ui/popover.tsx","./src/components/ui/progress.tsx","./src/components/ui/radio-group.tsx","./src/components/ui/scroll-area.tsx","./src/components/ui/select.tsx","./src/components/ui/separator.tsx","./src/components/ui/sheet.tsx","./src/components/ui/sidebar.tsx","./src/components/ui/skeleton.tsx","./src/components/ui/sonner.tsx","./src/components/ui/switch.tsx","./src/components/ui/table.tsx","./src/components/ui/tabs.tsx","./src/components/ui/textarea.tsx","./src/components/ui/toast.tsx","./src/components/ui/toaster.tsx","./src/components/ui/toggle-group.tsx","./src/components/ui/toggle.tsx","./src/components/ui/tooltip.tsx","./src/config/dashboard.ts","./src/contexts/authcontext.tsx","./src/contexts/dashboardscrollcontext.tsx","./src/contexts/importsessioncontext.tsx","./src/hooks/use-mobile.tsx","./src/hooks/use-toast.ts","./src/hooks/usedebounce.ts","./src/hooks/useimportautosave.ts","./src/lib/utils.ts","./src/lib/dashboard/chartconfig.ts","./src/lib/dashboard/designtokens.ts","./src/pages/analytics.tsx","./src/pages/blackfridaydashboard.tsx","./src/pages/brands.tsx","./src/pages/categories.tsx","./src/pages/chat.tsx","./src/pages/dashboard.tsx
","./src/pages/discountsimulator.tsx","./src/pages/forecasting.tsx","./src/pages/htslookup.tsx","./src/pages/import.tsx","./src/pages/login.tsx","./src/pages/newsletter.tsx","./src/pages/overview.tsx","./src/pages/producteditor.tsx","./src/pages/products.tsx","./src/pages/purchaseorders.tsx","./src/pages/settings.tsx","./src/pages/smalldashboard.tsx","./src/pages/vendors.tsx","./src/services/apiv2.ts","./src/services/importsessionapi.ts","./src/services/producteditor.ts","./src/types/dashboard-shims.d.ts","./src/types/dashboard.d.ts","./src/types/discount-simulator.ts","./src/types/globals.d.ts","./src/types/importsession.ts","./src/types/products.ts","./src/types/react-data-grid.d.ts","./src/types/status-codes.ts","./src/utils/emojiutils.ts","./src/utils/formatcurrency.ts","./src/utils/lifecyclephases.ts","./src/utils/naturallanguageperiod.ts","./src/utils/productutils.ts","./src/utils/transformutils.ts"],"version":"5.6.3"} \ No newline at end of file