diff --git a/inventory-server/db/config-schema-new.sql b/inventory-server/db/config-schema-new.sql new file mode 100644 index 0000000..0661da2 --- /dev/null +++ b/inventory-server/db/config-schema-new.sql @@ -0,0 +1,113 @@ +-- Drop tables in reverse order of dependency +DROP TABLE IF EXISTS public.settings_product CASCADE; +DROP TABLE IF EXISTS public.settings_vendor CASCADE; +DROP TABLE IF EXISTS public.settings_global CASCADE; + +-- Table Definition: settings_global +CREATE TABLE public.settings_global ( + setting_key VARCHAR PRIMARY KEY, + setting_value VARCHAR NOT NULL, + description TEXT, + updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +-- Table Definition: settings_vendor +CREATE TABLE public.settings_vendor ( + vendor VARCHAR PRIMARY KEY, -- Matches products.vendor + default_lead_time_days INT, + default_days_of_stock INT, + updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP +); +-- Index for faster lookups if needed (PK usually sufficient) +-- CREATE INDEX idx_settings_vendor_vendor ON public.settings_vendor(vendor); + +-- Table Definition: settings_product +CREATE TABLE public.settings_product ( + pid INT8 PRIMARY KEY, + lead_time_days INT, -- Overrides vendor/global + days_of_stock INT, -- Overrides vendor/global + safety_stock INT DEFAULT 0, -- Minimum desired stock level + forecast_method VARCHAR DEFAULT 'standard', -- e.g., 'standard', 'seasonal' + exclude_from_forecast BOOLEAN DEFAULT FALSE, + updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT fk_settings_product_pid FOREIGN KEY (pid) REFERENCES public.products(pid) ON DELETE CASCADE ON UPDATE CASCADE +); + + +-- Description: Inserts or updates standard default global settings. +-- Safe to rerun; will update existing keys with these default values. +-- Dependencies: `settings_global` table must exist. +-- Frequency: Run once initially, or rerun if you want to reset global defaults. + +INSERT INTO public.settings_global (setting_key, setting_value, description) VALUES + ('abc_revenue_threshold_a', '0.80', 'Revenue percentage for Class A (cumulative)'), + ('abc_revenue_threshold_b', '0.95', 'Revenue percentage for Class B (cumulative)'), + ('abc_calculation_basis', 'revenue_30d', 'Metric for ABC calc (revenue_30d, sales_30d, lifetime_revenue)'), + ('abc_calculation_period', '30', 'Days period for ABC calculation if not lifetime'), + ('default_forecast_method', 'standard', 'Default forecast method (standard, seasonal)'), + ('default_lead_time_days', '14', 'Global default lead time in days'), + ('default_days_of_stock', '30', 'Global default days of stock coverage target'), + -- Set default safety stock to 0 units. Can be overridden per product. + -- If you wanted safety stock in days, you'd store 'days' here and calculate units later. + ('default_safety_stock_units', '0', 'Global default safety stock in units') +ON CONFLICT (setting_key) DO UPDATE SET + setting_value = EXCLUDED.setting_value, + description = EXCLUDED.description, + updated_at = CURRENT_TIMESTAMP; -- Update timestamp if default value changes + + + +-- Description: Creates placeholder rows in `settings_vendor` for each unique vendor +-- found in the `products` table. Does NOT set specific overrides. +-- Safe to rerun; will NOT overwrite existing vendor settings. +-- Dependencies: `settings_vendor` table must exist, `products` table populated. +-- Frequency: Run once after initial product load, or periodically if new vendors are added. 
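-- How these NULLs are consumed (mirrors the Settings CTE in update_product_metrics.sql elsewhere in this
-- change): a NULL in a placeholder row means "inherit", and the effective value is resolved
-- product -> vendor -> global, e.g.
--   COALESCE(sp.lead_time_days, sv.default_lead_time_days,
--            (SELECT setting_value FROM settings_global WHERE setting_key = 'default_lead_time_days')::int, 14)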
+ +INSERT INTO public.settings_vendor ( + vendor, + default_lead_time_days, + default_days_of_stock + -- updated_at will use its default CURRENT_TIMESTAMP on insert +) +SELECT + DISTINCT p.vendor, + -- Explicitly cast NULL to INTEGER to resolve type mismatch + CAST(NULL AS INTEGER), + CAST(NULL AS INTEGER) +FROM + public.products p +WHERE + p.vendor IS NOT NULL + AND p.vendor <> '' -- Exclude blank vendors if necessary + +ON CONFLICT (vendor) DO NOTHING; -- IMPORTANT: Do not overwrite existing vendor settings + +SELECT COUNT(*) FROM public.settings_vendor; -- Verify rows were inserted + + +-- Description: Creates placeholder rows in `settings_product` for each unique product +-- found in the `products` table. Sets basic defaults but no specific overrides. +-- Safe to rerun; will NOT overwrite existing product settings. +-- Dependencies: `settings_product` table must exist, `products` table populated. +-- Frequency: Run once after initial product load, or periodically if new products are added. + +INSERT INTO public.settings_product ( + pid, + lead_time_days, -- NULL = Inherit from Vendor/Global + days_of_stock, -- NULL = Inherit from Vendor/Global + safety_stock, -- Default to 0 units initially + forecast_method, -- NULL = Inherit from Global ('standard') + exclude_from_forecast -- Default to FALSE + -- updated_at will use its default CURRENT_TIMESTAMP on insert +) +SELECT + p.pid, + CAST(NULL AS INTEGER), -- Explicitly cast NULL to INTEGER + CAST(NULL AS INTEGER), -- Explicitly cast NULL to INTEGER + COALESCE((SELECT setting_value::int FROM settings_global WHERE setting_key = 'default_safety_stock_units'), 0), -- Use global default safety stock units + CAST(NULL AS VARCHAR), -- Cast NULL to VARCHAR for forecast_method (already varchar, but explicit) + FALSE -- Default: Include in forecast +FROM + public.products p + +ON CONFLICT (pid) DO NOTHING; -- IMPORTANT: Do not overwrite existing product-specific settings \ No newline at end of file diff --git a/inventory-server/db/config-schema.sql b/inventory-server/db/config-schema.sql index b8da2ae..7c14a62 100644 --- a/inventory-server/db/config-schema.sql +++ b/inventory-server/db/config-schema.sql @@ -1,261 +1,261 @@ --- Configuration tables schema +-- -- Configuration tables schema --- Create function for updating timestamps if it doesn't exist -CREATE OR REPLACE FUNCTION update_updated_column() -RETURNS TRIGGER AS $$ -BEGIN - NEW.updated = CURRENT_TIMESTAMP; - RETURN NEW; -END; -$$ language 'plpgsql'; +-- -- Create function for updating timestamps if it doesn't exist +-- CREATE OR REPLACE FUNCTION update_updated_column() +-- RETURNS TRIGGER AS $$ +-- BEGIN +-- NEW.updated = CURRENT_TIMESTAMP; +-- RETURN NEW; +-- END; +-- $$ language 'plpgsql'; --- Create function for updating updated_at timestamps -CREATE OR REPLACE FUNCTION update_updated_at_column() -RETURNS TRIGGER AS $$ -BEGIN - NEW.updated_at = CURRENT_TIMESTAMP; - RETURN NEW; -END; -$$ language 'plpgsql'; +-- -- Create function for updating updated_at timestamps +-- CREATE OR REPLACE FUNCTION update_updated_at_column() +-- RETURNS TRIGGER AS $$ +-- BEGIN +-- NEW.updated_at = CURRENT_TIMESTAMP; +-- RETURN NEW; +-- END; +-- $$ language 'plpgsql'; --- Stock threshold configurations -CREATE TABLE stock_thresholds ( - id INTEGER NOT NULL, - category_id BIGINT, -- NULL means default/global threshold - vendor VARCHAR(100), -- NULL means applies to all vendors - critical_days INTEGER NOT NULL DEFAULT 7, - reorder_days INTEGER NOT NULL DEFAULT 14, - overstock_days INTEGER NOT NULL DEFAULT 90, 
- low_stock_threshold INTEGER NOT NULL DEFAULT 5, - min_reorder_quantity INTEGER NOT NULL DEFAULT 1, - created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, - updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (id), - FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE, - UNIQUE (category_id, vendor) -); +-- -- Stock threshold configurations +-- CREATE TABLE stock_thresholds ( +-- id INTEGER NOT NULL, +-- category_id BIGINT, -- NULL means default/global threshold +-- vendor VARCHAR(100), -- NULL means applies to all vendors +-- critical_days INTEGER NOT NULL DEFAULT 7, +-- reorder_days INTEGER NOT NULL DEFAULT 14, +-- overstock_days INTEGER NOT NULL DEFAULT 90, +-- low_stock_threshold INTEGER NOT NULL DEFAULT 5, +-- min_reorder_quantity INTEGER NOT NULL DEFAULT 1, +-- created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, +-- updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, +-- PRIMARY KEY (id), +-- FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE, +-- UNIQUE (category_id, vendor) +-- ); -CREATE TRIGGER update_stock_thresholds_updated - BEFORE UPDATE ON stock_thresholds - FOR EACH ROW - EXECUTE FUNCTION update_updated_at_column(); +-- CREATE TRIGGER update_stock_thresholds_updated +-- BEFORE UPDATE ON stock_thresholds +-- FOR EACH ROW +-- EXECUTE FUNCTION update_updated_at_column(); -CREATE INDEX idx_st_metrics ON stock_thresholds(category_id, vendor); +-- CREATE INDEX idx_st_metrics ON stock_thresholds(category_id, vendor); --- Lead time threshold configurations -CREATE TABLE lead_time_thresholds ( - id INTEGER NOT NULL, - category_id BIGINT, -- NULL means default/global threshold - vendor VARCHAR(100), -- NULL means applies to all vendors - target_days INTEGER NOT NULL DEFAULT 14, - warning_days INTEGER NOT NULL DEFAULT 21, - critical_days INTEGER NOT NULL DEFAULT 30, - created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, - updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (id), - FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE, - UNIQUE (category_id, vendor) -); +-- -- Lead time threshold configurations +-- CREATE TABLE lead_time_thresholds ( +-- id INTEGER NOT NULL, +-- category_id BIGINT, -- NULL means default/global threshold +-- vendor VARCHAR(100), -- NULL means applies to all vendors +-- target_days INTEGER NOT NULL DEFAULT 14, +-- warning_days INTEGER NOT NULL DEFAULT 21, +-- critical_days INTEGER NOT NULL DEFAULT 30, +-- created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, +-- updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, +-- PRIMARY KEY (id), +-- FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE, +-- UNIQUE (category_id, vendor) +-- ); -CREATE TRIGGER update_lead_time_thresholds_updated - BEFORE UPDATE ON lead_time_thresholds - FOR EACH ROW - EXECUTE FUNCTION update_updated_at_column(); +-- CREATE TRIGGER update_lead_time_thresholds_updated +-- BEFORE UPDATE ON lead_time_thresholds +-- FOR EACH ROW +-- EXECUTE FUNCTION update_updated_at_column(); --- Sales velocity window configurations -CREATE TABLE sales_velocity_config ( - id INTEGER NOT NULL, - category_id BIGINT, -- NULL means default/global threshold - vendor VARCHAR(100), -- NULL means applies to all vendors - daily_window_days INTEGER NOT NULL DEFAULT 30, - weekly_window_days INTEGER NOT NULL DEFAULT 7, - monthly_window_days INTEGER NOT NULL DEFAULT 90, - created_at TIMESTAMP WITH TIME ZONE DEFAULT 
CURRENT_TIMESTAMP, - updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (id), - FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE, - UNIQUE (category_id, vendor) -); +-- -- Sales velocity window configurations +-- CREATE TABLE sales_velocity_config ( +-- id INTEGER NOT NULL, +-- category_id BIGINT, -- NULL means default/global threshold +-- vendor VARCHAR(100), -- NULL means applies to all vendors +-- daily_window_days INTEGER NOT NULL DEFAULT 30, +-- weekly_window_days INTEGER NOT NULL DEFAULT 7, +-- monthly_window_days INTEGER NOT NULL DEFAULT 90, +-- created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, +-- updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, +-- PRIMARY KEY (id), +-- FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE, +-- UNIQUE (category_id, vendor) +-- ); -CREATE TRIGGER update_sales_velocity_config_updated - BEFORE UPDATE ON sales_velocity_config - FOR EACH ROW - EXECUTE FUNCTION update_updated_at_column(); +-- CREATE TRIGGER update_sales_velocity_config_updated +-- BEFORE UPDATE ON sales_velocity_config +-- FOR EACH ROW +-- EXECUTE FUNCTION update_updated_at_column(); -CREATE INDEX idx_sv_metrics ON sales_velocity_config(category_id, vendor); +-- CREATE INDEX idx_sv_metrics ON sales_velocity_config(category_id, vendor); --- ABC Classification configurations -CREATE TABLE abc_classification_config ( - id INTEGER NOT NULL PRIMARY KEY, - a_threshold DECIMAL(5,2) NOT NULL DEFAULT 20.0, - b_threshold DECIMAL(5,2) NOT NULL DEFAULT 50.0, - classification_period_days INTEGER NOT NULL DEFAULT 90, - created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, - updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP -); +-- -- ABC Classification configurations +-- CREATE TABLE abc_classification_config ( +-- id INTEGER NOT NULL PRIMARY KEY, +-- a_threshold DECIMAL(5,2) NOT NULL DEFAULT 20.0, +-- b_threshold DECIMAL(5,2) NOT NULL DEFAULT 50.0, +-- classification_period_days INTEGER NOT NULL DEFAULT 90, +-- created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, +-- updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP +-- ); -CREATE TRIGGER update_abc_classification_config_updated - BEFORE UPDATE ON abc_classification_config - FOR EACH ROW - EXECUTE FUNCTION update_updated_at_column(); +-- CREATE TRIGGER update_abc_classification_config_updated +-- BEFORE UPDATE ON abc_classification_config +-- FOR EACH ROW +-- EXECUTE FUNCTION update_updated_at_column(); --- Safety stock configurations -CREATE TABLE safety_stock_config ( - id INTEGER NOT NULL, - category_id BIGINT, -- NULL means default/global threshold - vendor VARCHAR(100), -- NULL means applies to all vendors - coverage_days INTEGER NOT NULL DEFAULT 14, - service_level DECIMAL(5,2) NOT NULL DEFAULT 95.0, - created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, - updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (id), - FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE, - UNIQUE (category_id, vendor) -); +-- -- Safety stock configurations +-- CREATE TABLE safety_stock_config ( +-- id INTEGER NOT NULL, +-- category_id BIGINT, -- NULL means default/global threshold +-- vendor VARCHAR(100), -- NULL means applies to all vendors +-- coverage_days INTEGER NOT NULL DEFAULT 14, +-- service_level DECIMAL(5,2) NOT NULL DEFAULT 95.0, +-- created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, +-- updated_at TIMESTAMP WITH TIME ZONE DEFAULT 
CURRENT_TIMESTAMP, +-- PRIMARY KEY (id), +-- FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE, +-- UNIQUE (category_id, vendor) +-- ); -CREATE TRIGGER update_safety_stock_config_updated - BEFORE UPDATE ON safety_stock_config - FOR EACH ROW - EXECUTE FUNCTION update_updated_at_column(); +-- CREATE TRIGGER update_safety_stock_config_updated +-- BEFORE UPDATE ON safety_stock_config +-- FOR EACH ROW +-- EXECUTE FUNCTION update_updated_at_column(); -CREATE INDEX idx_ss_metrics ON safety_stock_config(category_id, vendor); +-- CREATE INDEX idx_ss_metrics ON safety_stock_config(category_id, vendor); --- Turnover rate configurations -CREATE TABLE turnover_config ( - id INTEGER NOT NULL, - category_id BIGINT, -- NULL means default/global threshold - vendor VARCHAR(100), -- NULL means applies to all vendors - calculation_period_days INTEGER NOT NULL DEFAULT 30, - target_rate DECIMAL(10,2) NOT NULL DEFAULT 1.0, - created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, - updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (id), - FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE, - UNIQUE (category_id, vendor) -); +-- -- Turnover rate configurations +-- CREATE TABLE turnover_config ( +-- id INTEGER NOT NULL, +-- category_id BIGINT, -- NULL means default/global threshold +-- vendor VARCHAR(100), -- NULL means applies to all vendors +-- calculation_period_days INTEGER NOT NULL DEFAULT 30, +-- target_rate DECIMAL(10,2) NOT NULL DEFAULT 1.0, +-- created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, +-- updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, +-- PRIMARY KEY (id), +-- FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE, +-- UNIQUE (category_id, vendor) +-- ); -CREATE TRIGGER update_turnover_config_updated - BEFORE UPDATE ON turnover_config - FOR EACH ROW - EXECUTE FUNCTION update_updated_at_column(); +-- CREATE TRIGGER update_turnover_config_updated +-- BEFORE UPDATE ON turnover_config +-- FOR EACH ROW +-- EXECUTE FUNCTION update_updated_at_column(); --- Create table for sales seasonality factors -CREATE TABLE sales_seasonality ( - month INTEGER NOT NULL, - seasonality_factor DECIMAL(5,3) DEFAULT 0, - last_updated TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (month), - CONSTRAINT month_range CHECK (month BETWEEN 1 AND 12), - CONSTRAINT seasonality_range CHECK (seasonality_factor BETWEEN -1.0 AND 1.0) -); +-- -- Create table for sales seasonality factors +-- CREATE TABLE sales_seasonality ( +-- month INTEGER NOT NULL, +-- seasonality_factor DECIMAL(5,3) DEFAULT 0, +-- last_updated TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, +-- PRIMARY KEY (month), +-- CONSTRAINT month_range CHECK (month BETWEEN 1 AND 12), +-- CONSTRAINT seasonality_range CHECK (seasonality_factor BETWEEN -1.0 AND 1.0) +-- ); -CREATE TRIGGER update_sales_seasonality_updated - BEFORE UPDATE ON sales_seasonality - FOR EACH ROW - EXECUTE FUNCTION update_updated_at_column(); +-- CREATE TRIGGER update_sales_seasonality_updated +-- BEFORE UPDATE ON sales_seasonality +-- FOR EACH ROW +-- EXECUTE FUNCTION update_updated_at_column(); --- Create table for financial calculation parameters -CREATE TABLE financial_calc_config ( - id INTEGER NOT NULL PRIMARY KEY, - order_cost DECIMAL(10,2) NOT NULL DEFAULT 25.00, -- The fixed cost per purchase order (used in EOQ) - holding_rate DECIMAL(10,4) NOT NULL DEFAULT 0.25, -- The annual inventory holding cost as a percentage of unit 
cost (used in EOQ) - service_level_z_score DECIMAL(10,4) NOT NULL DEFAULT 1.96, -- Z-score for ~95% service level (used in Safety Stock) - min_reorder_qty INTEGER NOT NULL DEFAULT 1, -- Minimum reorder quantity - default_reorder_qty INTEGER NOT NULL DEFAULT 5, -- Default reorder quantity when sales data is insufficient - default_safety_stock INTEGER NOT NULL DEFAULT 5, -- Default safety stock when sales data is insufficient - created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, - updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP -); +-- -- Create table for financial calculation parameters +-- CREATE TABLE financial_calc_config ( +-- id INTEGER NOT NULL PRIMARY KEY, +-- order_cost DECIMAL(10,2) NOT NULL DEFAULT 25.00, -- The fixed cost per purchase order (used in EOQ) +-- holding_rate DECIMAL(10,4) NOT NULL DEFAULT 0.25, -- The annual inventory holding cost as a percentage of unit cost (used in EOQ) +-- service_level_z_score DECIMAL(10,4) NOT NULL DEFAULT 1.96, -- Z-score for ~95% service level (used in Safety Stock) +-- min_reorder_qty INTEGER NOT NULL DEFAULT 1, -- Minimum reorder quantity +-- default_reorder_qty INTEGER NOT NULL DEFAULT 5, -- Default reorder quantity when sales data is insufficient +-- default_safety_stock INTEGER NOT NULL DEFAULT 5, -- Default safety stock when sales data is insufficient +-- created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, +-- updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP +-- ); -CREATE TRIGGER update_financial_calc_config_updated - BEFORE UPDATE ON financial_calc_config - FOR EACH ROW - EXECUTE FUNCTION update_updated_at_column(); +-- CREATE TRIGGER update_financial_calc_config_updated +-- BEFORE UPDATE ON financial_calc_config +-- FOR EACH ROW +-- EXECUTE FUNCTION update_updated_at_column(); --- Insert default global thresholds -INSERT INTO stock_thresholds (id, category_id, vendor, critical_days, reorder_days, overstock_days) -VALUES (1, NULL, NULL, 7, 14, 90) -ON CONFLICT (id) DO UPDATE SET - critical_days = EXCLUDED.critical_days, - reorder_days = EXCLUDED.reorder_days, - overstock_days = EXCLUDED.overstock_days; +-- -- Insert default global thresholds +-- INSERT INTO stock_thresholds (id, category_id, vendor, critical_days, reorder_days, overstock_days) +-- VALUES (1, NULL, NULL, 7, 14, 90) +-- ON CONFLICT (id) DO UPDATE SET +-- critical_days = EXCLUDED.critical_days, +-- reorder_days = EXCLUDED.reorder_days, +-- overstock_days = EXCLUDED.overstock_days; -INSERT INTO lead_time_thresholds (id, category_id, vendor, target_days, warning_days, critical_days) -VALUES (1, NULL, NULL, 14, 21, 30) -ON CONFLICT (id) DO UPDATE SET - target_days = EXCLUDED.target_days, - warning_days = EXCLUDED.warning_days, - critical_days = EXCLUDED.critical_days; +-- INSERT INTO lead_time_thresholds (id, category_id, vendor, target_days, warning_days, critical_days) +-- VALUES (1, NULL, NULL, 14, 21, 30) +-- ON CONFLICT (id) DO UPDATE SET +-- target_days = EXCLUDED.target_days, +-- warning_days = EXCLUDED.warning_days, +-- critical_days = EXCLUDED.critical_days; -INSERT INTO sales_velocity_config (id, category_id, vendor, daily_window_days, weekly_window_days, monthly_window_days) -VALUES (1, NULL, NULL, 30, 7, 90) -ON CONFLICT (id) DO UPDATE SET - daily_window_days = EXCLUDED.daily_window_days, - weekly_window_days = EXCLUDED.weekly_window_days, - monthly_window_days = EXCLUDED.monthly_window_days; +-- INSERT INTO sales_velocity_config (id, category_id, vendor, daily_window_days, weekly_window_days, monthly_window_days) 
+-- VALUES (1, NULL, NULL, 30, 7, 90) +-- ON CONFLICT (id) DO UPDATE SET +-- daily_window_days = EXCLUDED.daily_window_days, +-- weekly_window_days = EXCLUDED.weekly_window_days, +-- monthly_window_days = EXCLUDED.monthly_window_days; -INSERT INTO abc_classification_config (id, a_threshold, b_threshold, classification_period_days) -VALUES (1, 20.0, 50.0, 90) -ON CONFLICT (id) DO UPDATE SET - a_threshold = EXCLUDED.a_threshold, - b_threshold = EXCLUDED.b_threshold, - classification_period_days = EXCLUDED.classification_period_days; +-- INSERT INTO abc_classification_config (id, a_threshold, b_threshold, classification_period_days) +-- VALUES (1, 20.0, 50.0, 90) +-- ON CONFLICT (id) DO UPDATE SET +-- a_threshold = EXCLUDED.a_threshold, +-- b_threshold = EXCLUDED.b_threshold, +-- classification_period_days = EXCLUDED.classification_period_days; -INSERT INTO safety_stock_config (id, category_id, vendor, coverage_days, service_level) -VALUES (1, NULL, NULL, 14, 95.0) -ON CONFLICT (id) DO UPDATE SET - coverage_days = EXCLUDED.coverage_days, - service_level = EXCLUDED.service_level; +-- INSERT INTO safety_stock_config (id, category_id, vendor, coverage_days, service_level) +-- VALUES (1, NULL, NULL, 14, 95.0) +-- ON CONFLICT (id) DO UPDATE SET +-- coverage_days = EXCLUDED.coverage_days, +-- service_level = EXCLUDED.service_level; -INSERT INTO turnover_config (id, category_id, vendor, calculation_period_days, target_rate) -VALUES (1, NULL, NULL, 30, 1.0) -ON CONFLICT (id) DO UPDATE SET - calculation_period_days = EXCLUDED.calculation_period_days, - target_rate = EXCLUDED.target_rate; +-- INSERT INTO turnover_config (id, category_id, vendor, calculation_period_days, target_rate) +-- VALUES (1, NULL, NULL, 30, 1.0) +-- ON CONFLICT (id) DO UPDATE SET +-- calculation_period_days = EXCLUDED.calculation_period_days, +-- target_rate = EXCLUDED.target_rate; --- Insert default seasonality factors (neutral) -INSERT INTO sales_seasonality (month, seasonality_factor) -VALUES - (1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), - (7, 0), (8, 0), (9, 0), (10, 0), (11, 0), (12, 0) -ON CONFLICT (month) DO UPDATE SET - last_updated = CURRENT_TIMESTAMP; +-- -- Insert default seasonality factors (neutral) +-- INSERT INTO sales_seasonality (month, seasonality_factor) +-- VALUES +-- (1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), +-- (7, 0), (8, 0), (9, 0), (10, 0), (11, 0), (12, 0) +-- ON CONFLICT (month) DO UPDATE SET +-- last_updated = CURRENT_TIMESTAMP; --- Insert default values -INSERT INTO financial_calc_config (id, order_cost, holding_rate, service_level_z_score, min_reorder_qty, default_reorder_qty, default_safety_stock) -VALUES (1, 25.00, 0.25, 1.96, 1, 5, 5) -ON CONFLICT (id) DO UPDATE SET - order_cost = EXCLUDED.order_cost, - holding_rate = EXCLUDED.holding_rate, - service_level_z_score = EXCLUDED.service_level_z_score, - min_reorder_qty = EXCLUDED.min_reorder_qty, - default_reorder_qty = EXCLUDED.default_reorder_qty, - default_safety_stock = EXCLUDED.default_safety_stock; +-- -- Insert default values +-- INSERT INTO financial_calc_config (id, order_cost, holding_rate, service_level_z_score, min_reorder_qty, default_reorder_qty, default_safety_stock) +-- VALUES (1, 25.00, 0.25, 1.96, 1, 5, 5) +-- ON CONFLICT (id) DO UPDATE SET +-- order_cost = EXCLUDED.order_cost, +-- holding_rate = EXCLUDED.holding_rate, +-- service_level_z_score = EXCLUDED.service_level_z_score, +-- min_reorder_qty = EXCLUDED.min_reorder_qty, +-- default_reorder_qty = EXCLUDED.default_reorder_qty, +-- default_safety_stock = 
EXCLUDED.default_safety_stock; --- View to show thresholds with category names -CREATE OR REPLACE VIEW stock_thresholds_view AS -SELECT - st.*, - c.name as category_name, - CASE - WHEN st.category_id IS NULL AND st.vendor IS NULL THEN 'Global Default' - WHEN st.category_id IS NULL THEN 'Vendor: ' || st.vendor - WHEN st.vendor IS NULL THEN 'Category: ' || c.name - ELSE 'Category: ' || c.name || ' / Vendor: ' || st.vendor - END as threshold_scope -FROM - stock_thresholds st -LEFT JOIN - categories c ON st.category_id = c.cat_id -ORDER BY - CASE - WHEN st.category_id IS NULL AND st.vendor IS NULL THEN 1 - WHEN st.category_id IS NULL THEN 2 - WHEN st.vendor IS NULL THEN 3 - ELSE 4 - END, - c.name, - st.vendor; +-- -- View to show thresholds with category names +-- CREATE OR REPLACE VIEW stock_thresholds_view AS +-- SELECT +-- st.*, +-- c.name as category_name, +-- CASE +-- WHEN st.category_id IS NULL AND st.vendor IS NULL THEN 'Global Default' +-- WHEN st.category_id IS NULL THEN 'Vendor: ' || st.vendor +-- WHEN st.vendor IS NULL THEN 'Category: ' || c.name +-- ELSE 'Category: ' || c.name || ' / Vendor: ' || st.vendor +-- END as threshold_scope +-- FROM +-- stock_thresholds st +-- LEFT JOIN +-- categories c ON st.category_id = c.cat_id +-- ORDER BY +-- CASE +-- WHEN st.category_id IS NULL AND st.vendor IS NULL THEN 1 +-- WHEN st.category_id IS NULL THEN 2 +-- WHEN st.vendor IS NULL THEN 3 +-- ELSE 4 +-- END, +-- c.name, +-- st.vendor; -- History and status tables CREATE TABLE IF NOT EXISTS calculate_history ( diff --git a/inventory-server/db/new-metrics-schema.sql b/inventory-server/db/metrics-schema-new.sql similarity index 100% rename from inventory-server/db/new-metrics-schema.sql rename to inventory-server/db/metrics-schema-new.sql diff --git a/inventory-server/scripts/metrics new/update_daily_snapshots.sql b/inventory-server/scripts/metrics new/update_daily_snapshots.sql new file mode 100644 index 0000000..c99c0d2 --- /dev/null +++ b/inventory-server/scripts/metrics new/update_daily_snapshots.sql @@ -0,0 +1,142 @@ +-- Description: Calculates and updates daily aggregated product data for the current day. +-- Uses UPSERT (INSERT ON CONFLICT UPDATE) for idempotency. +-- Dependencies: Core import tables (products, orders, purchase_orders), calculate_status table. +-- Frequency: Hourly (Run ~5-10 minutes after hourly data import completes). + +DO $$ +DECLARE + _module_name VARCHAR := 'daily_snapshots'; + _start_time TIMESTAMPTZ := clock_timestamp(); -- Time execution started + _last_calc_time TIMESTAMPTZ; + _target_date DATE := CURRENT_DATE; -- Always recalculate today for simplicity with hourly runs +BEGIN + -- Get the timestamp before the last successful run of this module + SELECT last_calculation_timestamp INTO _last_calc_time + FROM public.calculate_status + WHERE module_name = _module_name; + + RAISE NOTICE 'Running % for date %. 
Start Time: %', _module_name, _target_date, _start_time; + + -- Use CTEs to aggregate data for the target date + WITH SalesData AS ( + SELECT + p.pid, + p.sku, + -- Aggregate Sales (Quantity > 0, Status not Canceled/Returned) + COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN o.quantity ELSE 0 END), 0) AS units_sold, + COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN o.price * o.quantity ELSE 0 END), 0.00) AS gross_revenue_unadjusted, -- Before discount + COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN o.discount ELSE 0 END), 0.00) AS discounts, + COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN COALESCE(o.costeach, p.landing_cost_price, p.cost_price) * o.quantity ELSE 0 END), 0.00) AS cogs, + COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN p.regular_price * o.quantity ELSE 0 END), 0.00) AS gross_regular_revenue, -- Use current regular price for simplicity here + + -- Aggregate Returns (Quantity < 0 or Status = Returned) + COALESCE(SUM(CASE WHEN o.quantity < 0 OR COALESCE(o.status, 'pending') = 'returned' THEN ABS(o.quantity) ELSE 0 END), 0) AS units_returned, + COALESCE(SUM(CASE WHEN o.quantity < 0 OR COALESCE(o.status, 'pending') = 'returned' THEN o.price * ABS(o.quantity) ELSE 0 END), 0.00) AS returns_revenue + FROM public.products p -- Start from products to include those with no orders today + LEFT JOIN public.orders o + ON p.pid = o.pid + AND o.date >= _target_date -- Filter orders for the target date + AND o.date < _target_date + INTERVAL '1 day' + GROUP BY p.pid, p.sku + ), + ReceivingData AS ( + SELECT + po.pid, + COALESCE(SUM((rh.item->>'qty')::numeric), 0) AS units_received, + COALESCE(SUM((rh.item->>'qty')::numeric * COALESCE((rh.item->>'cost')::numeric, po.cost_price)), 0.00) AS cost_received + FROM public.purchase_orders po + CROSS JOIN LATERAL jsonb_array_elements(po.receiving_history) AS rh(item) + WHERE (rh.item->>'received_at')::date = _target_date -- Filter receipts for the target date + GROUP BY po.pid + ), + CurrentStock AS ( + -- Select current stock values directly from products table + SELECT + pid, + stock_quantity, + COALESCE(landing_cost_price, cost_price, 0.00) as effective_cost_price, + COALESCE(price, 0.00) as current_price, + COALESCE(regular_price, 0.00) as current_regular_price + FROM public.products + ) + -- Upsert into the daily snapshots table + INSERT INTO public.daily_product_snapshots ( + snapshot_date, + pid, + sku, + eod_stock_quantity, + eod_stock_cost, + eod_stock_retail, + eod_stock_gross, + stockout_flag, + units_sold, + units_returned, + gross_revenue, + discounts, + returns_revenue, + net_revenue, + cogs, + gross_regular_revenue, + profit, + units_received, + cost_received, + calculation_timestamp + ) + SELECT + _target_date AS snapshot_date, + p.pid, + p.sku, + -- Inventory Metrics (Using CurrentStock) + cs.stock_quantity AS eod_stock_quantity, + cs.stock_quantity * cs.effective_cost_price AS eod_stock_cost, + cs.stock_quantity * cs.current_price AS eod_stock_retail, + cs.stock_quantity * cs.current_regular_price AS eod_stock_gross, + (cs.stock_quantity <= 0) AS stockout_flag, + -- Sales Metrics (From SalesData) + COALESCE(sd.units_sold, 0), + COALESCE(sd.units_returned, 0), + COALESCE(sd.gross_revenue_unadjusted, 0.00), + COALESCE(sd.discounts, 
0.00), + COALESCE(sd.returns_revenue, 0.00), + COALESCE(sd.gross_revenue_unadjusted, 0.00) - COALESCE(sd.discounts, 0.00) AS net_revenue, + COALESCE(sd.cogs, 0.00), + COALESCE(sd.gross_regular_revenue, 0.00), + (COALESCE(sd.gross_revenue_unadjusted, 0.00) - COALESCE(sd.discounts, 0.00)) - COALESCE(sd.cogs, 0.00) AS profit, -- Basic profit: Net Revenue - COGS + -- Receiving Metrics (From ReceivingData) + COALESCE(rd.units_received, 0), + COALESCE(rd.cost_received, 0.00), + _start_time -- Timestamp of this calculation run + FROM public.products p + LEFT JOIN CurrentStock cs ON p.pid = cs.pid + LEFT JOIN SalesData sd ON p.pid = sd.pid + LEFT JOIN ReceivingData rd ON p.pid = rd.pid + WHERE p.pid IS NOT NULL -- Ensure we only insert for existing products + + ON CONFLICT (snapshot_date, pid) DO UPDATE SET + sku = EXCLUDED.sku, + eod_stock_quantity = EXCLUDED.eod_stock_quantity, + eod_stock_cost = EXCLUDED.eod_stock_cost, + eod_stock_retail = EXCLUDED.eod_stock_retail, + eod_stock_gross = EXCLUDED.eod_stock_gross, + stockout_flag = EXCLUDED.stockout_flag, + units_sold = EXCLUDED.units_sold, + units_returned = EXCLUDED.units_returned, + gross_revenue = EXCLUDED.gross_revenue, + discounts = EXCLUDED.discounts, + returns_revenue = EXCLUDED.returns_revenue, + net_revenue = EXCLUDED.net_revenue, + cogs = EXCLUDED.cogs, + gross_regular_revenue = EXCLUDED.gross_regular_revenue, + profit = EXCLUDED.profit, + units_received = EXCLUDED.units_received, + cost_received = EXCLUDED.cost_received, + calculation_timestamp = EXCLUDED.calculation_timestamp; -- Use the timestamp from this run + + -- Update the status table with the timestamp from the START of this run + UPDATE public.calculate_status + SET last_calculation_timestamp = _start_time + WHERE module_name = _module_name; + + RAISE NOTICE 'Finished % for date %. Duration: %', _module_name, _target_date, clock_timestamp() - _start_time; + +END $$; \ No newline at end of file diff --git a/inventory-server/scripts/metrics new/update_periodic_metrics.sql b/inventory-server/scripts/metrics new/update_periodic_metrics.sql new file mode 100644 index 0000000..5988c81 --- /dev/null +++ b/inventory-server/scripts/metrics new/update_periodic_metrics.sql @@ -0,0 +1,114 @@ +-- Description: Calculates metrics that don't need hourly updates, like ABC class +-- and average lead time. +-- Dependencies: product_metrics, purchase_orders, settings_global, calculate_status. +-- Frequency: Daily or Weekly (e.g., run via cron job overnight). + +DO $$ +DECLARE + _module_name VARCHAR := 'periodic_metrics'; + _start_time TIMESTAMPTZ := clock_timestamp(); + _last_calc_time TIMESTAMPTZ; + _abc_basis VARCHAR; + _abc_period INT; + _threshold_a NUMERIC; + _threshold_b NUMERIC; +BEGIN + -- Get the timestamp before the last successful run of this module + SELECT last_calculation_timestamp INTO _last_calc_time + FROM public.calculate_status + WHERE module_name = _module_name; + + RAISE NOTICE 'Running % module. Start Time: %', _module_name, _start_time; + + -- 1. Calculate Average Lead Time + RAISE NOTICE 'Calculating Average Lead Time...'; + WITH LeadTimes AS ( + SELECT + pid, + AVG(GREATEST(1, DATE_PART('day', last_received_date - date))) AS avg_days -- Use GREATEST(1,...) 
to avoid 0 or negative days + FROM public.purchase_orders + WHERE status = 'received' -- Or potentially 'full_received' if using that status + AND last_received_date IS NOT NULL + AND date IS NOT NULL + AND last_received_date >= date -- Ensure received date is not before order date + GROUP BY pid + ) + UPDATE public.product_metrics pm + SET avg_lead_time_days = lt.avg_days::int + FROM LeadTimes lt + WHERE pm.pid = lt.pid + AND pm.avg_lead_time_days IS DISTINCT FROM lt.avg_days::int; -- Only update if changed + RAISE NOTICE 'Finished Average Lead Time calculation.'; + + + -- 2. Calculate ABC Classification + RAISE NOTICE 'Calculating ABC Classification...'; + -- Get ABC settings + SELECT setting_value INTO _abc_basis FROM public.settings_global WHERE setting_key = 'abc_calculation_basis' LIMIT 1; + SELECT setting_value::numeric INTO _threshold_a FROM public.settings_global WHERE setting_key = 'abc_revenue_threshold_a' LIMIT 1; + SELECT setting_value::numeric INTO _threshold_b FROM public.settings_global WHERE setting_key = 'abc_revenue_threshold_b' LIMIT 1; + _abc_basis := COALESCE(_abc_basis, 'revenue_30d'); -- Default basis + _threshold_a := COALESCE(_threshold_a, 0.80); + _threshold_b := COALESCE(_threshold_b, 0.95); + + RAISE NOTICE 'Using ABC Basis: %, Threshold A: %, Threshold B: %', _abc_basis, _threshold_a, _threshold_b; + + WITH RankedProducts AS ( + SELECT + pid, + -- Dynamically select the metric based on setting + CASE _abc_basis + WHEN 'sales_30d' THEN COALESCE(sales_30d, 0) + WHEN 'lifetime_revenue' THEN COALESCE(lifetime_revenue, 0)::numeric -- Cast needed if different type + ELSE COALESCE(revenue_30d, 0) -- Default to revenue_30d + END AS metric_value + FROM public.product_metrics + WHERE is_replenishable = TRUE -- Typically only classify replenishable items + ), + Cumulative AS ( + SELECT + pid, + metric_value, + SUM(metric_value) OVER (ORDER BY metric_value DESC NULLS LAST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as cumulative_metric, + SUM(metric_value) OVER () as total_metric + FROM RankedProducts + WHERE metric_value > 0 -- Exclude items with no contribution + ) + UPDATE public.product_metrics pm + SET abc_class = + CASE + WHEN c.cumulative_metric / NULLIF(c.total_metric, 0) <= _threshold_a THEN 'A' + WHEN c.cumulative_metric / NULLIF(c.total_metric, 0) <= _threshold_b THEN 'B' + ELSE 'C' + END + FROM Cumulative c + WHERE pm.pid = c.pid + AND pm.abc_class IS DISTINCT FROM ( -- Only update if changed + CASE + WHEN c.cumulative_metric / NULLIF(c.total_metric, 0) <= _threshold_a THEN 'A' + WHEN c.cumulative_metric / NULLIF(c.total_metric, 0) <= _threshold_b THEN 'B' + ELSE 'C' + END); + + -- Set non-contributing or non-replenishable to 'C' or NULL if preferred + UPDATE public.product_metrics + SET abc_class = 'C' -- Or NULL + WHERE abc_class IS NULL AND is_replenishable = TRUE; -- Catch those with 0 metric value + + UPDATE public.product_metrics + SET abc_class = NULL -- Or 'N/A'? + WHERE is_replenishable = FALSE AND abc_class IS NOT NULL; -- Unclassify non-replenishable items + + + RAISE NOTICE 'Finished ABC Classification calculation.'; + + -- Add other periodic calculations here if needed (e.g., recalculating first/last dates) + + -- Update the status table with the timestamp from the START of this run + UPDATE public.calculate_status + SET last_calculation_timestamp = _start_time + WHERE module_name = _module_name; + + RAISE NOTICE 'Finished % module. 
Duration: %', _module_name, clock_timestamp() - _start_time; + +END $$; \ No newline at end of file diff --git a/inventory-server/scripts/metrics new/update_product_metrics.sql b/inventory-server/scripts/metrics new/update_product_metrics.sql new file mode 100644 index 0000000..bac9653 --- /dev/null +++ b/inventory-server/scripts/metrics new/update_product_metrics.sql @@ -0,0 +1,304 @@ +-- Description: Calculates and updates the main product_metrics table based on current data +-- and aggregated daily snapshots. Uses UPSERT for idempotency. +-- Dependencies: Core import tables, daily_product_snapshots, configuration tables, calculate_status. +-- Frequency: Hourly (Run AFTER update_daily_snapshots.sql completes). + +DO $$ +DECLARE + _module_name VARCHAR := 'product_metrics'; + _start_time TIMESTAMPTZ := clock_timestamp(); + _last_calc_time TIMESTAMPTZ; + _current_date DATE := CURRENT_DATE; +BEGIN + -- Get the timestamp before the last successful run of this module + SELECT last_calculation_timestamp INTO _last_calc_time + FROM public.calculate_status + WHERE module_name = _module_name; + + RAISE NOTICE 'Running % module. Start Time: %', _module_name, _start_time; + + -- Use CTEs to gather all necessary information + WITH CurrentInfo AS ( + SELECT + p.pid, + p.sku, + p.title, + p.brand, + p.vendor, + COALESCE(p.image_175, p.image) as image_url, + p.visible as is_visible, + p.replenishable as is_replenishable, + COALESCE(p.price, 0.00) as current_price, + COALESCE(p.regular_price, 0.00) as current_regular_price, + COALESCE(p.cost_price, 0.00) as current_cost_price, + COALESCE(p.landing_cost_price, p.cost_price, 0.00) as current_effective_cost, -- Use landing if available, else cost + p.stock_quantity as current_stock, + p.created_at, + p.first_received, + p.date_last_sold, + p.moq, + p.uom -- Assuming UOM logic is handled elsewhere or simple (e.g., 1=each) + FROM public.products p + ), + OnOrderInfo AS ( + SELECT + pid, + COALESCE(SUM(ordered - received), 0) AS on_order_qty, + COALESCE(SUM((ordered - received) * cost_price), 0.00) AS on_order_cost, + MIN(expected_date) AS earliest_expected_date + FROM public.purchase_orders + WHERE status IN ('open', 'partially_received', 'ordered', 'preordered', 'receiving_started', 'electronically_sent', 'electronically_ready_send') -- Adjust based on your status workflow representing active POs not fully received + AND (ordered - received) > 0 + GROUP BY pid + ), + HistoricalDates AS ( + -- Note: Calculating these MIN/MAX values hourly can be slow on large tables. + -- Consider calculating periodically or storing on products if import can populate them. 
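    -- Assumption about the JSON shape (matching how update_daily_snapshots.sql also parses it):
    -- receiving_history is a JSONB array of objects such as [{"received_at": "...", "qty": ..., "cost": ...}];
    -- only 'received_at' is needed in this CTE.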
+ SELECT + p.pid, + MIN(o.date)::date AS date_first_sold, + MAX(o.date)::date AS max_order_date, -- Use MAX for potential recalc of date_last_sold + MIN(rh.first_receipt_date) AS date_first_received_calc, + MAX(rh.last_receipt_date) AS date_last_received_calc + FROM public.products p + LEFT JOIN public.orders o ON p.pid = o.pid AND o.quantity > 0 AND o.status NOT IN ('canceled', 'returned') + LEFT JOIN ( + SELECT + po.pid, + MIN((rh.item->>'received_at')::date) as first_receipt_date, + MAX((rh.item->>'received_at')::date) as last_receipt_date + FROM public.purchase_orders po + CROSS JOIN LATERAL jsonb_array_elements(po.receiving_history) AS rh(item) + WHERE jsonb_typeof(po.receiving_history) = 'array' AND jsonb_array_length(po.receiving_history) > 0 + GROUP BY po.pid + ) rh ON p.pid = rh.pid + GROUP BY p.pid + ), + SnapshotAggregates AS ( + SELECT + pid, + -- Rolling periods (ensure dates are inclusive/exclusive as needed) + SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '6 days' THEN units_sold ELSE 0 END) AS sales_7d, + SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '6 days' THEN net_revenue ELSE 0 END) AS revenue_7d, + SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '13 days' THEN units_sold ELSE 0 END) AS sales_14d, + SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '13 days' THEN net_revenue ELSE 0 END) AS revenue_14d, + SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' THEN units_sold ELSE 0 END) AS sales_30d, + SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' THEN net_revenue ELSE 0 END) AS revenue_30d, + SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' THEN cogs ELSE 0 END) AS cogs_30d, + SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' THEN profit ELSE 0 END) AS profit_30d, + SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' THEN units_returned ELSE 0 END) AS returns_units_30d, + SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' THEN returns_revenue ELSE 0 END) AS returns_revenue_30d, + SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' THEN discounts ELSE 0 END) AS discounts_30d, + SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' THEN gross_revenue ELSE 0 END) AS gross_revenue_30d, + SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' THEN gross_regular_revenue ELSE 0 END) AS gross_regular_revenue_30d, + SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' AND stockout_flag THEN 1 ELSE 0 END) AS stockout_days_30d, + SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '364 days' THEN units_sold ELSE 0 END) AS sales_365d, + SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '364 days' THEN net_revenue ELSE 0 END) AS revenue_365d, + SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' THEN units_received ELSE 0 END) AS received_qty_30d, + SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' THEN cost_received ELSE 0 END) AS received_cost_30d, + + -- Averages (check for NULLIF 0 days in period if filtering dates) + AVG(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' THEN eod_stock_quantity END) AS avg_stock_units_30d, + AVG(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' THEN eod_stock_cost END) AS avg_stock_cost_30d, + AVG(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' THEN eod_stock_retail END) AS avg_stock_retail_30d, + AVG(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' THEN eod_stock_gross END) AS 
avg_stock_gross_30d, + + -- Lifetime + SUM(units_sold) AS lifetime_sales, + SUM(net_revenue) AS lifetime_revenue, + + -- Yesterday + SUM(CASE WHEN snapshot_date = _current_date - INTERVAL '1 day' THEN units_sold ELSE 0 END) as yesterday_sales + + FROM public.daily_product_snapshots + WHERE snapshot_date <= _current_date -- Include today's snapshot + AND snapshot_date >= _current_date - INTERVAL '365 days' -- Limit history scan slightly + GROUP BY pid + ), + FirstPeriodMetrics AS ( + SELECT + pid, + date_first_sold, + SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '6 days' THEN units_sold ELSE 0 END) AS first_7_days_sales, + SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '6 days' THEN net_revenue ELSE 0 END) AS first_7_days_revenue, + SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '29 days' THEN units_sold ELSE 0 END) AS first_30_days_sales, + SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '29 days' THEN net_revenue ELSE 0 END) AS first_30_days_revenue, + SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '59 days' THEN units_sold ELSE 0 END) AS first_60_days_sales, + SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '59 days' THEN net_revenue ELSE 0 END) AS first_60_days_revenue, + SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '89 days' THEN units_sold ELSE 0 END) AS first_90_days_sales, + SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '89 days' THEN net_revenue ELSE 0 END) AS first_90_days_revenue + FROM public.daily_product_snapshots ds + JOIN HistoricalDates hd USING(pid) + WHERE date_first_sold IS NOT NULL + AND snapshot_date >= date_first_sold + AND snapshot_date <= date_first_sold + INTERVAL '90 days' -- Limit scan range + GROUP BY pid, date_first_sold + ), + Settings AS ( + SELECT + p.pid, + COALESCE(sp.lead_time_days, sv.default_lead_time_days, (SELECT setting_value FROM settings_global WHERE setting_key = 'default_lead_time_days')::int, 14) AS effective_lead_time, + COALESCE(sp.days_of_stock, sv.default_days_of_stock, (SELECT setting_value FROM settings_global WHERE setting_key = 'default_days_of_stock')::int, 30) AS effective_days_of_stock, + COALESCE(sp.safety_stock, 0) AS effective_safety_stock, -- Assuming safety stock is units, not days from global for now + COALESCE(sp.exclude_from_forecast, FALSE) AS exclude_forecast + FROM public.products p + LEFT JOIN public.settings_product sp ON p.pid = sp.pid + LEFT JOIN public.settings_vendor sv ON p.vendor = sv.vendor + ) + -- Final UPSERT into product_metrics + INSERT INTO public.product_metrics ( + pid, last_calculated, sku, title, brand, vendor, image_url, is_visible, is_replenishable, + current_price, current_regular_price, current_cost_price, current_landing_cost_price, + current_stock, current_stock_cost, current_stock_retail, current_stock_gross, + on_order_qty, on_order_cost, on_order_retail, earliest_expected_date, + date_created, date_first_received, date_last_received, date_first_sold, date_last_sold, age_days, + sales_7d, revenue_7d, sales_14d, revenue_14d, sales_30d, revenue_30d, cogs_30d, profit_30d, + returns_units_30d, returns_revenue_30d, discounts_30d, gross_revenue_30d, gross_regular_revenue_30d, + stockout_days_30d, sales_365d, revenue_365d, + avg_stock_units_30d, avg_stock_cost_30d, avg_stock_retail_30d, avg_stock_gross_30d, + 
received_qty_30d, received_cost_30d, + lifetime_sales, lifetime_revenue, + first_7_days_sales, first_7_days_revenue, first_30_days_sales, first_30_days_revenue, + first_60_days_sales, first_60_days_revenue, first_90_days_sales, first_90_days_revenue, + asp_30d, acp_30d, avg_ros_30d, avg_sales_per_day_30d, avg_sales_per_month_30d, + margin_30d, markup_30d, gmroi_30d, stockturn_30d, return_rate_30d, discount_rate_30d, + stockout_rate_30d, markdown_30d, markdown_rate_30d, sell_through_30d, + -- avg_lead_time_days, -- Calculated periodically + -- abc_class, -- Calculated periodically + sales_velocity_daily, config_lead_time, config_days_of_stock, config_safety_stock, + planning_period_days, lead_time_forecast_units, days_of_stock_forecast_units, + planning_period_forecast_units, lead_time_closing_stock, days_of_stock_closing_stock, + replenishment_needed_raw, replenishment_units, replenishment_cost, replenishment_retail, replenishment_profit, + to_order_units, forecast_lost_sales_units, forecast_lost_revenue, + stock_cover_in_days, po_cover_in_days, sells_out_in_days, replenish_date, + overstocked_units, overstocked_cost, overstocked_retail, is_old_stock, + yesterday_sales + ) + SELECT + ci.pid, _start_time, ci.sku, ci.title, ci.brand, ci.vendor, ci.image_url, ci.is_visible, ci.is_replenishable, + ci.current_price, ci.current_regular_price, ci.current_cost_price, ci.current_effective_cost, + ci.current_stock, ci.current_stock * ci.current_effective_cost, ci.current_stock * ci.current_price, ci.current_stock * ci.current_regular_price, + COALESCE(ooi.on_order_qty, 0), COALESCE(ooi.on_order_cost, 0.00), COALESCE(ooi.on_order_qty, 0) * ci.current_price, ooi.earliest_expected_date, + ci.created_at::date, COALESCE(ci.first_received::date, hd.date_first_received_calc), hd.date_last_received_calc, hd.date_first_sold, COALESCE(ci.date_last_sold, hd.max_order_date), DATE_PART('day', _current_date - LEAST(ci.created_at::date, hd.date_first_sold)), + sa.sales_7d, sa.revenue_7d, sa.sales_14d, sa.revenue_14d, sa.sales_30d, sa.revenue_30d, sa.cogs_30d, sa.profit_30d, + sa.returns_units_30d, sa.returns_revenue_30d, sa.discounts_30d, sa.gross_revenue_30d, sa.gross_regular_revenue_30d, + sa.stockout_days_30d, sa.sales_365d, sa.revenue_365d, + sa.avg_stock_units_30d, sa.avg_stock_cost_30d, sa.avg_stock_retail_30d, sa.avg_stock_gross_30d, + sa.received_qty_30d, sa.received_cost_30d, + sa.lifetime_sales, sa.lifetime_revenue, + fpm.first_7_days_sales, fpm.first_7_days_revenue, fpm.first_30_days_sales, fpm.first_30_days_revenue, + fpm.first_60_days_sales, fpm.first_60_days_revenue, fpm.first_90_days_sales, fpm.first_90_days_revenue, + + -- Calculated KPIs + sa.revenue_30d / NULLIF(sa.sales_30d, 0) AS asp_30d, + sa.cogs_30d / NULLIF(sa.sales_30d, 0) AS acp_30d, + sa.profit_30d / NULLIF(sa.sales_30d, 0) AS avg_ros_30d, + sa.sales_30d / 30.0 AS avg_sales_per_day_30d, + sa.sales_30d AS avg_sales_per_month_30d, -- Using 30d sales as proxy for month + (sa.profit_30d / NULLIF(sa.revenue_30d, 0)) * 100 AS margin_30d, + (sa.profit_30d / NULLIF(sa.cogs_30d, 0)) * 100 AS markup_30d, + sa.profit_30d / NULLIF(sa.avg_stock_cost_30d, 0) AS gmroi_30d, + sa.sales_30d / NULLIF(sa.avg_stock_units_30d, 0) AS stockturn_30d, + (sa.returns_units_30d / NULLIF(sa.sales_30d + sa.returns_units_30d, 0)) * 100 AS return_rate_30d, + (sa.discounts_30d / NULLIF(sa.gross_revenue_30d, 0)) * 100 AS discount_rate_30d, + (sa.stockout_days_30d / 30.0) * 100 AS stockout_rate_30d, + sa.gross_regular_revenue_30d - sa.gross_revenue_30d AS markdown_30d, + 
((sa.gross_regular_revenue_30d - sa.gross_revenue_30d) / NULLIF(sa.gross_regular_revenue_30d, 0)) * 100 AS markdown_rate_30d, + (sa.sales_30d / NULLIF(ci.current_stock + sa.sales_30d, 0)) * 100 AS sell_through_30d, + + -- Forecasting intermediate values + (sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) AS sales_velocity_daily, + s.effective_lead_time AS config_lead_time, + s.effective_days_of_stock AS config_days_of_stock, + s.effective_safety_stock AS config_safety_stock, + (s.effective_lead_time + s.effective_days_of_stock) AS planning_period_days, + (sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_lead_time AS lead_time_forecast_units, + (sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_days_of_stock AS days_of_stock_forecast_units, + ((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_lead_time) + ((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_days_of_stock) AS planning_period_forecast_units, + (ci.current_stock + COALESCE(ooi.on_order_qty, 0) - ((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_lead_time)) AS lead_time_closing_stock, + ((ci.current_stock + COALESCE(ooi.on_order_qty, 0) - ((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_lead_time))) - ((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_days_of_stock) AS days_of_stock_closing_stock, + (((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_lead_time) + ((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_days_of_stock)) + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0) AS replenishment_needed_raw, + + -- Final Forecasting / Replenishment Metrics (apply CEILING/GREATEST/etc.) 
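    -- In words: daily velocity = sales_30d / (30 - stockout_days_30d); raw replenishment need =
    -- (velocity * lead_time) + (velocity * days_of_stock) + safety_stock - current_stock - on_order,
    -- floored at zero and rounded up to whole units below.
    -- Illustrative numbers (assumed): 60 units sold over 30 days with 0 stockout days -> velocity 2/day;
    -- lead time 14 and days of stock 30 -> forecast 2*14 + 2*30 = 88; with safety stock 0, 40 in stock and
    -- 10 on order, replenishment_units = CEILING(GREATEST(0, 88 + 0 - 40 - 10)) = 38.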
+ -- Note: These calculations are nested for clarity, can be simplified in prod + CEILING(GREATEST(0, ((((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_lead_time) + ((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_days_of_stock)) + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0))))::int AS replenishment_units, + (CEILING(GREATEST(0, ((((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_lead_time) + ((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_days_of_stock)) + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0))))::int) * ci.current_effective_cost AS replenishment_cost, + (CEILING(GREATEST(0, ((((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_lead_time) + ((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_days_of_stock)) + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0))))::int) * ci.current_price AS replenishment_retail, + (CEILING(GREATEST(0, ((((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_lead_time) + ((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_days_of_stock)) + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0))))::int) * (ci.current_price - ci.current_effective_cost) AS replenishment_profit, + + -- Placeholder for To Order (Apply MOQ/UOM logic here if needed, otherwise equals replenishment) + CEILING(GREATEST(0, ((((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_lead_time) + ((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_days_of_stock)) + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0))))::int AS to_order_units, + + GREATEST(0, - (ci.current_stock + COALESCE(ooi.on_order_qty, 0) - ((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_lead_time))) AS forecast_lost_sales_units, + GREATEST(0, - (ci.current_stock + COALESCE(ooi.on_order_qty, 0) - ((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_lead_time))) * ci.current_price AS forecast_lost_revenue, + + ci.current_stock / NULLIF((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)), 0) AS stock_cover_in_days, + COALESCE(ooi.on_order_qty, 0) / NULLIF((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)), 0) AS po_cover_in_days, + (ci.current_stock + COALESCE(ooi.on_order_qty, 0)) / NULLIF((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)), 0) AS sells_out_in_days, + + -- Replenish Date: Date when stock is projected to hit safety stock, minus lead time + CASE + WHEN (sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) > 0 + THEN _current_date + FLOOR(GREATEST(0, ci.current_stock - s.effective_safety_stock) / (sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)))::int - s.effective_lead_time + ELSE NULL + END AS replenish_date, + + GREATEST(0, ci.current_stock - s.effective_safety_stock - (((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_lead_time) + ((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_days_of_stock)))::int AS overstocked_units, + (GREATEST(0, ci.current_stock - s.effective_safety_stock - (((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_lead_time) + ((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_days_of_stock)))) * ci.current_effective_cost AS overstocked_cost, + (GREATEST(0, ci.current_stock - s.effective_safety_stock - 
(((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_lead_time) + ((sa.sales_30d / NULLIF(30.0 - sa.stockout_days_30d, 0)) * s.effective_days_of_stock)))) * ci.current_price AS overstocked_retail, + + -- Old Stock Flag + (ci.created_at::date < _current_date - INTERVAL '60 day') AND + (COALESCE(ci.date_last_sold, hd.max_order_date) IS NULL OR COALESCE(ci.date_last_sold, hd.max_order_date) < _current_date - INTERVAL '60 day') AND + (hd.date_last_received_calc IS NULL OR hd.date_last_received_calc < _current_date - INTERVAL '60 day') AND + COALESCE(ooi.on_order_qty, 0) = 0 + AS is_old_stock, + + sa.yesterday_sales + + FROM CurrentInfo ci + LEFT JOIN OnOrderInfo ooi ON ci.pid = ooi.pid + LEFT JOIN HistoricalDates hd ON ci.pid = hd.pid + LEFT JOIN SnapshotAggregates sa ON ci.pid = sa.pid + LEFT JOIN FirstPeriodMetrics fpm ON ci.pid = fpm.pid + LEFT JOIN Settings s ON ci.pid = s.pid + WHERE s.exclude_forecast IS FALSE OR s.exclude_forecast IS NULL -- Exclude products explicitly marked + + ON CONFLICT (pid) DO UPDATE SET + last_calculated = EXCLUDED.last_calculated, + sku = EXCLUDED.sku, title = EXCLUDED.title, brand = EXCLUDED.brand, vendor = EXCLUDED.vendor, image_url = EXCLUDED.image_url, is_visible = EXCLUDED.is_visible, is_replenishable = EXCLUDED.is_replenishable, + current_price = EXCLUDED.current_price, current_regular_price = EXCLUDED.current_regular_price, current_cost_price = EXCLUDED.current_cost_price, current_landing_cost_price = EXCLUDED.current_landing_cost_price, + current_stock = EXCLUDED.current_stock, current_stock_cost = EXCLUDED.current_stock_cost, current_stock_retail = EXCLUDED.current_stock_retail, current_stock_gross = EXCLUDED.current_stock_gross, + on_order_qty = EXCLUDED.on_order_qty, on_order_cost = EXCLUDED.on_order_cost, on_order_retail = EXCLUDED.on_order_retail, earliest_expected_date = EXCLUDED.earliest_expected_date, + date_created = EXCLUDED.date_created, date_first_received = EXCLUDED.date_first_received, date_last_received = EXCLUDED.date_last_received, date_first_sold = EXCLUDED.date_first_sold, date_last_sold = EXCLUDED.date_last_sold, age_days = EXCLUDED.age_days, + sales_7d = EXCLUDED.sales_7d, revenue_7d = EXCLUDED.revenue_7d, sales_14d = EXCLUDED.sales_14d, revenue_14d = EXCLUDED.revenue_14d, sales_30d = EXCLUDED.sales_30d, revenue_30d = EXCLUDED.revenue_30d, cogs_30d = EXCLUDED.cogs_30d, profit_30d = EXCLUDED.profit_30d, + returns_units_30d = EXCLUDED.returns_units_30d, returns_revenue_30d = EXCLUDED.returns_revenue_30d, discounts_30d = EXCLUDED.discounts_30d, gross_revenue_30d = EXCLUDED.gross_revenue_30d, gross_regular_revenue_30d = EXCLUDED.gross_regular_revenue_30d, + stockout_days_30d = EXCLUDED.stockout_days_30d, sales_365d = EXCLUDED.sales_365d, revenue_365d = EXCLUDED.revenue_365d, + avg_stock_units_30d = EXCLUDED.avg_stock_units_30d, avg_stock_cost_30d = EXCLUDED.avg_stock_cost_30d, avg_stock_retail_30d = EXCLUDED.avg_stock_retail_30d, avg_stock_gross_30d = EXCLUDED.avg_stock_gross_30d, + received_qty_30d = EXCLUDED.received_qty_30d, received_cost_30d = EXCLUDED.received_cost_30d, + lifetime_sales = EXCLUDED.lifetime_sales, lifetime_revenue = EXCLUDED.lifetime_revenue, + first_7_days_sales = EXCLUDED.first_7_days_sales, first_7_days_revenue = EXCLUDED.first_7_days_revenue, first_30_days_sales = EXCLUDED.first_30_days_sales, first_30_days_revenue = EXCLUDED.first_30_days_revenue, + first_60_days_sales = EXCLUDED.first_60_days_sales, first_60_days_revenue = EXCLUDED.first_60_days_revenue, first_90_days_sales = 
EXCLUDED.first_90_days_sales, first_90_days_revenue = EXCLUDED.first_90_days_revenue, + asp_30d = EXCLUDED.asp_30d, acp_30d = EXCLUDED.acp_30d, avg_ros_30d = EXCLUDED.avg_ros_30d, avg_sales_per_day_30d = EXCLUDED.avg_sales_per_day_30d, avg_sales_per_month_30d = EXCLUDED.avg_sales_per_month_30d, + margin_30d = EXCLUDED.margin_30d, markup_30d = EXCLUDED.markup_30d, gmroi_30d = EXCLUDED.gmroi_30d, stockturn_30d = EXCLUDED.stockturn_30d, return_rate_30d = EXCLUDED.return_rate_30d, discount_rate_30d = EXCLUDED.discount_rate_30d, + stockout_rate_30d = EXCLUDED.stockout_rate_30d, markdown_30d = EXCLUDED.markdown_30d, markdown_rate_30d = EXCLUDED.markdown_rate_30d, sell_through_30d = EXCLUDED.sell_through_30d, + -- avg_lead_time_days = EXCLUDED.avg_lead_time_days, -- Updated Periodically + -- abc_class = EXCLUDED.abc_class, -- Updated Periodically + sales_velocity_daily = EXCLUDED.sales_velocity_daily, config_lead_time = EXCLUDED.config_lead_time, config_days_of_stock = EXCLUDED.config_days_of_stock, config_safety_stock = EXCLUDED.config_safety_stock, + planning_period_days = EXCLUDED.planning_period_days, lead_time_forecast_units = EXCLUDED.lead_time_forecast_units, days_of_stock_forecast_units = EXCLUDED.days_of_stock_forecast_units, + planning_period_forecast_units = EXCLUDED.planning_period_forecast_units, lead_time_closing_stock = EXCLUDED.lead_time_closing_stock, days_of_stock_closing_stock = EXCLUDED.days_of_stock_closing_stock, + replenishment_needed_raw = EXCLUDED.replenishment_needed_raw, replenishment_units = EXCLUDED.replenishment_units, replenishment_cost = EXCLUDED.replenishment_cost, replenishment_retail = EXCLUDED.replenishment_retail, replenishment_profit = EXCLUDED.replenishment_profit, + to_order_units = EXCLUDED.to_order_units, forecast_lost_sales_units = EXCLUDED.forecast_lost_sales_units, forecast_lost_revenue = EXCLUDED.forecast_lost_revenue, + stock_cover_in_days = EXCLUDED.stock_cover_in_days, po_cover_in_days = EXCLUDED.po_cover_in_days, sells_out_in_days = EXCLUDED.sells_out_in_days, replenish_date = EXCLUDED.replenish_date, + overstocked_units = EXCLUDED.overstocked_units, overstocked_cost = EXCLUDED.overstocked_cost, overstocked_retail = EXCLUDED.overstocked_retail, is_old_stock = EXCLUDED.is_old_stock, + yesterday_sales = EXCLUDED.yesterday_sales + ; + + -- Update the status table with the timestamp from the START of this run + UPDATE public.calculate_status + SET last_calculation_timestamp = _start_time + WHERE module_name = _module_name; + + RAISE NOTICE 'Finished % module. Duration: %', _module_name, clock_timestamp() - _start_time; + +END $$; \ No newline at end of file
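
All three scripts end by updating public.calculate_status, which is a no-op unless a row for each module already exists. A minimal seeding sketch follows, assuming calculate_status exposes at least the two columns the scripts reference (module_name, last_calculation_timestamp) and that its actual DDL lives with the history/status tables kept in config-schema.sql:

-- One-time seeding sketch (assumed column set; adjust to the real calculate_status definition).
INSERT INTO public.calculate_status (module_name, last_calculation_timestamp)
SELECT m.module_name, TIMESTAMPTZ '1970-01-01'  -- sentinel meaning "never calculated"
FROM (VALUES ('daily_snapshots'), ('product_metrics'), ('periodic_metrics')) AS m(module_name)
WHERE NOT EXISTS (
    SELECT 1 FROM public.calculate_status cs WHERE cs.module_name = m.module_name
);

-- Suggested run order, taken from the Frequency notes in each file header:
--   1. update_daily_snapshots.sql   -- hourly, ~5-10 minutes after the data import completes
--   2. update_product_metrics.sql   -- hourly, after step 1 completes
--   3. update_periodic_metrics.sql  -- daily or weekly (e.g. overnight cron)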