Improve and debug calculate metrics script

This commit is contained in:
2025-01-12 14:52:46 -05:00
parent ac8563325a
commit 271a40f2c5
4 changed files with 350 additions and 101 deletions

View File

@@ -30,6 +30,10 @@ CREATE TABLE IF NOT EXISTS product_metrics (
daily_sales_avg DECIMAL(10,3), daily_sales_avg DECIMAL(10,3),
weekly_sales_avg DECIMAL(10,3), weekly_sales_avg DECIMAL(10,3),
monthly_sales_avg DECIMAL(10,3), monthly_sales_avg DECIMAL(10,3),
avg_quantity_per_order DECIMAL(10,3),
number_of_orders INT,
first_sale_date DATE,
last_sale_date DATE,
-- Stock metrics -- Stock metrics
days_of_inventory INT, days_of_inventory INT,
weeks_of_inventory INT, weeks_of_inventory INT,
@@ -38,6 +42,7 @@ CREATE TABLE IF NOT EXISTS product_metrics (
-- Financial metrics -- Financial metrics
avg_margin_percent DECIMAL(10,3), avg_margin_percent DECIMAL(10,3),
total_revenue DECIMAL(10,3), total_revenue DECIMAL(10,3),
inventory_value DECIMAL(10,3),
-- Purchase metrics -- Purchase metrics
avg_lead_time_days INT, avg_lead_time_days INT,
last_purchase_date DATE, last_purchase_date DATE,
@@ -46,7 +51,8 @@ CREATE TABLE IF NOT EXISTS product_metrics (
abc_class CHAR(1), abc_class CHAR(1),
stock_status VARCHAR(20), stock_status VARCHAR(20),
PRIMARY KEY (product_id), PRIMARY KEY (product_id),
FOREIGN KEY (product_id) REFERENCES products(product_id) ON DELETE CASCADE FOREIGN KEY (product_id) REFERENCES products(product_id) ON DELETE CASCADE,
INDEX idx_metrics_revenue (total_revenue)
); );
-- New table for time-based aggregates -- New table for time-based aggregates
@@ -82,11 +88,103 @@ CREATE TABLE IF NOT EXISTS vendor_metrics (
PRIMARY KEY (vendor) PRIMARY KEY (vendor)
); );
-- New table for stock threshold configurations
-- Stock-threshold configuration. Each row targets a specific category, a
-- specific vendor, both, or neither; NULL acts as a wildcard, so the
-- (NULL, NULL) row is the global default.
CREATE TABLE IF NOT EXISTS stock_thresholds (
    id INT NOT NULL,                 -- Explicit ID (no AUTO_INCREMENT) so the seeded default row is always id = 1
    category_id BIGINT,              -- NULL means default/global threshold
    vendor VARCHAR(100),             -- NULL means applies to all vendors
    critical_days INT NOT NULL DEFAULT 7,    -- days of cover at/below which stock is 'Critical'
    reorder_days INT NOT NULL DEFAULT 14,    -- days of cover at/below which stock needs reorder
    overstock_days INT NOT NULL DEFAULT 90,  -- days of cover above which stock is 'Overstocked'
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    PRIMARY KEY (id),
    FOREIGN KEY (category_id) REFERENCES categories(id) ON DELETE CASCADE,
    -- NOTE(review): MySQL unique keys permit multiple NULLs, so this does NOT
    -- prevent several (NULL, NULL) "default" rows; the seeded id = 1 row is
    -- treated as the canonical default by the insert below.
    UNIQUE KEY unique_category_vendor (category_id, vendor)
    -- Removed idx_thresholds_category_vendor: the UNIQUE KEY above already
    -- creates an index on (category_id, vendor), making it redundant.
);
-- Seed the canonical global default row (id = 1, wildcard category/vendor).
-- Idempotent: re-running refreshes the day values via the duplicate-key path
-- instead of failing on the primary key.
INSERT INTO stock_thresholds
    (id, category_id, vendor, critical_days, reorder_days, overstock_days)
VALUES
    (1, NULL, NULL, 7, 14, 90)
ON DUPLICATE KEY UPDATE
    overstock_days = VALUES(overstock_days),
    reorder_days = VALUES(reorder_days),
    critical_days = VALUES(critical_days);
-- Re-enable foreign key checks -- Re-enable foreign key checks
SET FOREIGN_KEY_CHECKS = 1; SET FOREIGN_KEY_CHECKS = 1;
-- Create view for inventory health (after all tables are created) -- Create view for inventory health (after all tables are created)
CREATE OR REPLACE VIEW inventory_health AS CREATE OR REPLACE VIEW inventory_health AS
-- Resolve per-product stock thresholds with a four-level fallback, most
-- specific first: (category + vendor) -> (category only) -> (vendor only)
-- -> global default row -> hard-coded constant if the table is empty.
-- NOTE(review): each LIMIT 1 subquery has no ORDER BY, so when a product
-- belongs to multiple categories that each have a threshold row, the row
-- chosen is nondeterministic — consider ORDER BY st.id or a priority column.
WITH product_thresholds AS (
SELECT
p.product_id,
COALESCE(
-- Level 1: threshold matching this product's category AND vendor
(SELECT critical_days FROM stock_thresholds st
JOIN product_categories pc ON st.category_id = pc.category_id
WHERE pc.product_id = p.product_id
AND st.vendor = p.vendor LIMIT 1),
-- Level 2: threshold matching this product's category (any vendor)
(SELECT critical_days FROM stock_thresholds st
JOIN product_categories pc ON st.category_id = pc.category_id
WHERE pc.product_id = p.product_id
AND st.vendor IS NULL LIMIT 1),
-- Level 3: threshold matching this product's vendor (any category)
(SELECT critical_days FROM stock_thresholds st
WHERE st.category_id IS NULL
AND st.vendor = p.vendor LIMIT 1),
-- Level 4: the seeded global default row (NULL, NULL)
(SELECT critical_days FROM stock_thresholds st
WHERE st.category_id IS NULL
AND st.vendor IS NULL LIMIT 1),
-- Hard-coded fallback if stock_thresholds has no matching rows at all
7
) as critical_days,
COALESCE(
-- Level 1: threshold matching this product's category AND vendor
(SELECT reorder_days FROM stock_thresholds st
JOIN product_categories pc ON st.category_id = pc.category_id
WHERE pc.product_id = p.product_id
AND st.vendor = p.vendor LIMIT 1),
-- Level 2: threshold matching this product's category (any vendor)
(SELECT reorder_days FROM stock_thresholds st
JOIN product_categories pc ON st.category_id = pc.category_id
WHERE pc.product_id = p.product_id
AND st.vendor IS NULL LIMIT 1),
-- Level 3: threshold matching this product's vendor (any category)
(SELECT reorder_days FROM stock_thresholds st
WHERE st.category_id IS NULL
AND st.vendor = p.vendor LIMIT 1),
-- Level 4: the seeded global default row (NULL, NULL)
(SELECT reorder_days FROM stock_thresholds st
WHERE st.category_id IS NULL
AND st.vendor IS NULL LIMIT 1),
-- Hard-coded fallback if stock_thresholds has no matching rows at all
14
) as reorder_days,
COALESCE(
-- Level 1: threshold matching this product's category AND vendor
(SELECT overstock_days FROM stock_thresholds st
JOIN product_categories pc ON st.category_id = pc.category_id
WHERE pc.product_id = p.product_id
AND st.vendor = p.vendor LIMIT 1),
-- Level 2: threshold matching this product's category (any vendor)
(SELECT overstock_days FROM stock_thresholds st
JOIN product_categories pc ON st.category_id = pc.category_id
WHERE pc.product_id = p.product_id
AND st.vendor IS NULL LIMIT 1),
-- Level 3: threshold matching this product's vendor (any category)
(SELECT overstock_days FROM stock_thresholds st
WHERE st.category_id IS NULL
AND st.vendor = p.vendor LIMIT 1),
-- Level 4: the seeded global default row (NULL, NULL)
(SELECT overstock_days FROM stock_thresholds st
WHERE st.category_id IS NULL
AND st.vendor IS NULL LIMIT 1),
-- Hard-coded fallback if stock_thresholds has no matching rows at all
90
) as overstock_days
FROM products p
)
SELECT SELECT
p.product_id, p.product_id,
p.SKU, p.SKU,
@@ -97,15 +195,18 @@ SELECT
COALESCE(pm.reorder_point, 0) as reorder_point, COALESCE(pm.reorder_point, 0) as reorder_point,
COALESCE(pm.safety_stock, 0) as safety_stock, COALESCE(pm.safety_stock, 0) as safety_stock,
CASE CASE
WHEN p.stock_quantity <= COALESCE(pm.safety_stock, 0) THEN 'Critical' WHEN pm.daily_sales_avg = 0 THEN 'New'
WHEN p.stock_quantity <= COALESCE(pm.reorder_point, 0) THEN 'Reorder' WHEN p.stock_quantity <= CEIL(pm.daily_sales_avg * pt.critical_days) THEN 'Critical'
WHEN p.stock_quantity > (COALESCE(pm.daily_sales_avg, 0) * 90) THEN 'Overstocked' WHEN p.stock_quantity <= CEIL(pm.daily_sales_avg * pt.reorder_days) THEN 'Reorder'
WHEN p.stock_quantity > (pm.daily_sales_avg * pt.overstock_days) THEN 'Overstocked'
ELSE 'Healthy' ELSE 'Healthy'
END as stock_status END as stock_status
FROM FROM
products p products p
LEFT JOIN LEFT JOIN
product_metrics pm ON p.product_id = pm.product_id product_metrics pm ON p.product_id = pm.product_id
LEFT JOIN
product_thresholds pt ON p.product_id = pt.product_id
WHERE WHERE
p.managing_stock = true; p.managing_stock = true;

View File

@@ -0,0 +1,17 @@
-- Idempotent index migration for databases created before these indexes were
-- added to the CREATE TABLE definitions in the schema file; on fresh installs
-- every statement is a no-op because the index already exists.
-- NOTE(review): `CREATE INDEX IF NOT EXISTS` is MariaDB syntax — stock MySQL
-- does not accept IF NOT EXISTS on CREATE INDEX; confirm the target engine.
-- Indexes for orders table (per-product date-range sales aggregation)
CREATE INDEX IF NOT EXISTS idx_orders_product_date ON orders(product_id, date);
CREATE INDEX IF NOT EXISTS idx_orders_date ON orders(date);
-- Indexes for purchase_orders table (lead-time, status, and vendor lookups)
CREATE INDEX IF NOT EXISTS idx_po_product_date ON purchase_orders(product_id, date);
CREATE INDEX IF NOT EXISTS idx_po_product_status ON purchase_orders(product_id, status);
CREATE INDEX IF NOT EXISTS idx_po_vendor ON purchase_orders(vendor);
-- Indexes for product_metrics table (ABC classification ranks by revenue)
CREATE INDEX IF NOT EXISTS idx_metrics_revenue ON product_metrics(total_revenue);
-- Indexes for stock_thresholds table
-- NOTE(review): redundant wherever unique_category_vendor exists — a UNIQUE
-- key already provides an index on the same (category_id, vendor) columns.
CREATE INDEX IF NOT EXISTS idx_thresholds_category_vendor ON stock_thresholds(category_id, vendor);
-- Indexes for product_categories table (covering index for threshold joins)
CREATE INDEX IF NOT EXISTS idx_product_categories_both ON product_categories(product_id, category_id);

View File

@@ -50,7 +50,8 @@ CREATE TABLE product_categories (
FOREIGN KEY (product_id) REFERENCES products(product_id) ON DELETE CASCADE, FOREIGN KEY (product_id) REFERENCES products(product_id) ON DELETE CASCADE,
FOREIGN KEY (category_id) REFERENCES categories(id) ON DELETE CASCADE, FOREIGN KEY (category_id) REFERENCES categories(id) ON DELETE CASCADE,
INDEX idx_category (category_id), INDEX idx_category (category_id),
INDEX idx_product (product_id) INDEX idx_product (product_id),
INDEX idx_product_categories_both (product_id, category_id)
) ENGINE=InnoDB; ) ENGINE=InnoDB;
-- Create orders table with its indexes -- Create orders table with its indexes
@@ -80,6 +81,7 @@ CREATE TABLE orders (
INDEX idx_date (date), INDEX idx_date (date),
INDEX idx_status (status), INDEX idx_status (status),
INDEX idx_orders_metrics (product_id, date, canceled, quantity, price), INDEX idx_orders_metrics (product_id, date, canceled, quantity, price),
INDEX idx_orders_product_date (product_id, date),
UNIQUE KEY unique_order_product (order_number, product_id) UNIQUE KEY unique_order_product (order_number, product_id)
) ENGINE=InnoDB; ) ENGINE=InnoDB;
@@ -104,6 +106,8 @@ CREATE TABLE purchase_orders (
INDEX idx_vendor (vendor), INDEX idx_vendor (vendor),
INDEX idx_status (status), INDEX idx_status (status),
INDEX idx_purchase_orders_metrics (product_id, date, status, ordered, received), INDEX idx_purchase_orders_metrics (product_id, date, status, ordered, received),
INDEX idx_po_product_date (product_id, date),
INDEX idx_po_product_status (product_id, status),
UNIQUE KEY unique_po_product (po_id, product_id) UNIQUE KEY unique_po_product (po_id, product_id)
) ENGINE=InnoDB; ) ENGINE=InnoDB;

View File

@@ -106,7 +106,11 @@ async function calculateMetrics() {
try { try {
// Get total number of products // Get total number of products
const [countResult] = await connection.query('SELECT COUNT(*) as total FROM products'); const [countResult] = await connection.query('SELECT COUNT(*) as total FROM products')
.catch(err => {
logError(err, 'Failed to count products');
throw err;
});
totalProducts = countResult[0].total; totalProducts = countResult[0].total;
// Initial progress with percentage // Initial progress with percentage
@@ -128,7 +132,11 @@ async function calculateMetrics() {
throw new Error('Operation cancelled'); throw new Error('Operation cancelled');
} }
const [products] = await connection.query('SELECT product_id FROM products LIMIT ? OFFSET ?', [batchSize, offset]); const [products] = await connection.query('SELECT product_id, vendor FROM products LIMIT ? OFFSET ?', [batchSize, offset])
.catch(err => {
logError(err, `Failed to fetch products batch at offset ${offset}`);
throw err;
});
processedCount += products.length; processedCount += products.length;
// Update progress after each batch // Update progress after each batch
@@ -144,128 +152,242 @@ async function calculateMetrics() {
}); });
// Process the batch // Process the batch
const metricsUpdates = [];
for (const product of products) { for (const product of products) {
// Calculate sales metrics try {
const [salesMetrics] = await connection.query(` // Calculate sales metrics with trends
SELECT const [salesMetrics] = await connection.query(`
SUM(o.quantity) as total_quantity_sold, WITH sales_summary AS (
SUM((o.price - COALESCE(o.discount, 0)) * o.quantity) as total_revenue, SELECT
SUM(COALESCE(p.cost_price, 0) * o.quantity) as total_cost, SUM(o.quantity) as total_quantity_sold,
MAX(o.date) as last_sale_date SUM((o.price - COALESCE(o.discount, 0)) * o.quantity) as total_revenue,
FROM orders o SUM(COALESCE(p.cost_price, 0) * o.quantity) as total_cost,
JOIN products p ON o.product_id = p.product_id MAX(o.date) as last_sale_date,
WHERE o.canceled = 0 AND o.product_id = ? MIN(o.date) as first_sale_date,
GROUP BY o.product_id COUNT(DISTINCT o.order_number) as number_of_orders,
`, [product.product_id]); AVG(o.quantity) as avg_quantity_per_order,
-- Calculate rolling averages
SUM(CASE WHEN o.date >= DATE_SUB(CURDATE(), INTERVAL 30 DAY) THEN o.quantity ELSE 0 END) as last_30_days_qty,
SUM(CASE WHEN o.date >= DATE_SUB(CURDATE(), INTERVAL 7 DAY) THEN o.quantity ELSE 0 END) as last_7_days_qty
FROM orders o
JOIN products p ON o.product_id = p.product_id
WHERE o.canceled = 0 AND o.product_id = ?
GROUP BY o.product_id
)
SELECT
total_quantity_sold,
total_revenue,
total_cost,
last_sale_date,
first_sale_date,
number_of_orders,
avg_quantity_per_order,
last_30_days_qty / 30 as rolling_daily_avg,
last_7_days_qty / 7 as rolling_weekly_avg,
total_quantity_sold as total_sales_to_date
FROM sales_summary
`, [product.product_id]).catch(err => {
logError(err, `Failed to calculate sales metrics for product ${product.product_id}`);
throw err;
});
// Calculate purchase metrics // Calculate purchase metrics with proper handling of negative quantities
const [purchaseMetrics] = await connection.query(` const [purchaseMetrics] = await connection.query(`
SELECT WITH recent_orders AS (
SUM(received) as total_quantity_purchased, SELECT
SUM(cost_price * received) as total_cost, date,
MAX(date) as last_purchase_date, received_date,
MAX(received_date) as last_received_date, received,
AVG(DATEDIFF(received_date, date)) as avg_lead_time_days cost_price,
FROM purchase_orders DATEDIFF(received_date, date) as lead_time_days,
WHERE status = 'closed' AND received > 0 AND product_id = ? ROW_NUMBER() OVER (ORDER BY date DESC) as order_rank
GROUP BY product_id FROM purchase_orders
`, [product.product_id]); WHERE status = 'closed'
AND product_id = ?
AND received > 0
AND received_date IS NOT NULL
),
lead_time_orders AS (
SELECT *
FROM recent_orders
WHERE order_rank <= 5 -- Last 5 orders
OR date >= DATE_SUB(CURDATE(), INTERVAL 90 DAY) -- Or orders from last 90 days
)
SELECT
SUM(CASE WHEN received >= 0 THEN received ELSE 0 END) as total_quantity_purchased,
SUM(CASE WHEN received >= 0 THEN cost_price * received ELSE 0 END) as total_cost,
MAX(date) as last_purchase_date,
MAX(received_date) as last_received_date,
AVG(lead_time_days) as avg_lead_time_days,
COUNT(*) as orders_analyzed
FROM lead_time_orders
`, [product.product_id]).catch(err => {
logError(err, `Failed to calculate purchase metrics for product ${product.product_id}`);
throw err;
});
// Get current stock // Get current stock
const [stockInfo] = await connection.query(` const [stockInfo] = await connection.query(`
SELECT stock_quantity, cost_price SELECT stock_quantity, cost_price
FROM products FROM products
WHERE product_id = ? WHERE product_id = ?
`, [product.product_id]); `, [product.product_id]).catch(err => {
logError(err, `Failed to get stock info for product ${product.product_id}`);
throw err;
});
// Calculate metrics // Get stock thresholds for this product's category/vendor
const metrics = salesMetrics[0] || {}; const [thresholds] = await connection.query(`
const purchases = purchaseMetrics[0] || {}; WITH product_info AS (
const stock = stockInfo[0] || {}; SELECT
p.product_id,
p.vendor,
pc.category_id
FROM products p
LEFT JOIN product_categories pc ON p.product_id = pc.product_id
WHERE p.product_id = ?
),
threshold_options AS (
SELECT
st.*,
CASE
WHEN st.category_id = pi.category_id AND st.vendor = pi.vendor THEN 1 -- Category + vendor match
WHEN st.category_id = pi.category_id AND st.vendor IS NULL THEN 2 -- Category match
WHEN st.category_id IS NULL AND st.vendor = pi.vendor THEN 3 -- Vendor match
WHEN st.category_id IS NULL AND st.vendor IS NULL THEN 4 -- Default
ELSE 5
END as priority
FROM product_info pi
CROSS JOIN stock_thresholds st
WHERE (st.category_id = pi.category_id OR st.category_id IS NULL)
AND (st.vendor = pi.vendor OR st.vendor IS NULL)
)
SELECT
COALESCE(
(SELECT critical_days
FROM threshold_options
ORDER BY priority LIMIT 1),
7
) as critical_days,
COALESCE(
(SELECT reorder_days
FROM threshold_options
ORDER BY priority LIMIT 1),
14
) as reorder_days,
COALESCE(
(SELECT overstock_days
FROM threshold_options
ORDER BY priority LIMIT 1),
90
) as overstock_days
`, [product.product_id]).catch(err => {
logError(err, `Failed to get thresholds for product ${product.product_id}`);
throw err;
});
const daily_sales_avg = metrics.total_quantity_sold ? metrics.total_quantity_sold / 30 : 0; const threshold = thresholds[0] || { critical_days: 7, reorder_days: 14, overstock_days: 90 };
const weekly_sales_avg = metrics.total_quantity_sold ? metrics.total_quantity_sold / 4 : 0;
const monthly_sales_avg = metrics.total_quantity_sold || 0;
// Calculate margin percent with proper handling of edge cases // Calculate metrics
let margin_percent = 0; const metrics = salesMetrics[0] || {};
if (metrics.total_revenue && metrics.total_revenue > 0) { const purchases = purchaseMetrics[0] || {};
margin_percent = ((metrics.total_revenue - metrics.total_cost) / metrics.total_revenue) * 100; const stock = stockInfo[0] || {};
// Handle -Infinity or Infinity cases
margin_percent = isFinite(margin_percent) ? margin_percent : 0; const daily_sales_avg = metrics.rolling_daily_avg || 0;
const weekly_sales_avg = metrics.rolling_weekly_avg || 0;
const monthly_sales_avg = metrics.total_quantity_sold ? metrics.total_quantity_sold / 30 : 0;
// Calculate margin percent with proper handling of zero revenue
const margin_percent = metrics.total_revenue > 0 ?
((metrics.total_revenue - metrics.total_cost) / metrics.total_revenue) * 100 :
null;
// Calculate current inventory value
const inventory_value = (stock.stock_quantity || 0) * (stock.cost_price || 0);
// Calculate stock status using configurable thresholds with proper handling of zero sales
const stock_status = daily_sales_avg === 0 ? 'New' :
stock.stock_quantity <= Math.max(1, Math.ceil(daily_sales_avg * threshold.critical_days)) ? 'Critical' :
stock.stock_quantity <= Math.max(1, Math.ceil(daily_sales_avg * threshold.reorder_days)) ? 'Reorder' :
stock.stock_quantity > Math.max(1, daily_sales_avg * threshold.overstock_days) ? 'Overstocked' : 'Healthy';
// Add to batch update
metricsUpdates.push([
product.product_id,
daily_sales_avg || null,
weekly_sales_avg || null,
monthly_sales_avg || null,
metrics.avg_quantity_per_order || null,
metrics.number_of_orders || 0,
metrics.first_sale_date || null,
metrics.last_sale_date || null,
daily_sales_avg > 0 ? stock.stock_quantity / daily_sales_avg : null,
weekly_sales_avg > 0 ? stock.stock_quantity / weekly_sales_avg : null,
daily_sales_avg > 0 ? Math.max(1, Math.ceil(daily_sales_avg * threshold.reorder_days)) : null,
daily_sales_avg > 0 ? Math.max(1, Math.ceil(daily_sales_avg * threshold.critical_days)) : null,
margin_percent,
metrics.total_revenue || 0,
inventory_value || 0,
purchases.avg_lead_time_days || null,
purchases.last_purchase_date || null,
purchases.last_received_date || null,
stock_status
]);
} catch (err) {
logError(err, `Failed processing product ${product.product_id}`);
// Continue with next product instead of failing entire batch
continue;
} }
}
// Update product metrics // Batch update metrics
if (metricsUpdates.length > 0) {
await connection.query(` await connection.query(`
INSERT INTO product_metrics ( INSERT INTO product_metrics (
product_id, product_id,
last_calculated_at,
daily_sales_avg, daily_sales_avg,
weekly_sales_avg, weekly_sales_avg,
monthly_sales_avg, monthly_sales_avg,
avg_quantity_per_order,
number_of_orders,
first_sale_date,
last_sale_date,
days_of_inventory, days_of_inventory,
weeks_of_inventory, weeks_of_inventory,
reorder_point, reorder_point,
safety_stock, safety_stock,
avg_margin_percent, avg_margin_percent,
total_revenue, total_revenue,
inventory_value,
avg_lead_time_days, avg_lead_time_days,
last_purchase_date, last_purchase_date,
last_received_date, last_received_date,
abc_class,
stock_status stock_status
) VALUES ( ) VALUES ?
?,
NOW(),
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
?,
NULL,
?
)
ON DUPLICATE KEY UPDATE ON DUPLICATE KEY UPDATE
last_calculated_at = VALUES(last_calculated_at), last_calculated_at = NOW(),
daily_sales_avg = VALUES(daily_sales_avg), daily_sales_avg = VALUES(daily_sales_avg),
weekly_sales_avg = VALUES(weekly_sales_avg), weekly_sales_avg = VALUES(weekly_sales_avg),
monthly_sales_avg = VALUES(monthly_sales_avg), monthly_sales_avg = VALUES(monthly_sales_avg),
avg_quantity_per_order = VALUES(avg_quantity_per_order),
number_of_orders = VALUES(number_of_orders),
first_sale_date = VALUES(first_sale_date),
last_sale_date = VALUES(last_sale_date),
days_of_inventory = VALUES(days_of_inventory), days_of_inventory = VALUES(days_of_inventory),
weeks_of_inventory = VALUES(weeks_of_inventory), weeks_of_inventory = VALUES(weeks_of_inventory),
reorder_point = VALUES(reorder_point), reorder_point = VALUES(reorder_point),
safety_stock = VALUES(safety_stock), safety_stock = VALUES(safety_stock),
avg_margin_percent = VALUES(avg_margin_percent), avg_margin_percent = VALUES(avg_margin_percent),
total_revenue = VALUES(total_revenue), total_revenue = VALUES(total_revenue),
inventory_value = VALUES(inventory_value),
avg_lead_time_days = VALUES(avg_lead_time_days), avg_lead_time_days = VALUES(avg_lead_time_days),
last_purchase_date = VALUES(last_purchase_date), last_purchase_date = VALUES(last_purchase_date),
last_received_date = VALUES(last_received_date), last_received_date = VALUES(last_received_date),
stock_status = VALUES(stock_status) stock_status = VALUES(stock_status)
`, [ `, [metricsUpdates]).catch(err => {
product.product_id, logError(err, `Failed to batch update metrics for ${metricsUpdates.length} products`);
daily_sales_avg, throw err;
weekly_sales_avg, });
monthly_sales_avg,
daily_sales_avg ? stock.stock_quantity / daily_sales_avg : null,
weekly_sales_avg ? stock.stock_quantity / weekly_sales_avg : null,
Math.ceil(daily_sales_avg * 14), // 14 days reorder point
Math.ceil(daily_sales_avg * 7), // 7 days safety stock
margin_percent, // Use the properly handled margin percent
metrics.total_revenue || 0,
purchases.avg_lead_time_days || 0,
purchases.last_purchase_date,
purchases.last_received_date,
daily_sales_avg === 0 ? 'New' :
stock.stock_quantity <= Math.ceil(daily_sales_avg * 7) ? 'Critical' :
stock.stock_quantity <= Math.ceil(daily_sales_avg * 14) ? 'Reorder' :
stock.stock_quantity > (daily_sales_avg * 90) ? 'Overstocked' : 'Healthy'
]);
} }
} }
@@ -283,22 +405,27 @@ async function calculateMetrics() {
// Calculate ABC classification // Calculate ABC classification
await connection.query(` await connection.query(`
WITH revenue_percentiles AS ( WITH revenue_rankings AS (
SELECT SELECT
product_id, product_id,
total_revenue, total_revenue,
PERCENT_RANK() OVER (ORDER BY total_revenue DESC) as revenue_percentile PERCENT_RANK() OVER (ORDER BY COALESCE(total_revenue, 0) DESC) as revenue_rank
FROM product_metrics FROM product_metrics
WHERE total_revenue > 0 ),
classification_update AS (
SELECT
product_id,
CASE
WHEN revenue_rank <= 0.2 THEN 'A'
WHEN revenue_rank <= 0.5 THEN 'B'
ELSE 'C'
END as abc_class
FROM revenue_rankings
) )
UPDATE product_metrics pm UPDATE product_metrics pm
JOIN revenue_percentiles rp ON pm.product_id = rp.product_id JOIN classification_update cu ON pm.product_id = cu.product_id
SET pm.abc_class = SET pm.abc_class = cu.abc_class,
CASE pm.last_calculated_at = NOW()
WHEN rp.revenue_percentile < 0.2 THEN 'A'
WHEN rp.revenue_percentile < 0.5 THEN 'B'
ELSE 'C'
END;
`); `);
// Update progress for time-based aggregates // Update progress for time-based aggregates