Set up change tracking in core tables

2025-02-02 15:06:20 -05:00
parent 12cab7473a
commit 22ad2d44db
9 changed files with 216 additions and 94 deletions

View File

@@ -100,44 +100,44 @@ CREATE TABLE IF NOT EXISTS sales_seasonality (
 -- Insert default global thresholds if not exists
 INSERT INTO stock_thresholds (id, category_id, vendor, critical_days, reorder_days, overstock_days)
-VALUES (1, NULL, NULL, 7, 14, 90)
+VALUES (1, NULL, NULL, 7, 14, 90) AS new_values
 ON DUPLICATE KEY UPDATE
-    critical_days = VALUES(critical_days),
-    reorder_days = VALUES(reorder_days),
-    overstock_days = VALUES(overstock_days);
+    critical_days = new_values.critical_days,
+    reorder_days = new_values.reorder_days,
+    overstock_days = new_values.overstock_days;
 INSERT INTO lead_time_thresholds (id, category_id, vendor, target_days, warning_days, critical_days)
-VALUES (1, NULL, NULL, 14, 21, 30)
+VALUES (1, NULL, NULL, 14, 21, 28) AS new_values
 ON DUPLICATE KEY UPDATE
-    target_days = VALUES(target_days),
-    warning_days = VALUES(warning_days),
-    critical_days = VALUES(critical_days);
+    target_days = new_values.target_days,
+    warning_days = new_values.warning_days,
+    critical_days = new_values.critical_days;
 INSERT INTO sales_velocity_config (id, category_id, vendor, daily_window_days, weekly_window_days, monthly_window_days)
-VALUES (1, NULL, NULL, 30, 7, 90)
+VALUES (1, NULL, NULL, 30, 90, 180) AS new_values
 ON DUPLICATE KEY UPDATE
-    daily_window_days = VALUES(daily_window_days),
-    weekly_window_days = VALUES(weekly_window_days),
-    monthly_window_days = VALUES(monthly_window_days);
+    daily_window_days = new_values.daily_window_days,
+    weekly_window_days = new_values.weekly_window_days,
+    monthly_window_days = new_values.monthly_window_days;
 INSERT INTO abc_classification_config (id, a_threshold, b_threshold, classification_period_days)
-VALUES (1, 20.0, 50.0, 90)
+VALUES (1, 80, 95, 180) AS new_values
 ON DUPLICATE KEY UPDATE
-    a_threshold = VALUES(a_threshold),
-    b_threshold = VALUES(b_threshold),
-    classification_period_days = VALUES(classification_period_days);
+    a_threshold = new_values.a_threshold,
+    b_threshold = new_values.b_threshold,
+    classification_period_days = new_values.classification_period_days;
 INSERT INTO safety_stock_config (id, category_id, vendor, coverage_days, service_level)
-VALUES (1, NULL, NULL, 14, 95.0)
+VALUES (1, NULL, NULL, 14, 95.0) AS new_values
 ON DUPLICATE KEY UPDATE
-    coverage_days = VALUES(coverage_days),
-    service_level = VALUES(service_level);
+    coverage_days = new_values.coverage_days,
+    service_level = new_values.service_level;
 INSERT INTO turnover_config (id, category_id, vendor, calculation_period_days, target_rate)
-VALUES (1, NULL, NULL, 30, 1.0)
+VALUES (1, NULL, NULL, 30, 1.0) AS new_values
 ON DUPLICATE KEY UPDATE
-    calculation_period_days = VALUES(calculation_period_days),
-    target_rate = VALUES(target_rate);
+    calculation_period_days = new_values.calculation_period_days,
+    target_rate = new_values.target_rate;
 -- Insert default seasonality factors (neutral)
 INSERT INTO sales_seasonality (month, seasonality_factor)
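Note: the right-hand changes above migrate off the VALUES() function in ON DUPLICATE KEY UPDATE, which MySQL deprecated in 8.0.20, to the row-alias form introduced in 8.0.19. A minimal before/after, using a hypothetical table t:

-- Deprecated: VALUES() in the UPDATE clause warns from MySQL 8.0.20
INSERT INTO t (id, qty) VALUES (1, 5)
ON DUPLICATE KEY UPDATE qty = VALUES(qty);

-- Replacement: alias the inserted row and reference its columns
INSERT INTO t (id, qty) VALUES (1, 5) AS new_values
ON DUPLICATE KEY UPDATE qty = new_values.qty;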
@@ -193,4 +193,24 @@ CREATE TABLE IF NOT EXISTS import_history (
     additional_info JSON,
     INDEX idx_table_time (table_name, start_time),
     INDEX idx_status (status)
 );
+-- Configuration for metric calculation windows
+CREATE TABLE IF NOT EXISTS metric_calculation_config (
+    id INT NOT NULL PRIMARY KEY,
+    calculation_window_days INT NOT NULL DEFAULT 90,
+    incremental_enabled BOOLEAN DEFAULT TRUE,
+    force_full_calculation_days INT DEFAULT 7,
+    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
+);
+-- Insert default metric calculation config
+INSERT INTO metric_calculation_config (id, calculation_window_days, incremental_enabled, force_full_calculation_days)
+VALUES (1, 90, TRUE, 7) AS new_values
+ON DUPLICATE KEY UPDATE
+    calculation_window_days = new_values.calculation_window_days,
+    incremental_enabled = new_values.incremental_enabled,
+    force_full_calculation_days = new_values.force_full_calculation_days;
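A sketch of how a calculation job might consume this config — the query shape is an assumption based on the columns above, not code from this commit. needs_recalculation drives incremental runs, while force_full_calculation_days bounds how stale last_calculated_at may get before a product is recalculated regardless:

SELECT pms.pid
FROM product_metric_status pms
JOIN metric_calculation_config cfg ON cfg.id = 1
WHERE pms.needs_recalculation = TRUE
   OR pms.last_calculated_at IS NULL
   OR pms.last_calculated_at < NOW() - INTERVAL cfg.force_full_calculation_days DAY;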

View File

@@ -1,6 +1,44 @@
 -- Disable foreign key checks
 SET FOREIGN_KEY_CHECKS = 0;
+-- Create metric status tracking table
+CREATE TABLE IF NOT EXISTS product_metric_status (
+    pid BIGINT NOT NULL,
+    last_calculated_at TIMESTAMP NULL,
+    needs_recalculation BOOLEAN DEFAULT TRUE,
+    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+    PRIMARY KEY (pid),
+    INDEX idx_needs_recalc (needs_recalculation),
+    INDEX idx_last_calc (last_calculated_at),
+    FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE
+) ENGINE=InnoDB;
+-- Add triggers for metric recalculation
+DROP TRIGGER IF EXISTS orders_after_insert_update;
+CREATE TRIGGER orders_after_insert_update AFTER INSERT ON orders FOR EACH ROW
+INSERT INTO product_metric_status (pid, needs_recalculation)
+VALUES (NEW.pid, TRUE)
+ON DUPLICATE KEY UPDATE
+    needs_recalculation = TRUE,
+    updated_at = CURRENT_TIMESTAMP;
+DROP TRIGGER IF EXISTS purchase_orders_after_insert_update;
+CREATE TRIGGER purchase_orders_after_insert_update AFTER INSERT ON purchase_orders FOR EACH ROW
+INSERT INTO product_metric_status (pid, needs_recalculation)
+VALUES (NEW.pid, TRUE)
+ON DUPLICATE KEY UPDATE
+    needs_recalculation = TRUE,
+    updated_at = CURRENT_TIMESTAMP;
+DROP TRIGGER IF EXISTS products_after_insert_update;
+CREATE TRIGGER products_after_insert_update AFTER INSERT ON products FOR EACH ROW
+INSERT INTO product_metric_status (pid, needs_recalculation)
+VALUES (NEW.pid, TRUE)
+ON DUPLICATE KEY UPDATE
+    needs_recalculation = TRUE,
+    updated_at = CURRENT_TIMESTAMP;
 -- Temporary tables for batch metrics processing
 CREATE TABLE IF NOT EXISTS temp_sales_metrics (
     pid BIGINT NOT NULL,
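The triggers above only set the flag; the consuming side is implied rather than included in this diff. A minimal sketch of the clearing step a metrics job would run after rebuilding a product's numbers (the pid literal is illustrative; idx_needs_recalc serves the WHERE):

UPDATE product_metric_status
SET needs_recalculation = FALSE,
    last_calculated_at = CURRENT_TIMESTAMP
WHERE pid = 12345
  AND needs_recalculation = TRUE;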
@@ -410,21 +448,4 @@ LEFT JOIN
 category_metrics cm ON c.cat_id = cm.category_id;
 -- Re-enable foreign key checks
 SET FOREIGN_KEY_CHECKS = 1;
--- Create table for sales seasonality factors
-CREATE TABLE IF NOT EXISTS sales_seasonality (
-    month INT NOT NULL,
-    seasonality_factor DECIMAL(5,3) DEFAULT 0,
-    last_updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    PRIMARY KEY (month),
-    CHECK (month BETWEEN 1 AND 12),
-    CHECK (seasonality_factor BETWEEN -1.0 AND 1.0)
-);
--- Insert default seasonality factors (neutral)
-INSERT INTO sales_seasonality (month, seasonality_factor)
-VALUES
-    (1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0),
-    (7, 0), (8, 0), (9, 0), (10, 0), (11, 0), (12, 0)
-ON DUPLICATE KEY UPDATE last_updated = CURRENT_TIMESTAMP;
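The sales_seasonality table is removed here because it now lives in the config schema shown in the first file. Given the factor's CHECK range of [-1.0, 1.0], a neutral factor of 0 leaves a metric unchanged; one plausible application (the product_velocity table and base_daily_velocity column are hypothetical):

SELECT v.pid,
       v.base_daily_velocity * (1 + ss.seasonality_factor) AS seasonal_velocity
FROM product_velocity v
JOIN sales_seasonality ss ON ss.month = MONTH(CURDATE());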

View File

@@ -19,7 +19,7 @@ CREATE TABLE products (
     landing_cost_price DECIMAL(10, 3),
     barcode VARCHAR(50),
     harmonized_tariff_code VARCHAR(20),
-    updated_at TIMESTAMP,
+    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
     visible BOOLEAN DEFAULT true,
     managing_stock BOOLEAN DEFAULT true,
     replenishable BOOLEAN DEFAULT true,
@@ -57,7 +57,8 @@ CREATE TABLE products (
     INDEX idx_brand (brand),
     INDEX idx_location (location),
     INDEX idx_total_sold (total_sold),
-    INDEX idx_date_last_sold (date_last_sold)
+    INDEX idx_date_last_sold (date_last_sold),
+    INDEX idx_updated (updated_at)
 ) ENGINE=InnoDB;
 -- Create categories table with hierarchy support
@@ -74,19 +75,8 @@ CREATE TABLE categories (
     INDEX idx_parent (parent_id),
     INDEX idx_type (type),
     INDEX idx_status (status),
-    INDEX idx_name_type (name, type)
-) ENGINE=InnoDB;
--- Create vendor_details table
-CREATE TABLE vendor_details (
-    vendor VARCHAR(100) PRIMARY KEY,
-    contact_name VARCHAR(100),
-    email VARCHAR(255),
-    phone VARCHAR(50),
-    status VARCHAR(20) DEFAULT 'active',
-    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
-    INDEX idx_status (status)
-) ENGINE=InnoDB;
+    INDEX idx_name_type (name, type),
+    INDEX idx_updated (updated_at)
+) ENGINE=InnoDB;
 -- Create product_categories junction table
@@ -118,6 +108,7 @@ CREATE TABLE IF NOT EXISTS orders (
     customer_name VARCHAR(100),
     status VARCHAR(20) DEFAULT 'pending',
     canceled TINYINT(1) DEFAULT 0,
+    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
     PRIMARY KEY (id),
     UNIQUE KEY unique_order_line (order_number, pid),
     KEY order_number (order_number),
@@ -125,8 +116,9 @@ CREATE TABLE IF NOT EXISTS orders (
     KEY customer (customer),
     KEY date (date),
     KEY status (status),
-    INDEX idx_orders_metrics (pid, date, canceled)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
+    INDEX idx_orders_metrics (pid, date, canceled, updated_at),
+    INDEX idx_updated (updated_at)
+) ENGINE=InnoDB;
 -- Create purchase_orders table with its indexes
 CREATE TABLE purchase_orders (
@@ -150,15 +142,17 @@ CREATE TABLE purchase_orders (
     last_received_date DATE COMMENT 'Date of most recent receiving',
     received_by VARCHAR(100) COMMENT 'Name of person who first received this PO line',
     receiving_history JSON COMMENT 'Array of receiving records with qty, date, cost, receiving_id, and alt_po flag',
+    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
     FOREIGN KEY (pid) REFERENCES products(pid),
     INDEX idx_po_id (po_id),
     INDEX idx_vendor (vendor),
     INDEX idx_status (status),
     INDEX idx_receiving_status (receiving_status),
-    INDEX idx_purchase_orders_metrics (pid, date, status, ordered, received),
+    INDEX idx_purchase_orders_metrics (pid, date, status, ordered, received, updated_at),
     INDEX idx_po_metrics (pid, date, receiving_status, received_date),
     INDEX idx_po_product_date (pid, date),
     INDEX idx_po_product_status (pid, status),
+    INDEX idx_updated (updated_at),
     UNIQUE KEY unique_po_product (po_id, pid)
 ) ENGINE=InnoDB;
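The updated_at columns and idx_updated indexes added above are what make incremental metric runs cheap: a job can find changed rows by timestamp instead of scanning whole tables. A sketch (the cutoff literal is illustrative):

SELECT DISTINCT pid
FROM orders
WHERE updated_at > '2025-02-01 00:00:00';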

View File

@@ -102,6 +102,16 @@ async function main() {
     if (isImportCancelled) throw new Error("Import cancelled");
+    // Initialize product_metric_status table for any missing products
+    await localConnection.query(`
+      INSERT IGNORE INTO product_metric_status (pid)
+      SELECT pid FROM products p
+      WHERE NOT EXISTS (
+        SELECT 1 FROM product_metric_status pms
+        WHERE pms.pid = p.pid
+      )
+    `);
     // Clean up any previously running imports that weren't completed
     await localConnection.query(`
       UPDATE import_history
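Since pid is the primary key of product_metric_status, the IGNORE modifier and the NOT EXISTS guard in the backfill above are redundant with each other; either alone gives the same result here. A minimal equivalent:

INSERT IGNORE INTO product_metric_status (pid)
SELECT pid FROM products;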

View File

@@ -136,6 +136,21 @@ async function importCategories(prodConnection, localConnection) {
       total: totalInserted,
       elapsed: formatElapsedTime((Date.now() - startTime) / 1000),
     });
+    // Mark all products in these categories for recalculation
+    if (categoriesToInsert.length > 0) {
+      const affectedCatIds = categoriesToInsert.map(c => c.cat_id);
+      await localConnection.query(`
+        INSERT INTO product_metric_status (pid, needs_recalculation)
+        SELECT DISTINCT pc.pid, TRUE
+        FROM product_categories pc
+        WHERE pc.cat_id IN (?)
+        ON DUPLICATE KEY UPDATE
+          needs_recalculation = TRUE,
+          updated_at = CURRENT_TIMESTAMP
+      `, [affectedCatIds]);
+    }
   }
   // After all imports, if we skipped any categories, throw an error
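The single IN (?) placeholder bound to an array assumes driver-side array expansion (mysql/mysql2-style query formatting). With affectedCatIds = [101, 102, 103] (illustrative ids), the statement sent to the server would be:

INSERT INTO product_metric_status (pid, needs_recalculation)
SELECT DISTINCT pc.pid, TRUE
FROM product_categories pc
WHERE pc.cat_id IN (101, 102, 103)
ON DUPLICATE KEY UPDATE
    needs_recalculation = TRUE,
    updated_at = CURRENT_TIMESTAMP;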

View File

@@ -472,7 +472,19 @@ async function importOrders(prodConnection, localConnection, incrementalUpdate =
       recordsAdded += inserts;
       recordsUpdated += updates;
-      importedCount += processedOrderItems.size; // Count unique order items processed
+      importedCount += processedOrderItems.size;
+      // Mark affected products for recalculation
+      const affectedPids = [...new Set(validOrders.map(o => o.pid))];
+      if (affectedPids.length > 0) {
+        await localConnection.query(`
+          INSERT INTO product_metric_status (pid, needs_recalculation)
+          VALUES ${affectedPids.map(() => '(?, TRUE)').join(',')}
+          ON DUPLICATE KEY UPDATE
+            needs_recalculation = TRUE,
+            updated_at = CURRENT_TIMESTAMP
+        `, affectedPids);
+      }
     }
     // Update progress based on unique orders processed
@@ -585,6 +597,18 @@ async function importOrders(prodConnection, localConnection, incrementalUpdate =
       recordsAdded += inserts;
       recordsUpdated += updates;
       importedCount += retryOrderItems.size;
+      // Mark affected products for recalculation
+      const affectedPids = [...new Set(validOrders.map(o => o.pid))];
+      if (affectedPids.length > 0) {
+        await localConnection.query(`
+          INSERT INTO product_metric_status (pid, needs_recalculation)
+          VALUES ${affectedPids.map(() => '(?, TRUE)').join(',')}
+          ON DUPLICATE KEY UPDATE
+            needs_recalculation = TRUE,
+            updated_at = CURRENT_TIMESTAMP
+        `, affectedPids);
+      }
     }
   } catch (error) {
     console.warn('Warning: Failed to retry skipped orders:', error.message);
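The VALUES ${...} interpolation builds one (?, TRUE) tuple per affected pid, so the parameter array lines up one-to-one with the placeholders. For three pids the prepared statement looks like:

INSERT INTO product_metric_status (pid, needs_recalculation)
VALUES (?, TRUE), (?, TRUE), (?, TRUE)
ON DUPLICATE KEY UPDATE
    needs_recalculation = TRUE,
    updated_at = CURRENT_TIMESTAMP;

Very large imports generate correspondingly large statements; the pattern assumes batch sizes stay below the server's max_allowed_packet.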

View File

@@ -468,6 +468,21 @@ async function importProducts(prodConnection, localConnection, incrementalUpdate
       recordsUpdated += insertsAndUpdates.updates.length;
     }
+    if (insertsAndUpdates.updates.length > 0 || insertsAndUpdates.inserts.length > 0) {
+      const affectedPids = [
+        ...insertsAndUpdates.updates.map(p => p.pid),
+        ...insertsAndUpdates.inserts.map(p => p.pid)
+      ];
+      await localConnection.query(`
+        INSERT INTO product_metric_status (pid, needs_recalculation)
+        VALUES ${affectedPids.map(() => '(?, TRUE)').join(',')}
+        ON DUPLICATE KEY UPDATE
+          needs_recalculation = TRUE,
+          updated_at = CURRENT_TIMESTAMP
+      `, affectedPids);
+    }
     // Process category relationships
     if (batch.some(p => p.category_ids)) {
       // First get all valid categories

View File

@@ -474,6 +474,18 @@ async function importPurchaseOrders(prodConnection, localConnection, incremental
       recordsAdded += inserts;
       recordsUpdated += Math.floor(updates); // Ensure we never have fractional updates
       processed += batchProcessed;
+      // Mark affected products for recalculation
+      const affectedPids = [...new Set(productBatch.map(p => p.pid))];
+      if (affectedPids.length > 0) {
+        await localConnection.query(`
+          INSERT INTO product_metric_status (pid, needs_recalculation)
+          VALUES ${affectedPids.map(() => '(?, TRUE)').join(',')}
+          ON DUPLICATE KEY UPDATE
+            needs_recalculation = TRUE,
+            updated_at = CURRENT_TIMESTAMP
+        `, affectedPids);
+      }
     }
     // Handle updates - now we know these actually have changes
@@ -499,6 +511,18 @@ async function importPurchaseOrders(prodConnection, localConnection, incremental
       recordsUpdated += Math.floor(updates); // Ensure we never have fractional updates
       processed += batchProcessed;
+      // Mark affected products for recalculation
+      const affectedPids = [...new Set(productBatch.map(p => p.pid))];
+      if (affectedPids.length > 0) {
+        await localConnection.query(`
+          INSERT INTO product_metric_status (pid, needs_recalculation)
+          VALUES ${affectedPids.map(() => '(?, TRUE)').join(',')}
+          ON DUPLICATE KEY UPDATE
+            needs_recalculation = TRUE,
+            updated_at = CURRENT_TIMESTAMP
+        `, affectedPids);
+      }
     }
     // Update progress based on time interval

View File

@@ -41,7 +41,10 @@ const CONFIG_TABLES = [
   'abc_classification_config',
   'safety_stock_config',
   'sales_seasonality',
-  'turnover_config'
+  'turnover_config',
+  'sync_status',
+  'metric_calculation_config',
+  'product_metric_status'
 ];
 // Split SQL into individual statements
@@ -175,7 +178,7 @@ async function resetDatabase() {
       DROP TABLE IF EXISTS
       ${tables[0].tables
         .split(',')
-        .filter(table => table !== 'users')
+        .filter(table => !['users', 'import_history'].includes(table))
         .map(table => '`' + table + '`')
         .join(', ')}
     `;
@@ -436,34 +439,7 @@ async function resetDatabase() {
     }
   }
-  // Verify config tables were created
-  const [showConfigTables] = await connection.query('SHOW TABLES');
-  const existingConfigTables = showConfigTables.map(t => Object.values(t)[0]);
-  outputProgress({
-    operation: 'Config tables verification',
-    message: {
-      found: existingConfigTables,
-      expected: CONFIG_TABLES
-    }
-  });
-  const missingConfigTables = CONFIG_TABLES.filter(
-    t => !existingConfigTables.includes(t)
-  );
-  if (missingConfigTables.length > 0) {
-    throw new Error(
-      `Failed to create config tables: ${missingConfigTables.join(', ')}`
-    );
-  }
-  outputProgress({
-    operation: 'Config tables created',
-    message: `Successfully created tables: ${CONFIG_TABLES.join(', ')}`
-  });
-  // Read and execute metrics schema (metrics tables)
+  // Read and execute metrics schema
   outputProgress({
     operation: 'Running metrics setup',
     message: 'Creating metrics tables...'
@@ -525,6 +501,29 @@ async function resetDatabase() {
     }
   }
+  // Verify triggers exist
+  const [triggers] = await connection.query('SHOW TRIGGERS');
+  const expectedTriggers = [
+    'orders_after_insert_update',
+    'purchase_orders_after_insert_update',
+    'products_after_insert_update'
+  ];
+  const missingTriggers = expectedTriggers.filter(
+    triggerName => !triggers.some(t => t.Trigger === triggerName)
+  );
+  if (missingTriggers.length > 0) {
+    throw new Error(
+      `Missing required triggers: ${missingTriggers.join(', ')}`
+    );
+  }
+  outputProgress({
+    operation: 'Triggers verified',
+    message: `Successfully verified triggers: ${expectedTriggers.join(', ')}`
+  });
   outputProgress({
     status: 'complete',
     operation: 'Database reset complete',
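The verification above matches on the Trigger column of SHOW TRIGGERS, which returns one row per trigger in the current schema, e.g.:

SHOW TRIGGERS;
-- Trigger                               Event   Table
-- orders_after_insert_update            INSERT  orders
-- purchase_orders_after_insert_update   INSERT  purchase_orders
-- products_after_insert_update          INSERT  products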