UI tweaks for match columns step + auto hide empty columns
@@ -1,908 +0,0 @@
// run-all-updates.js
const path = require('path');
const fs = require('fs');
const { Pool } = require('pg'); // Assuming you use 'pg'

// --- Configuration ---
// Toggle these constants to enable/disable specific steps for testing
const RUN_DAILY_SNAPSHOTS = true;
const RUN_PRODUCT_METRICS = true;
const RUN_PERIODIC_METRICS = true;
const RUN_BRAND_METRICS = true;
const RUN_VENDOR_METRICS = true;
const RUN_CATEGORY_METRICS = true;

// Maximum execution time for the entire sequence (e.g., 90 minutes)
const MAX_EXECUTION_TIME_TOTAL = 90 * 60 * 1000;
// Maximum execution time per individual SQL step (e.g., 30 minutes)
const MAX_EXECUTION_TIME_PER_STEP = 30 * 60 * 1000;
// Query cancellation timeout
const CANCEL_QUERY_AFTER_SECONDS = 5;
// --- End Configuration ---

// Change working directory to script directory
process.chdir(path.dirname(__filename));

// Log script path for debugging
console.log('Script running from:', __dirname);

// Try to load environment variables from multiple locations
const envPaths = [
  path.resolve(__dirname, '../..', '.env'), // Two levels up (inventory/.env)
  path.resolve(__dirname, '..', '.env'),    // One level up (inventory-server/.env)
  path.resolve(__dirname, '.env'),          // Same directory
  '/var/www/html/inventory/.env'            // Server absolute path
];

let envLoaded = false;
for (const envPath of envPaths) {
  if (fs.existsSync(envPath)) {
    console.log(`Loading environment from: ${envPath}`);
    require('dotenv').config({ path: envPath });
    envLoaded = true;
    break;
  }
}

if (!envLoaded) {
  console.warn('WARNING: Could not find .env file in any of the expected locations.');
  console.warn('Checked paths:', envPaths);
}

// --- Database Setup ---
// Make sure we have the required DB credentials
if (!process.env.DB_HOST && !process.env.DATABASE_URL) {
  console.error('WARNING: Neither DB_HOST nor DATABASE_URL environment variables found');
}

// Only validate individual parameters if not using connection string
if (!process.env.DATABASE_URL) {
  if (!process.env.DB_USER) console.error('WARNING: DB_USER environment variable is missing');
  if (!process.env.DB_NAME) console.error('WARNING: DB_NAME environment variable is missing');

  // Password must be a string for PostgreSQL SCRAM authentication
  if (!process.env.DB_PASSWORD || typeof process.env.DB_PASSWORD !== 'string') {
    console.error('WARNING: DB_PASSWORD environment variable is missing or not a string');
  }
}

// Configure database connection to match individual scripts
let dbConfig;

// Check if a DATABASE_URL exists (common in production environments)
if (process.env.DATABASE_URL && typeof process.env.DATABASE_URL === 'string') {
  console.log('Using DATABASE_URL for connection');
  dbConfig = {
    connectionString: process.env.DATABASE_URL,
    ssl: process.env.DB_SSL === 'true' ? { rejectUnauthorized: false } : false,
    // Add performance optimizations
    max: 10, // connection pool max size
    idleTimeoutMillis: 30000,
    connectionTimeoutMillis: 60000,
    // Set timeouts for long-running queries
    statement_timeout: 1800000, // 30 minutes
    query_timeout: 1800000 // 30 minutes
  };
} else {
  // Use individual connection parameters
  dbConfig = {
    host: process.env.DB_HOST,
    user: process.env.DB_USER,
    password: process.env.DB_PASSWORD,
    database: process.env.DB_NAME,
    port: process.env.DB_PORT || 5432,
    ssl: process.env.DB_SSL === 'true',
    // Add performance optimizations
    max: 10, // connection pool max size
    idleTimeoutMillis: 30000,
    connectionTimeoutMillis: 60000,
    // Set timeouts for long-running queries
    statement_timeout: 1800000, // 30 minutes
    query_timeout: 1800000 // 30 minutes
  };
}
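// Note: cancelCalculation() below cancels backends matching
// application_name = 'node-metrics-calculator', but neither branch above sets
// application_name on the pool config. Adding, e.g.,
// `application_name: 'node-metrics-calculator'` to both dbConfig objects
// (node-postgres supports this field) would be needed for that filter to
// match this script's own connections.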

// Try to load from utils DB module as a last resort
try {
  if (!process.env.DB_HOST && !process.env.DATABASE_URL) {
    console.log('Attempting to load DB config from individual script modules...');
    const dbModule = require('./metrics-new/utils/db');
    if (dbModule && dbModule.dbConfig) {
      console.log('Found DB config in individual script module');
      dbConfig = {
        ...dbModule.dbConfig,
        // Add performance optimizations if not present
        max: dbModule.dbConfig.max || 10,
        idleTimeoutMillis: dbModule.dbConfig.idleTimeoutMillis || 30000,
        connectionTimeoutMillis: dbModule.dbConfig.connectionTimeoutMillis || 60000,
        statement_timeout: 1800000,
        query_timeout: 1800000
      };
    }
  }
} catch (err) {
  console.warn('Could not load DB config from individual script modules:', err.message);
}

// Debug log connection info (without password)
console.log('DB Connection Info:', {
  connectionString: dbConfig.connectionString ? 'PROVIDED' : undefined,
  host: dbConfig.host,
  user: dbConfig.user,
  database: dbConfig.database,
  port: dbConfig.port,
  ssl: dbConfig.ssl ? 'ENABLED' : 'DISABLED',
  password: (dbConfig.password || dbConfig.connectionString) ? '****' : 'MISSING' // Only show if credentials exist
});

const pool = new Pool(dbConfig);

const getConnection = () => {
  return pool.connect();
};

const closePool = () => {
  console.log("Closing database connection pool.");
  return pool.end();
};
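// Note: every getConnection() caller in this file is responsible for calling
// release() on the checked-out client, on both success and error paths.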

// --- Progress Utilities ---
// Using functions directly instead of globals
const progressUtils = require('./metrics-new/utils/progress'); // Assuming utils/progress.js exports these

// --- State & Cancellation ---
let isCancelled = false;
let currentStep = ''; // Track which step is running for cancellation message
let overallStartTime = null;
let mainTimeoutHandle = null;
let stepTimeoutHandle = null;
let combinedHistoryId = null; // ID for the combined history record

async function cancelCalculation(reason = 'cancelled by user') {
  if (isCancelled) return; // Prevent multiple cancellations
  isCancelled = true;
  console.log(`Calculation ${reason}. Attempting to cancel active step: ${currentStep}`);

  // Clear timeouts
  if (mainTimeoutHandle) clearTimeout(mainTimeoutHandle);
  if (stepTimeoutHandle) clearTimeout(stepTimeoutHandle);

  // Attempt to cancel the long-running query in Postgres
  let conn = null;
  try {
    console.log(`Attempting to cancel queries running longer than ${CANCEL_QUERY_AFTER_SECONDS} seconds...`);
    conn = await getConnection();
    const result = await conn.query(`
      SELECT pg_cancel_backend(pid)
      FROM pg_stat_activity
      WHERE query_start < now() - interval '${CANCEL_QUERY_AFTER_SECONDS} seconds'
        AND application_name = 'node-metrics-calculator' -- Match specific app name
        AND state = 'active' -- Only cancel active queries
        AND query NOT LIKE '%pg_cancel_backend%'
        AND pid <> pg_backend_pid(); -- Don't cancel self
    `);
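    // (Interpolating CANCEL_QUERY_AFTER_SECONDS into the SQL string is safe
    // here only because it is a local numeric constant, never user input.)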
    console.log(`Sent ${result.rowCount} cancellation signal(s).`);

    // Update the combined history record to show cancellation
    if (combinedHistoryId) {
      const totalDuration = Math.round((Date.now() - overallStartTime) / 1000);
      await conn.query(`
        UPDATE calculate_history
        SET
          status = 'cancelled'::calculation_status,
          end_time = NOW(),
          duration_seconds = $1::integer,
          error_message = $2::text
        WHERE id = $3::integer;
      `, [totalDuration, `Calculation ${reason} during step: ${currentStep}`, combinedHistoryId]);
      console.log(`Updated combined history record ${combinedHistoryId} with cancellation status`);
    }

    conn.release();
  } catch (err) {
    console.error('Error during database query cancellation:', err.message);
    if (conn) {
      try { conn.release(); } catch (e) { console.error("Error releasing cancellation connection", e); }
    }
    // Proceed with script termination attempt even if DB cancel fails
  } finally {
    // Update progress to show cancellation
    progressUtils.outputProgress({
      status: 'cancelled',
      operation: `Calculation ${reason} during step: ${currentStep}`,
      current: 0, // Reset progress indicators
      total: 100,
      elapsed: overallStartTime ? progressUtils.formatElapsedTime(overallStartTime) : 'N/A',
      remaining: null,
      rate: 0,
      percentage: '0', // Or keep last known percentage?
      timing: {
        start_time: overallStartTime ? new Date(overallStartTime).toISOString() : 'N/A',
        end_time: new Date().toISOString(),
        elapsed_seconds: overallStartTime ? Math.round((Date.now() - overallStartTime) / 1000) : 0
      }
    });
  }

  // Note: We don't force exit here anymore. We let the main function's error
  // handling catch the cancellation error thrown by executeSqlStep or the timeout.
  return {
    success: true, // Indicates cancellation was initiated
    message: `Calculation ${reason}`
  };
}

// Handle SIGINT (Ctrl+C) and SIGTERM (kill) signals
process.on('SIGINT', () => {
  console.log('\nReceived SIGINT (Ctrl+C).');
  cancelCalculation('cancelled by user (SIGINT)');
  // Give cancellation a moment to propagate before force-exiting if needed
  setTimeout(() => process.exit(1), 2000);
});
process.on('SIGTERM', () => {
  console.log('Received SIGTERM.');
  cancelCalculation('cancelled by system (SIGTERM)');
  // Give cancellation a moment to propagate before force-exiting if needed
  setTimeout(() => process.exit(1), 2000);
});

// Add error handlers for uncaught exceptions/rejections
process.on('uncaughtException', (error) => {
  console.error('Uncaught Exception:', error);
  // Attempt graceful shutdown/logging if possible, then exit
  cancelCalculation('failed due to uncaught exception').finally(() => {
    closePool().finally(() => process.exit(1));
  });
});

process.on('unhandledRejection', (reason, promise) => {
  console.error('Unhandled Rejection at:', promise, 'reason:', reason);
  // Attempt graceful shutdown/logging if possible, then exit
  cancelCalculation('failed due to unhandled rejection').finally(() => {
    closePool().finally(() => process.exit(1));
  });
});


// --- Core Logic ---

/**
 * Ensures all products have entries in the settings_product table
 * This is important after importing new products
 */
async function syncSettingsProductTable() {
  let conn = null;
  try {
    currentStep = 'Syncing settings_product table';
    progressUtils.outputProgress({
      operation: 'Syncing product settings',
      message: 'Ensuring all products have settings entries'
    });

    conn = await getConnection();

    // Get counts before sync
    const beforeCounts = await conn.query(`
      SELECT
        (SELECT COUNT(*) FROM products) AS products_count,
        (SELECT COUNT(*) FROM settings_product) AS settings_count
    `);

    const productsCount = parseInt(beforeCounts.rows[0].products_count);
    const settingsCount = parseInt(beforeCounts.rows[0].settings_count);

    progressUtils.outputProgress({
      operation: 'Settings product sync',
      message: `Found ${productsCount} products and ${settingsCount} settings entries`
    });

    // Insert missing product settings
    const result = await conn.query(`
      INSERT INTO settings_product (
        pid,
        lead_time_days,
        days_of_stock,
        safety_stock,
        forecast_method,
        exclude_from_forecast
      )
      SELECT
        p.pid,
        CAST(NULL AS INTEGER),
        CAST(NULL AS INTEGER),
        COALESCE((SELECT setting_value::int FROM settings_global WHERE setting_key = 'default_safety_stock_units'), 0),
        CAST(NULL AS VARCHAR),
        FALSE
      FROM
        public.products p
      WHERE
        NOT EXISTS (
          SELECT 1 FROM settings_product sp WHERE sp.pid = p.pid
        )
      ON CONFLICT (pid) DO NOTHING
    `);

    // Get counts after sync
    const afterCounts = await conn.query(`
      SELECT COUNT(*) AS settings_count FROM settings_product
    `);

    const newSettingsCount = parseInt(afterCounts.rows[0].settings_count);
    const addedCount = newSettingsCount - settingsCount;

    progressUtils.outputProgress({
      operation: 'Settings product sync',
      message: `Added ${addedCount} new settings entries. Now have ${newSettingsCount} total entries.`,
      status: 'complete'
    });

    conn.release();
    return addedCount;
  } catch (err) {
    progressUtils.outputProgress({
      status: 'error',
      operation: 'Settings product sync failed',
      error: err.message
    });
    if (conn) conn.release();
    throw err;
  }
}

/**
 * Executes a single SQL calculation step.
 * @param {object} config - Configuration for the step.
 * @param {string} config.name - User-friendly name of the step.
 * @param {string} config.sqlFile - Path to the SQL file.
 * @param {string} config.historyType - Type identifier for calculate_history.
 * @param {string} config.statusModule - Module name for calculate_status.
 * @param {object} progress - Progress utility functions.
 * @returns {Promise<{success: boolean, message: string, duration: number, rowsAffected: number}>}
 */
async function executeSqlStep(config, progress) {
  if (isCancelled) throw new Error(`Calculation skipped step ${config.name} due to prior cancellation.`);

  currentStep = config.name; // Update global state
  console.log(`\n--- Starting Step: ${config.name} ---`);
  const stepStartTime = Date.now();
  let connection = null;
  let rowsAffected = 0; // Track rows affected by this step

  // Set timeout for this specific step
  if (stepTimeoutHandle) clearTimeout(stepTimeoutHandle); // Clear previous step's timeout
  stepTimeoutHandle = setTimeout(() => {
    // Don't exit directly; report the timeout and initiate cancellation so the
    // main loop's error handling can perform cleanup. (Throwing from inside a
    // timer callback would bypass the surrounding try/catch.)
    console.error(`Step "${config.name}" timed out after ${MAX_EXECUTION_TIME_PER_STEP / 1000} seconds.`);
    cancelCalculation(`timed out during step: ${config.name}`); // Initiate cancellation process
    // cancelCalculation attempts to stop the running query; the main catch
    // block handles cleanup once the cancelled query errors out.
  }, MAX_EXECUTION_TIME_PER_STEP);


  try {
    // 1. Read SQL File
    const sqlFilePath = path.resolve(__dirname, config.sqlFile);
    if (!fs.existsSync(sqlFilePath)) {
      throw new Error(`SQL file not found: ${sqlFilePath}`);
    }
    const sqlQuery = fs.readFileSync(sqlFilePath, 'utf8');
    console.log(`Read SQL file: ${config.sqlFile}`);

    // Check for potential parameter references that might cause issues
    const parameterMatches = sqlQuery.match(/\$\d+(?!\:\:)/g);
    if (parameterMatches && parameterMatches.length > 0) {
      console.warn(`WARNING: Found ${parameterMatches.length} untyped parameters in SQL: ${parameterMatches.slice(0, 5).join(', ')}${parameterMatches.length > 5 ? '...' : ''}`);
      console.warn('These might cause "could not determine data type of parameter" errors.');
    }
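    // For example, the regex above flags a bare `$1` but not `$1::integer`;
    // Postgres cannot infer a type for an untyped positional parameter in a
    // statement executed without bound values.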

    // 2. Get Database Connection
    connection = await getConnection();
    console.log("Database connection acquired.");

    // 3. Ensure calculate_status table exists
    await connection.query(`
      CREATE TABLE IF NOT EXISTS calculate_status (
        module_name TEXT PRIMARY KEY,
        last_calculation_timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
      );
    `);

    // 4. Initial Progress Update
    progress.outputProgress({
      status: 'running',
      operation: `Starting: ${config.name}`,
      current: 0, total: 100,
      elapsed: progress.formatElapsedTime(stepStartTime),
      remaining: 'Calculating...', rate: 0, percentage: '0',
      timing: {
        start_time: new Date(stepStartTime).toISOString(),
        step_start_ms: stepStartTime
      }
    });

    // 5. Execute the Main SQL Query
    progress.outputProgress({
      status: 'running',
      operation: `Executing SQL: ${config.name}`,
      current: 25, total: 100,
      elapsed: progress.formatElapsedTime(stepStartTime),
      remaining: 'Executing query...', rate: 0, percentage: '25',
      timing: {
        start_time: new Date(stepStartTime).toISOString(),
        step_start_ms: stepStartTime
      }
    });
    console.log(`Executing SQL for ${config.name}...`);

    try {
      // Try executing exactly as individual scripts do
      const result = await connection.query(sqlQuery);

      // Try to extract row count from result
      if (result && result.rowCount !== undefined) {
        rowsAffected = result.rowCount;
      } else if (Array.isArray(result) && result[0] && result[0].rowCount !== undefined) {
        rowsAffected = result[0].rowCount;
      }

      // Check if the query returned a result set with row count info
      if (result && result.rows && result.rows.length > 0 && result.rows[0].rows_processed) {
        rowsAffected = parseInt(result.rows[0].rows_processed) || rowsAffected;
        console.log(`SQL returned metrics: ${JSON.stringify(result.rows[0])}`);
      } else if (Array.isArray(result) && result[0] && result[0].rows && result[0].rows[0] && result[0].rows[0].rows_processed) {
        rowsAffected = parseInt(result[0].rows[0].rows_processed) || rowsAffected;
        console.log(`SQL returned metrics: ${JSON.stringify(result[0].rows[0])}`);
      }

      console.log(`SQL affected ${rowsAffected} rows`);
    } catch (sqlError) {
      if (sqlError.message.includes('could not determine data type of parameter')) {
        console.log('Simple query failed with parameter type error, trying alternative method...');
        try {
          // Execute with explicit text mode to avoid parameter confusion
          await connection.query({
            text: sqlQuery,
            rowMode: 'text'
          });
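          // (Best-effort fallback: node-postgres documents rowMode: 'array';
          // whether 'text' changes parameter-type handling is not guaranteed.)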
        } catch (altError) {
          console.error('Alternative execution method also failed:', altError.message);
          throw altError; // Re-throw the alternative error
        }
      } else {
        console.error('SQL Execution Error:', sqlError.message);
        if (sqlError.position) {
          // If the error has a position, try to show the relevant part of the SQL query
          const position = parseInt(sqlError.position, 10);
          const startPos = Math.max(0, position - 100);
          const endPos = Math.min(sqlQuery.length, position + 100);
          console.error('SQL Error Context:');
          console.error('...' + sqlQuery.substring(startPos, position) + ' [ERROR HERE] ' + sqlQuery.substring(position, endPos) + '...');
        }
        throw sqlError; // Re-throw to be caught by the main try/catch
      }
    }

    // Check for cancellation immediately after query finishes
    if (isCancelled) throw new Error(`Calculation cancelled during SQL execution for ${config.name}`);

    console.log(`SQL execution finished for ${config.name}.`);

    // 6. Update Status table only
    await connection.query(`
      INSERT INTO calculate_status (module_name, last_calculation_timestamp)
      VALUES ($1::text, NOW())
      ON CONFLICT (module_name) DO UPDATE
        SET last_calculation_timestamp = EXCLUDED.last_calculation_timestamp;
    `, [config.statusModule]);

    const stepDuration = Math.round((Date.now() - stepStartTime) / 1000);

    // 7. Final Progress Update for Step
    progress.outputProgress({
      status: 'complete',
      operation: `Completed: ${config.name}`,
      current: 100, total: 100,
      elapsed: progress.formatElapsedTime(stepStartTime),
      remaining: '0s', rate: 0, percentage: '100',
      timing: {
        start_time: new Date(stepStartTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: stepDuration
      }
    });
    console.log(`--- Finished Step: ${config.name} (Duration: ${progress.formatElapsedTime(stepStartTime)}) ---`);

    return {
      success: true,
      message: `${config.name} completed successfully`,
      duration: stepDuration,
      rowsAffected: rowsAffected
    };

  } catch (error) {
    clearTimeout(stepTimeoutHandle); // Clear timeout on error
    const errorEndTime = Date.now();
    const errorDuration = Math.round((errorEndTime - stepStartTime) / 1000);
    const finalStatus = isCancelled ? 'cancelled' : 'failed';
    const errorMessage = error.message || 'Unknown error';

    console.error(`--- ERROR in Step: ${config.name} ---`);
    console.error(error); // Log the full error
    console.error(`------------------------------------`);

    // Update progress file with error/cancellation
    progress.outputProgress({
      status: finalStatus,
      operation: `Error in ${config.name}: ${errorMessage.split('\n')[0]}`, // Show first line of error
      current: 50, total: 100, // Indicate partial completion
      elapsed: progress.formatElapsedTime(stepStartTime),
      remaining: null, rate: 0, percentage: '50',
      timing: {
        start_time: new Date(stepStartTime).toISOString(),
        end_time: new Date(errorEndTime).toISOString(),
        elapsed_seconds: errorDuration
      }
    });

    // Rethrow the error to be caught by the main runCalculations function
    throw error; // Add context if needed: new Error(`Step ${config.name} failed: ${errorMessage}`)

  } finally {
    clearTimeout(stepTimeoutHandle); // Ensure timeout is cleared
    currentStep = ''; // Reset current step
    if (connection) {
      try {
        await connection.release();
        console.log("Database connection released.");
      } catch (releaseError) {
        console.error("Error releasing database connection:", releaseError);
      }
    }
  }
}

/**
 * Main function to run all calculation steps sequentially.
 */
async function runAllCalculations() {
  overallStartTime = Date.now();
  isCancelled = false; // Reset cancellation flag at start

  // Overall timeout for the entire script
  mainTimeoutHandle = setTimeout(() => {
    console.error(`--- OVERALL TIMEOUT REACHED (${MAX_EXECUTION_TIME_TOTAL / 1000}s) ---`);
    cancelCalculation(`overall timeout reached`);
    // The process should exit via the unhandled rejection/exception handlers
    // or the SIGTERM/SIGINT handlers after cancellation attempt.
  }, MAX_EXECUTION_TIME_TOTAL);

  const steps = [
    {
      run: RUN_DAILY_SNAPSHOTS,
      name: 'Daily Snapshots Update',
      sqlFile: 'metrics-new/update_daily_snapshots.sql',
      historyType: 'daily_snapshots',
      statusModule: 'daily_snapshots'
    },
    {
      run: RUN_PRODUCT_METRICS,
      name: 'Product Metrics Update',
      sqlFile: 'metrics-new/update_product_metrics.sql', // ASSUMING the initial population is now part of a regular update
      historyType: 'product_metrics',
      statusModule: 'product_metrics'
    },
    {
      run: RUN_PERIODIC_METRICS,
      name: 'Periodic Metrics Update',
      sqlFile: 'metrics-new/update_periodic_metrics.sql',
      historyType: 'periodic_metrics',
      statusModule: 'periodic_metrics'
    },
    {
      run: RUN_BRAND_METRICS,
      name: 'Brand Metrics Update',
      sqlFile: 'metrics-new/calculate_brand_metrics.sql',
      historyType: 'brand_metrics',
      statusModule: 'brand_metrics'
    },
    {
      run: RUN_VENDOR_METRICS,
      name: 'Vendor Metrics Update',
      sqlFile: 'metrics-new/calculate_vendor_metrics.sql',
      historyType: 'vendor_metrics',
      statusModule: 'vendor_metrics'
    },
    {
      run: RUN_CATEGORY_METRICS,
      name: 'Category Metrics Update',
      sqlFile: 'metrics-new/calculate_category_metrics.sql',
      historyType: 'category_metrics',
      statusModule: 'category_metrics'
    }
  ];

  // Build a list of steps we will actually run
  const stepsToRun = steps.filter(step => step.run);
  const stepNames = stepsToRun.map(step => step.name);
  const sqlFiles = stepsToRun.map(step => step.sqlFile);

  let overallSuccess = true;
  let connection = null;

  try {
    // Create a single history record before starting all calculations
    try {
      connection = await getConnection();

      // Ensure calculate_history table exists (basic structure)
      await connection.query(`
        CREATE TABLE IF NOT EXISTS calculate_history (
          id SERIAL PRIMARY KEY,
          start_time TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
          end_time TIMESTAMP WITH TIME ZONE,
          duration_seconds INTEGER,
          status TEXT, -- Will be altered to enum if needed below
          error_message TEXT,
          additional_info JSONB
        );
      `);

      // Ensure the calculation_status enum type exists if needed
      await connection.query(`
        DO $$
        BEGIN
          IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'calculation_status') THEN
            CREATE TYPE calculation_status AS ENUM ('running', 'completed', 'failed', 'cancelled');

            -- If needed, alter the existing table to use the enum
            ALTER TABLE calculate_history
              ALTER COLUMN status TYPE calculation_status
              USING status::calculation_status;
          END IF;
        END
        $$;
      `);
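      // Note: the ALTER COLUMN above runs only on the first execution, when
      // the enum type is created; later runs assume calculate_history.status
      // is already of type calculation_status.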

      // Mark any previous running combined calculations as cancelled
      await connection.query(`
        UPDATE calculate_history
        SET
          status = 'cancelled'::calculation_status,
          end_time = NOW(),
          duration_seconds = EXTRACT(EPOCH FROM (NOW() - start_time))::INTEGER,
          error_message = 'Previous calculation was not completed properly or was superseded.'
        WHERE status = 'running'::calculation_status AND additional_info->>'type' = 'combined_metrics';
      `);

      // Create a single history record for this run
      const historyResult = await connection.query(`
        INSERT INTO calculate_history (status, additional_info)
        VALUES ('running'::calculation_status, jsonb_build_object(
          'type', 'combined_metrics',
          'steps', $1::jsonb,
          'sql_files', $2::jsonb
        ))
        RETURNING id;
      `, [JSON.stringify(stepNames), JSON.stringify(sqlFiles)]);

      combinedHistoryId = historyResult.rows[0].id;
      console.log(`Created combined history record ID: ${combinedHistoryId}`);

      // Get initial counts for tracking
      const productCount = await connection.query('SELECT COUNT(*) as count FROM products');
      const totalProducts = parseInt(productCount.rows[0].count);

      // Update history with initial counts
      await connection.query(`
        UPDATE calculate_history
        SET additional_info = additional_info || jsonb_build_object('total_products', $1::integer)
        WHERE id = $2
      `, [totalProducts, combinedHistoryId]);

      connection.release();
    } catch (historyError) {
      console.error('Error creating combined history record:', historyError);
      if (connection) connection.release();
      // Continue without history tracking if it fails
    }

    // First, sync the settings_product table to ensure all products have entries
    progressUtils.outputProgress({
      operation: 'Starting metrics calculation',
      message: 'Preparing product settings...'
    });

    try {
      const addedCount = await syncSettingsProductTable();

      progressUtils.outputProgress({
        operation: 'Preparation complete',
        message: `Added ${addedCount} missing product settings entries`,
        status: 'complete'
      });
    } catch (syncError) {
      console.error('Warning: Failed to sync product settings, continuing with metrics calculations:', syncError);
      // Don't fail the entire process if settings sync fails
    }

    // Track completed steps
    const completedSteps = [];
    const stepTimings = {};
    const stepRowCounts = {};
    let currentStepIndex = 0;

    // Now run the calculation steps
    for (const step of stepsToRun) {
      if (isCancelled) {
        console.log(`Skipping step "${step.name}" due to cancellation.`);
        overallSuccess = false; // Mark as not fully successful if steps are skipped due to cancel
        continue; // Skip to next step
      }

      currentStepIndex++;

      // Update overall progress
      progressUtils.outputProgress({
        status: 'running',
        operation: 'Running calculations',
        message: `Step ${currentStepIndex} of ${stepsToRun.length}: ${step.name}`,
        current: currentStepIndex - 1,
        total: stepsToRun.length,
        elapsed: progressUtils.formatElapsedTime(overallStartTime),
        remaining: progressUtils.estimateRemaining(overallStartTime, currentStepIndex - 1, stepsToRun.length),
        percentage: Math.round(((currentStepIndex - 1) / stepsToRun.length) * 100).toString(),
        timing: {
          overall_start_time: new Date(overallStartTime).toISOString(),
          current_step: step.name,
          completed_steps: completedSteps.length
        }
      });

      // Pass the progress utilities to the step executor
      const result = await executeSqlStep(step, progressUtils);

      if (result.success) {
        completedSteps.push({
          name: step.name,
          duration: result.duration,
          status: 'completed',
          rowsAffected: result.rowsAffected
        });
        stepTimings[step.name] = result.duration;
        stepRowCounts[step.name] = result.rowsAffected;
      }
    }

    // If we finished naturally (no errors thrown out)
    clearTimeout(mainTimeoutHandle); // Clear the main timeout

    // Update the combined history record on successful completion
    if (combinedHistoryId) {
      try {
        connection = await getConnection();
        const totalDuration = Math.round((Date.now() - overallStartTime) / 1000);

        // Get final processed counts
        const processedCounts = await connection.query(`
          SELECT
            (SELECT COUNT(*) FROM product_metrics WHERE last_calculated >= $1) as processed_products
        `, [new Date(overallStartTime)]);

        await connection.query(`
          UPDATE calculate_history
          SET
            end_time = NOW(),
            duration_seconds = $1::integer,
            status = $2::calculation_status,
            additional_info = additional_info || jsonb_build_object(
              'processed_products', $3::integer,
              'completed_steps', $4::jsonb,
              'step_timings', $5::jsonb,
              'step_row_counts', $6::jsonb
            )
          WHERE id = $7::integer;
        `, [
          totalDuration,
          isCancelled ? 'cancelled' : 'completed',
          processedCounts.rows[0].processed_products,
          JSON.stringify(completedSteps),
          JSON.stringify(stepTimings),
          JSON.stringify(stepRowCounts),
          combinedHistoryId
        ]);

        connection.release();
      } catch (historyError) {
        console.error('Error updating combined history record on completion:', historyError);
        if (connection) connection.release();
      }
    }

    if (isCancelled) {
      console.log("\n--- Calculation finished with cancellation ---");
      overallSuccess = false;
    } else {
      console.log("\n--- All enabled calculations finished successfully ---");

      // Send final completion progress
      progressUtils.outputProgress({
        status: 'complete',
        operation: 'All calculations completed',
        message: `Successfully completed ${completedSteps.length} of ${stepsToRun.length} steps`,
        current: stepsToRun.length,
        total: stepsToRun.length,
        elapsed: progressUtils.formatElapsedTime(overallStartTime),
        remaining: '0s',
        percentage: '100',
        timing: {
          overall_start_time: new Date(overallStartTime).toISOString(),
          overall_end_time: new Date().toISOString(),
          total_duration_seconds: Math.round((Date.now() - overallStartTime) / 1000),
          step_timings: stepTimings,
          completed_steps: completedSteps.length
        }
      });

      progressUtils.clearProgress(); // Clear progress only on full success
    }

  } catch (error) {
    clearTimeout(mainTimeoutHandle); // Clear the main timeout
    console.error("\n--- SCRIPT EXECUTION FAILED ---");
    // Error details were already logged by executeSqlStep or global handlers
    overallSuccess = false;

    // Update the combined history record on error
    if (combinedHistoryId) {
      try {
        connection = await getConnection();
        const totalDuration = Math.round((Date.now() - overallStartTime) / 1000);

        await connection.query(`
          UPDATE calculate_history
          SET
            end_time = NOW(),
            duration_seconds = $1::integer,
            status = $2::calculation_status,
            error_message = $3::text
          WHERE id = $4::integer;
        `, [
          totalDuration,
          isCancelled ? 'cancelled' : 'failed',
          error.message.substring(0, 1000),
          combinedHistoryId
        ]);

        connection.release();
      } catch (historyError) {
        console.error('Error updating combined history record on error:', historyError);
        if (connection) connection.release();
      }
    }
  } finally {
    await closePool();
    console.log(`Total execution time: ${progressUtils.formatElapsedTime(overallStartTime)}`);
    process.exit(overallSuccess ? 0 : 1);
  }
}

// --- Script Execution ---
if (require.main === module) {
  runAllCalculations();
} else {
  // Export functions if needed as a module (e.g., for testing or API)
  module.exports = {
    runAllCalculations,
    cancelCalculation,
    syncSettingsProductTable,
    // Expose individual steps if useful, wrapping them slightly
    runDailySnapshots: () => executeSqlStep({ name: 'Daily Snapshots Update', sqlFile: 'update_daily_snapshots.sql', historyType: 'daily_snapshots', statusModule: 'daily_snapshots' }, progressUtils),
    runProductMetrics: () => executeSqlStep({ name: 'Product Metrics Update', sqlFile: 'update_product_metrics.sql', historyType: 'product_metrics', statusModule: 'product_metrics' }, progressUtils),
    runPeriodicMetrics: () => executeSqlStep({ name: 'Periodic Metrics Update', sqlFile: 'update_periodic_metrics.sql', historyType: 'periodic_metrics', statusModule: 'periodic_metrics' }, progressUtils),
    runBrandMetrics: () => executeSqlStep({ name: 'Brand Metrics Update', sqlFile: 'calculate_brand_metrics.sql', historyType: 'brand_metrics', statusModule: 'brand_metrics' }, progressUtils),
    runVendorMetrics: () => executeSqlStep({ name: 'Vendor Metrics Update', sqlFile: 'calculate_vendor_metrics.sql', historyType: 'vendor_metrics', statusModule: 'vendor_metrics' }, progressUtils),
    runCategoryMetrics: () => executeSqlStep({ name: 'Category Metrics Update', sqlFile: 'calculate_category_metrics.sql', historyType: 'category_metrics', statusModule: 'category_metrics' }, progressUtils),
    getProgress: progressUtils.getProgress
  };
}
@@ -1,115 +0,0 @@
const path = require('path');
const { spawn } = require('child_process');

function outputProgress(data) {
  if (!data.status) {
    data = {
      status: 'running',
      ...data
    };
  }
  console.log(JSON.stringify(data));
}

function runScript(scriptPath) {
  return new Promise((resolve, reject) => {
    const child = spawn('node', [scriptPath], {
      stdio: ['inherit', 'pipe', 'pipe'],
      env: {
        ...process.env,
        PGHOST: process.env.DB_HOST,
        PGUSER: process.env.DB_USER,
        PGPASSWORD: process.env.DB_PASSWORD,
        PGDATABASE: process.env.DB_NAME,
        PGPORT: process.env.DB_PORT || '5432'
      }
    });
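    // (The PG* variables mirror the DB_* settings so child scripts that rely
    // on libpq-style connection defaults hit the same database.)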

    let output = '';

    child.stdout.on('data', (data) => {
      const lines = data.toString().split('\n');
      lines.filter(line => line.trim()).forEach(line => {
        console.log(line); // Pass each line through (JSON progress output or plain logs)
        output += line + '\n';
      });
    });

    child.stderr.on('data', (data) => {
      console.error(data.toString());
    });

    child.on('close', (code) => {
      if (code !== 0) {
        reject(new Error(`Script ${scriptPath} exited with code ${code}`));
      } else {
        resolve(output);
      }
    });

    child.on('error', (err) => {
      reject(err);
    });
  });
}

async function fullReset() {
  try {
    // Step 1: Reset Database
    outputProgress({
      operation: 'Starting full reset',
      message: 'Step 1/3: Resetting database...'
    });
    await runScript(path.join(__dirname, 'reset-db.js'));
    outputProgress({
      status: 'complete',
      operation: 'Database reset step complete',
      message: 'Database reset finished, moving to import...'
    });

    // Step 2: Import from Production
    outputProgress({
      operation: 'Starting import',
      message: 'Step 2/3: Importing from production...'
    });
    await runScript(path.join(__dirname, 'import-from-prod.js'));
    outputProgress({
      status: 'complete',
      operation: 'Import step complete',
      message: 'Import finished, moving to metrics calculation...'
    });

    // Step 3: Calculate Metrics
    outputProgress({
      operation: 'Starting metrics calculation',
      message: 'Step 3/3: Calculating metrics...'
    });
    await runScript(path.join(__dirname, 'calculate-metrics-new.js'));

    // Final completion message
    outputProgress({
      status: 'complete',
      operation: 'Full reset complete',
      message: 'Successfully completed all steps: database reset, import, and metrics calculation'
    });
  } catch (error) {
    outputProgress({
      status: 'error',
      operation: 'Full reset failed',
      error: error.message,
      stack: error.stack
    });
    process.exit(1);
  }
}

// Run if called directly
if (require.main === module) {
  fullReset();
}

module.exports = fullReset;
@@ -1,100 +0,0 @@
const path = require('path');
const { spawn } = require('child_process');

function outputProgress(data) {
  if (!data.status) {
    data = {
      status: 'running',
      ...data
    };
  }
  console.log(JSON.stringify(data));
}

function runScript(scriptPath) {
  return new Promise((resolve, reject) => {
    const child = spawn('node', [scriptPath], {
      stdio: ['inherit', 'pipe', 'pipe']
    });

    let output = '';

    child.stdout.on('data', (data) => {
      const lines = data.toString().split('\n');
      lines.filter(line => line.trim()).forEach(line => {
        console.log(line); // Pass each line through (JSON progress output or plain logs)
        output += line + '\n';
      });
    });

    child.stderr.on('data', (data) => {
      console.error(data.toString());
    });

    child.on('close', (code) => {
      if (code !== 0) {
        reject(new Error(`Script ${scriptPath} exited with code ${code}`));
      } else {
        resolve(output);
      }
    });

    child.on('error', (err) => {
      reject(err);
    });
  });
}

async function fullUpdate() {
  try {
    // Step 1: Import from Production
    outputProgress({
      operation: 'Starting full update',
      message: 'Step 1/2: Importing from production...'
    });
    await runScript(path.join(__dirname, 'import-from-prod.js'));
    outputProgress({
      status: 'complete',
      operation: 'Import step complete',
      message: 'Import finished, moving to metrics calculation...'
    });

    // Step 2: Calculate Metrics
    outputProgress({
      operation: 'Starting metrics calculation',
      message: 'Step 2/2: Calculating metrics...'
    });
    await runScript(path.join(__dirname, 'calculate-metrics-new.js'));
    outputProgress({
      status: 'complete',
      operation: 'Metrics step complete',
      message: 'Metrics calculation finished'
    });

    // Final completion message
    outputProgress({
      status: 'complete',
      operation: 'Full update complete',
      message: 'Successfully completed all steps: import and metrics calculation'
    });
  } catch (error) {
    outputProgress({
      status: 'error',
      operation: 'Full update failed',
      error: error.message,
      stack: error.stack
    });
    process.exit(1);
  }
}

// Run if called directly
if (require.main === module) {
  fullUpdate();
}

module.exports = fullUpdate;
@@ -1,352 +0,0 @@
const dotenv = require("dotenv");
const path = require("path");
const { outputProgress, formatElapsedTime } = require('./metrics-new/utils/progress');
const { setupConnections, closeConnections } = require('./import/utils');
const importCategories = require('./import/categories');
const { importProducts } = require('./import/products');
const importOrders = require('./import/orders');
const importPurchaseOrders = require('./import/purchase-orders');

dotenv.config({ path: path.join(__dirname, "../.env") });

// Constants to control which imports run
const IMPORT_CATEGORIES = true;
const IMPORT_PRODUCTS = true;
const IMPORT_ORDERS = true;
const IMPORT_PURCHASE_ORDERS = true;

// Add flag for incremental updates
const INCREMENTAL_UPDATE = process.env.INCREMENTAL_UPDATE !== 'false'; // Default to true unless explicitly set to false

// SSH configuration
const sshConfig = {
  ssh: {
    host: process.env.PROD_SSH_HOST,
    port: process.env.PROD_SSH_PORT || 22,
    username: process.env.PROD_SSH_USER,
    privateKey: process.env.PROD_SSH_KEY_PATH
      ? require("fs").readFileSync(process.env.PROD_SSH_KEY_PATH)
      : undefined,
    compress: true, // Enable SSH compression
  },
  prodDbConfig: {
    // MySQL config for production
    host: process.env.PROD_DB_HOST || "localhost",
    user: process.env.PROD_DB_USER,
    password: process.env.PROD_DB_PASSWORD,
    database: process.env.PROD_DB_NAME,
    port: process.env.PROD_DB_PORT || 3306,
    timezone: '-05:00', // Production DB always stores times in EST (UTC-5) regardless of DST
  },
  localDbConfig: {
    // PostgreSQL config for local
    host: process.env.DB_HOST,
    user: process.env.DB_USER,
    password: process.env.DB_PASSWORD,
    database: process.env.DB_NAME,
    port: process.env.DB_PORT || 5432,
    ssl: process.env.DB_SSL === 'true',
    connectionTimeoutMillis: 60000,
    idleTimeoutMillis: 30000,
    max: 10 // connection pool max size
  }
};

let isImportCancelled = false;

// Add cancel function
function cancelImport() {
  isImportCancelled = true;
  outputProgress({
    status: 'cancelled',
    operation: 'Import process',
    message: 'Import cancelled by user',
    current: 0,
    total: 0,
    elapsed: null,
    remaining: null,
    rate: 0
  });
}

async function main() {
  const startTime = Date.now();
  let connections;
  let completedSteps = 0;
  let importHistoryId;
  const totalSteps = [
    IMPORT_CATEGORIES,
    IMPORT_PRODUCTS,
    IMPORT_ORDERS,
    IMPORT_PURCHASE_ORDERS
  ].filter(Boolean).length;
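  // (filter(Boolean).length counts how many of the four import flags above
  // are enabled.)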

  try {
    // Initial progress update
    outputProgress({
      status: "running",
      operation: "Import process",
      message: `Initializing SSH tunnel for ${INCREMENTAL_UPDATE ? 'incremental' : 'full'} import...`,
      current: completedSteps,
      total: totalSteps,
      elapsed: formatElapsedTime(startTime)
    });

    connections = await setupConnections(sshConfig);
    const { prodConnection, localConnection } = connections;

    if (isImportCancelled) throw new Error("Import cancelled");

    // Clean up any previously running imports that weren't completed
    await localConnection.query(`
      UPDATE import_history
      SET
        status = 'cancelled',
        end_time = NOW(),
        duration_seconds = EXTRACT(EPOCH FROM (NOW() - start_time))::INTEGER,
        error_message = 'Previous import was not completed properly'
      WHERE status = 'running'
    `);

    // Create import history record for the overall session
    try {
      const [historyResult] = await localConnection.query(`
        INSERT INTO import_history (
          table_name,
          start_time,
          is_incremental,
          status,
          additional_info
        ) VALUES (
          'all_tables',
          NOW(),
          $1::boolean,
          'running',
          jsonb_build_object(
            'categories_enabled', $2::boolean,
            'products_enabled', $3::boolean,
            'orders_enabled', $4::boolean,
            'purchase_orders_enabled', $5::boolean
          )
        ) RETURNING id
      `, [INCREMENTAL_UPDATE, IMPORT_CATEGORIES, IMPORT_PRODUCTS, IMPORT_ORDERS, IMPORT_PURCHASE_ORDERS]);
      importHistoryId = historyResult.rows[0].id;
    } catch (error) {
      console.error("Error creating import history record:", error);
      outputProgress({
        status: "error",
        operation: "Import process",
        message: "Failed to create import history record",
        error: error.message
      });
      throw error;
    }

    const results = {
      categories: null,
      products: null,
      orders: null,
      purchaseOrders: null
    };

    let totalRecordsAdded = 0;
    let totalRecordsUpdated = 0;
    let totalRecordsDeleted = 0; // Add tracking for deleted records
    let totalRecordsSkipped = 0; // Track skipped/filtered records
    const stepTimings = {};

    // Run each import based on constants
    if (IMPORT_CATEGORIES) {
      const stepStart = Date.now();
      results.categories = await importCategories(prodConnection, localConnection);
      stepTimings.categories = Math.round((Date.now() - stepStart) / 1000);

      if (isImportCancelled) throw new Error("Import cancelled");
      completedSteps++;
      console.log('Categories import result:', results.categories);
      totalRecordsAdded += parseInt(results.categories?.recordsAdded || 0);
      totalRecordsUpdated += parseInt(results.categories?.recordsUpdated || 0);
    }

    if (IMPORT_PRODUCTS) {
      const stepStart = Date.now();
      results.products = await importProducts(prodConnection, localConnection, INCREMENTAL_UPDATE);
      stepTimings.products = Math.round((Date.now() - stepStart) / 1000);

      if (isImportCancelled) throw new Error("Import cancelled");
      completedSteps++;
      console.log('Products import result:', results.products);
      totalRecordsAdded += parseInt(results.products?.recordsAdded || 0);
      totalRecordsUpdated += parseInt(results.products?.recordsUpdated || 0);
      totalRecordsSkipped += parseInt(results.products?.skippedUnchanged || 0);
    }

    if (IMPORT_ORDERS) {
      const stepStart = Date.now();
      results.orders = await importOrders(prodConnection, localConnection, INCREMENTAL_UPDATE);
      stepTimings.orders = Math.round((Date.now() - stepStart) / 1000);

      if (isImportCancelled) throw new Error("Import cancelled");
      completedSteps++;
      console.log('Orders import result:', results.orders);
      totalRecordsAdded += parseInt(results.orders?.recordsAdded || 0);
      totalRecordsUpdated += parseInt(results.orders?.recordsUpdated || 0);
      totalRecordsSkipped += parseInt(results.orders?.totalSkipped || 0);
    }

    if (IMPORT_PURCHASE_ORDERS) {
      try {
        const stepStart = Date.now();
        results.purchaseOrders = await importPurchaseOrders(prodConnection, localConnection, INCREMENTAL_UPDATE);
        stepTimings.purchaseOrders = Math.round((Date.now() - stepStart) / 1000);

        if (isImportCancelled) throw new Error("Import cancelled");
        completedSteps++;
        console.log('Purchase orders import result:', results.purchaseOrders);

        // Handle potential error status
        if (results.purchaseOrders?.status === 'error') {
          console.error('Purchase orders import had an error:', results.purchaseOrders.error);
        } else {
          totalRecordsAdded += parseInt(results.purchaseOrders?.recordsAdded || 0);
          totalRecordsUpdated += parseInt(results.purchaseOrders?.recordsUpdated || 0);
          totalRecordsDeleted += parseInt(results.purchaseOrders?.recordsDeleted || 0);
        }
      } catch (error) {
        console.error('Error during purchase orders import:', error);
        // Continue with other imports, don't fail the whole process
        results.purchaseOrders = {
          status: 'error',
          error: error.message,
          recordsAdded: 0,
          recordsUpdated: 0
        };
      }
    }

    const endTime = Date.now();
    const totalElapsedSeconds = Math.round((endTime - startTime) / 1000);

    // Update import history with final stats
    await localConnection.query(`
      UPDATE import_history
      SET
        end_time = NOW(),
        duration_seconds = $1,
        records_added = $2,
        records_updated = $3,
        status = 'completed',
        additional_info = jsonb_build_object(
          'categories_enabled', $4::boolean,
          'products_enabled', $5::boolean,
          'orders_enabled', $6::boolean,
          'purchase_orders_enabled', $7::boolean,
          'categories_result', COALESCE($8::jsonb, 'null'::jsonb),
          'products_result', COALESCE($9::jsonb, 'null'::jsonb),
          'orders_result', COALESCE($10::jsonb, 'null'::jsonb),
          'purchase_orders_result', COALESCE($11::jsonb, 'null'::jsonb),
          'total_deleted', $12::integer,
          'total_skipped', $13::integer,
          'step_timings', $14::jsonb
        )
      WHERE id = $15
    `, [
      totalElapsedSeconds,
      parseInt(totalRecordsAdded),
      parseInt(totalRecordsUpdated),
      IMPORT_CATEGORIES,
      IMPORT_PRODUCTS,
      IMPORT_ORDERS,
      IMPORT_PURCHASE_ORDERS,
      JSON.stringify(results.categories),
      JSON.stringify(results.products),
      JSON.stringify(results.orders),
      JSON.stringify(results.purchaseOrders),
      totalRecordsDeleted,
      totalRecordsSkipped,
      JSON.stringify(stepTimings),
      importHistoryId
    ]);

    outputProgress({
      status: "complete",
      operation: "Import process",
      message: `${INCREMENTAL_UPDATE ? 'Incremental' : 'Full'} import completed successfully in ${formatElapsedTime(totalElapsedSeconds)}`,
      current: completedSteps,
      total: totalSteps,
      elapsed: formatElapsedTime(startTime),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        elapsed_time: formatElapsedTime(startTime),
        elapsed_seconds: totalElapsedSeconds,
        total_duration: formatElapsedTime(totalElapsedSeconds)
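        // (Observation: formatElapsedTime() is called elsewhere with a start
        // timestamp in milliseconds; passing totalElapsedSeconds here looks
        // inconsistent and may misreport the duration.)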
      },
      results
    });

    return results;
  } catch (error) {
    const endTime = Date.now();
    const totalElapsedSeconds = Math.round((endTime - startTime) / 1000);

    // Update import history with error
    if (importHistoryId && connections?.localConnection) {
      await connections.localConnection.query(`
        UPDATE import_history
        SET
          end_time = NOW(),
          duration_seconds = $1,
          status = $2,
          error_message = $3
        WHERE id = $4
      `, [totalElapsedSeconds, error.message === "Import cancelled" ? 'cancelled' : 'failed', error.message, importHistoryId]);
    }

    console.error("Error during import process:", error);
    outputProgress({
      status: error.message === "Import cancelled" ? "cancelled" : "error",
      operation: "Import process",
      message: error.message === "Import cancelled"
        ? `${INCREMENTAL_UPDATE ? 'Incremental' : 'Full'} import cancelled by user after ${formatElapsedTime(totalElapsedSeconds)}`
        : `${INCREMENTAL_UPDATE ? 'Incremental' : 'Full'} import failed after ${formatElapsedTime(totalElapsedSeconds)}`,
      error: error.message,
      current: completedSteps,
      total: totalSteps,
      elapsed: formatElapsedTime(startTime),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        elapsed_time: formatElapsedTime(startTime),
        elapsed_seconds: totalElapsedSeconds,
        total_duration: formatElapsedTime(totalElapsedSeconds)
      }
    });
    throw error;
  } finally {
    if (connections) {
      await closeConnections(connections).catch(err => {
        console.error("Error closing connections:", err);
      });
    }
  }
}

// Run the import only if this is the main module
if (require.main === module) {
  main().then((results) => {
    console.log('Import completed successfully:', results);
    // Force exit after a small delay to ensure all logs are written
    setTimeout(() => process.exit(0), 500);
  }).catch((error) => {
    console.error("Unhandled error in main process:", error);
    // Force exit with error code after a small delay
    setTimeout(() => process.exit(1), 500);
  });
}

// Export the functions needed by the route
module.exports = {
  main,
  cancelImport,
};
@@ -1,210 +0,0 @@
const { outputProgress, formatElapsedTime } = require('../metrics-new/utils/progress');

async function importCategories(prodConnection, localConnection) {
  outputProgress({
    operation: "Starting categories import",
    status: "running",
  });

  const startTime = Date.now();
  const typeOrder = [10, 20, 11, 21, 12, 13];
  let totalInserted = 0;
  let totalUpdated = 0;
  let skippedCategories = [];

  try {
    // Start a single transaction for the entire import
    await localConnection.query('BEGIN');

    // Temporarily disable the trigger that's causing problems
    await localConnection.query('ALTER TABLE categories DISABLE TRIGGER update_categories_updated_at');

    // Process each type in order with its own savepoint
    for (const type of typeOrder) {
      try {
        // Create a savepoint for this type
        await localConnection.query(`SAVEPOINT category_type_${type}`);

        // Production query remains MySQL compatible
        const [categories] = await prodConnection.query(
          `
          SELECT
            pc.cat_id,
            pc.name,
            pc.type,
            CASE
              WHEN pc.type IN (10, 20) THEN NULL -- Top level categories should have no parent
              WHEN pc.master_cat_id IS NULL THEN NULL
              ELSE pc.master_cat_id
            END as parent_id,
            pc.combined_name as description
          FROM product_categories pc
          WHERE pc.type = ?
          ORDER BY pc.cat_id
          `,
          [type]
        );

        if (categories.length === 0) {
          await localConnection.query(`RELEASE SAVEPOINT category_type_${type}`);
          continue;
        }

        console.log(`Processing ${categories.length} type ${type} categories`);

        // For types that can have parents (11, 21, 12, 13), we'll proceed directly
        // No need to check for parent existence since we process in hierarchical order
        let categoriesToInsert = categories;

        if (categoriesToInsert.length === 0) {
          console.log(`No valid categories of type ${type} to insert`);
          await localConnection.query(`RELEASE SAVEPOINT category_type_${type}`);
          continue;
        }

        // PostgreSQL upsert query with parameterized values
        const values = categoriesToInsert.flatMap((cat) => [
          cat.cat_id,
          cat.name,
          cat.type,
          cat.parent_id,
          cat.description,
          'active',
          new Date(),
          new Date()
        ]);

        const placeholders = categoriesToInsert
          .map((_, i) => `($${i * 8 + 1}, $${i * 8 + 2}, $${i * 8 + 3}, $${i * 8 + 4}, $${i * 8 + 5}, $${i * 8 + 6}, $${i * 8 + 7}, $${i * 8 + 8})`)
          .join(',');
|
||||
|
||||
// Insert categories with ON CONFLICT clause for PostgreSQL
|
||||
const query = `
|
||||
WITH inserted_categories AS (
|
||||
INSERT INTO categories (
|
||||
cat_id, name, type, parent_id, description, status, created_at, updated_at
|
||||
)
|
||||
VALUES ${placeholders}
|
||||
ON CONFLICT (cat_id) DO UPDATE SET
|
||||
name = EXCLUDED.name,
|
||||
type = EXCLUDED.type,
|
||||
parent_id = EXCLUDED.parent_id,
|
||||
description = EXCLUDED.description,
|
||||
status = EXCLUDED.status,
|
||||
updated_at = EXCLUDED.updated_at
|
||||
WHERE -- Only update if at least one field has changed
|
||||
categories.name IS DISTINCT FROM EXCLUDED.name OR
|
||||
categories.type IS DISTINCT FROM EXCLUDED.type OR
|
||||
categories.parent_id IS DISTINCT FROM EXCLUDED.parent_id OR
|
||||
categories.description IS DISTINCT FROM EXCLUDED.description OR
|
||||
categories.status IS DISTINCT FROM EXCLUDED.status
|
||||
RETURNING
|
||||
cat_id,
|
||||
CASE
|
||||
WHEN xmax = 0 THEN true
|
||||
ELSE false
|
||||
END as is_insert
|
||||
)
|
||||
SELECT
|
||||
COUNT(*) as total,
|
||||
COUNT(*) FILTER (WHERE is_insert) as inserted,
|
||||
COUNT(*) FILTER (WHERE NOT is_insert) as updated
|
||||
FROM inserted_categories`;
|
||||
|
||||
const result = await localConnection.query(query, values);
|
||||
|
||||
// Get the first result since query returns an array
|
||||
const queryResult = Array.isArray(result) ? result[0] : result;
|
||||
|
||||
if (!queryResult || !queryResult.rows || !queryResult.rows[0]) {
|
||||
console.error('Query failed to return results');
|
||||
throw new Error('Query did not return expected results');
|
||||
}
|
||||
|
||||
const total = parseInt(queryResult.rows[0].total) || 0;
|
||||
const inserted = parseInt(queryResult.rows[0].inserted) || 0;
|
||||
const updated = parseInt(queryResult.rows[0].updated) || 0;
|
||||
|
||||
console.log(`Total: ${total}, Inserted: ${inserted}, Updated: ${updated}`);
|
||||
|
||||
totalInserted += inserted;
|
||||
totalUpdated += updated;
|
||||
|
||||
// Release the savepoint for this type
|
||||
await localConnection.query(`RELEASE SAVEPOINT category_type_${type}`);
|
||||
|
||||
outputProgress({
|
||||
status: "running",
|
||||
operation: "Categories import",
|
||||
message: `Imported ${inserted} (updated ${updated}) categories of type ${type}`,
|
||||
current: totalInserted + totalUpdated,
|
||||
total: categories.length,
|
||||
elapsed: formatElapsedTime(startTime),
|
||||
});
|
||||
} catch (error) {
|
||||
// Rollback to the savepoint for this type
|
||||
await localConnection.query(`ROLLBACK TO SAVEPOINT category_type_${type}`);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// Commit the entire transaction - we'll do this even if we have skipped categories
|
||||
await localConnection.query('COMMIT');
|
||||
|
||||
// Update sync status
|
||||
await localConnection.query(`
|
||||
INSERT INTO sync_status (table_name, last_sync_timestamp)
|
||||
VALUES ('categories', NOW())
|
||||
ON CONFLICT (table_name) DO UPDATE SET
|
||||
last_sync_timestamp = NOW()
|
||||
`);
|
||||
|
||||
// Re-enable the trigger
|
||||
await localConnection.query('ALTER TABLE categories ENABLE TRIGGER update_categories_updated_at');
|
||||
|
||||
outputProgress({
|
||||
status: "complete",
|
||||
operation: "Categories import completed",
|
||||
current: totalInserted + totalUpdated,
|
||||
total: totalInserted + totalUpdated,
|
||||
duration: formatElapsedTime(startTime),
|
||||
warnings: skippedCategories.length > 0 ? {
|
||||
message: "Some categories were skipped due to missing parents",
|
||||
skippedCategories
|
||||
} : undefined
|
||||
});
|
||||
|
||||
return {
|
||||
status: "complete",
|
||||
recordsAdded: totalInserted,
|
||||
recordsUpdated: totalUpdated,
|
||||
totalRecords: totalInserted + totalUpdated,
|
||||
warnings: skippedCategories.length > 0 ? {
|
||||
message: "Some categories were skipped due to missing parents",
|
||||
skippedCategories
|
||||
} : undefined
|
||||
};
|
||||
} catch (error) {
|
||||
console.error("Error importing categories:", error);
|
||||
|
||||
// Only rollback if we haven't committed yet
|
||||
try {
|
||||
await localConnection.query('ROLLBACK');
|
||||
|
||||
// Make sure we re-enable the trigger even if there was an error
|
||||
await localConnection.query('ALTER TABLE categories ENABLE TRIGGER update_categories_updated_at');
|
||||
} catch (rollbackError) {
|
||||
console.error("Error during rollback:", rollbackError);
|
||||
}
|
||||
|
||||
outputProgress({
|
||||
status: "error",
|
||||
operation: "Categories import failed",
|
||||
error: error.message
|
||||
});
|
||||
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = importCategories;
|
||||
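
// A hedged invocation sketch (both connections are assumed to be established by the caller):
//   const result = await importCategories(prodConnection, localConnection);
//   console.log(`categories added: ${result.recordsAdded}, updated: ${result.recordsUpdated}`);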
@@ -1,779 +0,0 @@
const { outputProgress, formatElapsedTime, estimateRemaining, calculateRate } = require('../metrics-new/utils/progress');
const { importMissingProducts, setupTemporaryTables, cleanupTemporaryTables, materializeCalculations } = require('./products');

/**
 * Imports orders from a production MySQL database to a local PostgreSQL database.
 * It can run in two modes:
 * 1. Incremental update mode (default): Only fetch orders that have changed since the last sync time.
 * 2. Full update mode: Fetch all eligible orders within the last 5 years regardless of timestamp.
 *
 * @param {object} prodConnection - A MySQL connection to the production DB (MySQL 5.7).
 * @param {object} localConnection - A PostgreSQL connection to the local DB.
 * @param {boolean} incrementalUpdate - Set to false for a full sync; true for incremental.
 *
 * @returns {object} Information about the sync operation.
 */
async function importOrders(prodConnection, localConnection, incrementalUpdate = true) {
  const startTime = Date.now();
  const skippedOrders = new Set();
  const missingProducts = new Set();
  let recordsAdded = 0;
  let recordsUpdated = 0;
  let processedCount = 0;
  let importedCount = 0;
  let totalOrderItems = 0;
  let totalUniqueOrders = 0;
  let cumulativeProcessedOrders = 0;

  try {
    // Get last sync info - NOT in a transaction anymore
    const [syncInfo] = await localConnection.query(
      "SELECT last_sync_timestamp FROM sync_status WHERE table_name = 'orders'"
    );
    const lastSyncTime = syncInfo?.rows?.[0]?.last_sync_timestamp || '1970-01-01';

    console.log('Orders: Using last sync time:', lastSyncTime);

    // First get count of order items - Keep MySQL compatible for production
    const [[{ total }]] = await prodConnection.query(`
      SELECT COUNT(*) as total
      FROM order_items oi
      JOIN _order o ON oi.order_id = o.order_id
      WHERE o.order_status >= 15
        AND o.date_placed >= DATE_SUB(CURRENT_DATE, INTERVAL ${incrementalUpdate ? '1' : '5'} YEAR)
        AND o.date_placed IS NOT NULL
        ${incrementalUpdate ? `
        AND (
          o.stamp > ?
          OR oi.stamp > ?
          OR EXISTS (
            SELECT 1 FROM order_discount_items odi
            WHERE odi.order_id = o.order_id
              AND odi.pid = oi.prod_pid
          )
          OR EXISTS (
            SELECT 1 FROM order_tax_info oti
            JOIN order_tax_info_products otip ON oti.taxinfo_id = otip.taxinfo_id
            WHERE oti.order_id = o.order_id
              AND otip.pid = oi.prod_pid
              AND oti.stamp > ?
          )
        )
        ` : ''}
    `, incrementalUpdate ? [lastSyncTime, lastSyncTime, lastSyncTime] : []);

    totalOrderItems = total;
    console.log('Orders: Found changes:', totalOrderItems);

    // Get order items - Keep MySQL compatible for production
    console.log('Orders: Starting MySQL query...');
    const [orderItems] = await prodConnection.query(`
      SELECT
        oi.order_id,
        oi.prod_pid,
        COALESCE(NULLIF(TRIM(oi.prod_itemnumber), ''), 'NO-SKU') as SKU,
        oi.prod_price as price,
        oi.qty_ordered as quantity,
        COALESCE(oi.prod_price_reg - oi.prod_price, 0) as base_discount,
        oi.stamp as last_modified
      FROM order_items oi
      JOIN _order o ON oi.order_id = o.order_id
      WHERE o.order_status >= 15
        AND o.date_placed >= DATE_SUB(CURRENT_DATE, INTERVAL ${incrementalUpdate ? '1' : '5'} YEAR)
        AND o.date_placed IS NOT NULL
        ${incrementalUpdate ? `
        AND (
          o.stamp > ?
          OR oi.stamp > ?
          OR EXISTS (
            SELECT 1 FROM order_discount_items odi
            WHERE odi.order_id = o.order_id
              AND odi.pid = oi.prod_pid
          )
          OR EXISTS (
            SELECT 1 FROM order_tax_info oti
            JOIN order_tax_info_products otip ON oti.taxinfo_id = otip.taxinfo_id
            WHERE oti.order_id = o.order_id
              AND otip.pid = oi.prod_pid
              AND oti.stamp > ?
          )
        )
        ` : ''}
    `, incrementalUpdate ? [lastSyncTime, lastSyncTime, lastSyncTime] : []);

    console.log('Orders: Found', orderItems.length, 'order items to process');

    // Create tables in PostgreSQL for data processing
    // Start a transaction just for creating the temp tables
    await localConnection.beginTransaction();
    try {
      await localConnection.query(`
        DROP TABLE IF EXISTS temp_order_items;
        DROP TABLE IF EXISTS temp_order_meta;
        DROP TABLE IF EXISTS temp_order_discounts;
        DROP TABLE IF EXISTS temp_order_taxes;
        DROP TABLE IF EXISTS temp_order_costs;
        DROP TABLE IF EXISTS temp_main_discounts;
        DROP TABLE IF EXISTS temp_item_discounts;

        CREATE TEMP TABLE temp_order_items (
          order_id INTEGER NOT NULL,
          pid INTEGER NOT NULL,
          sku TEXT NOT NULL,
          price NUMERIC(14, 4) NOT NULL,
          quantity INTEGER NOT NULL,
          base_discount NUMERIC(14, 4) DEFAULT 0,
          PRIMARY KEY (order_id, pid)
        );

        CREATE TEMP TABLE temp_order_meta (
          order_id INTEGER NOT NULL,
          date TIMESTAMP WITH TIME ZONE NOT NULL,
          customer TEXT NOT NULL,
          customer_name TEXT NOT NULL,
          status TEXT,
          canceled BOOLEAN,
          summary_discount NUMERIC(14, 4) DEFAULT 0.0000,
          summary_subtotal NUMERIC(14, 4) DEFAULT 0.0000,
          summary_discount_subtotal NUMERIC(14, 4) DEFAULT 0.0000,
          PRIMARY KEY (order_id)
        );

        CREATE TEMP TABLE temp_order_discounts (
          order_id INTEGER NOT NULL,
          pid INTEGER NOT NULL,
          discount NUMERIC(14, 4) NOT NULL,
          PRIMARY KEY (order_id, pid)
        );

        CREATE TEMP TABLE temp_main_discounts (
          order_id INTEGER NOT NULL,
          discount_id INTEGER NOT NULL,
          discount_amount_subtotal NUMERIC(14, 4) DEFAULT 0.0000,
          PRIMARY KEY (order_id, discount_id)
        );

        CREATE TEMP TABLE temp_item_discounts (
          order_id INTEGER NOT NULL,
          pid INTEGER NOT NULL,
          discount_id INTEGER NOT NULL,
          amount NUMERIC(14, 4) NOT NULL,
          PRIMARY KEY (order_id, pid, discount_id)
        );

        CREATE TEMP TABLE temp_order_taxes (
          order_id INTEGER NOT NULL,
          pid INTEGER NOT NULL,
          tax NUMERIC(14, 4) NOT NULL,
          PRIMARY KEY (order_id, pid)
        );

        CREATE TEMP TABLE temp_order_costs (
          order_id INTEGER NOT NULL,
          pid INTEGER NOT NULL,
          costeach NUMERIC(14, 4) DEFAULT 0.0000,
          PRIMARY KEY (order_id, pid)
        );

        CREATE INDEX idx_temp_order_items_pid ON temp_order_items(pid);
        CREATE INDEX idx_temp_order_meta_order_id ON temp_order_meta(order_id);
        CREATE INDEX idx_temp_order_discounts_order_pid ON temp_order_discounts(order_id, pid);
        CREATE INDEX idx_temp_order_taxes_order_pid ON temp_order_taxes(order_id, pid);
        CREATE INDEX idx_temp_order_costs_order_pid ON temp_order_costs(order_id, pid);
        CREATE INDEX idx_temp_main_discounts_discount_id ON temp_main_discounts(discount_id);
        CREATE INDEX idx_temp_item_discounts_order_pid ON temp_item_discounts(order_id, pid);
        CREATE INDEX idx_temp_item_discounts_discount_id ON temp_item_discounts(discount_id);
      `);
      await localConnection.commit();
    } catch (error) {
      await localConnection.rollback();
      throw error;
    }

    // Insert order items in batches - each batch gets its own transaction
    for (let i = 0; i < orderItems.length; i += 5000) {
      await localConnection.beginTransaction();
      try {
        const batch = orderItems.slice(i, Math.min(i + 5000, orderItems.length));
        const placeholders = batch.map((_, idx) =>
          `($${idx * 6 + 1}, $${idx * 6 + 2}, $${idx * 6 + 3}, $${idx * 6 + 4}, $${idx * 6 + 5}, $${idx * 6 + 6})`
        ).join(",");
        const values = batch.flatMap(item => [
          item.order_id, item.prod_pid, item.SKU, item.price, item.quantity, item.base_discount
        ]);

        await localConnection.query(`
          INSERT INTO temp_order_items (order_id, pid, sku, price, quantity, base_discount)
          VALUES ${placeholders}
          ON CONFLICT (order_id, pid) DO UPDATE SET
            sku = EXCLUDED.sku,
            price = EXCLUDED.price,
            quantity = EXCLUDED.quantity,
            base_discount = EXCLUDED.base_discount
        `, values);

        await localConnection.commit();

        processedCount = i + batch.length;
        outputProgress({
          status: "running",
          operation: "Orders import",
          message: `Loading order items: ${processedCount} of ${totalOrderItems}`,
          current: processedCount,
          total: totalOrderItems,
          elapsed: formatElapsedTime(startTime),
          remaining: estimateRemaining(startTime, processedCount, totalOrderItems),
          rate: calculateRate(startTime, processedCount)
        });
      } catch (error) {
        await localConnection.rollback();
        throw error;
      }
    }

    // Get unique order IDs
    const orderIds = [...new Set(orderItems.map(item => item.order_id))];
    totalUniqueOrders = orderIds.length;
    console.log('Orders: Processing', totalUniqueOrders, 'unique orders');

    // Reset processed count for order processing phase
    processedCount = 0;

    // Process metadata, discounts, taxes, and costs in batches
    // (each batch is run sequentially below to avoid transaction conflicts)
    const METADATA_BATCH_SIZE = 2000;
    const PG_BATCH_SIZE = 200;

    // Add a helper function for title case conversion
    function toTitleCase(str) {
      if (!str) return '';
      return str.toLowerCase().split(' ').map(word => {
        return word.charAt(0).toUpperCase() + word.slice(1);
      }).join(' ');
    }
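
    // For reference, how the helper above behaves on sample (illustrative) input:
    //   toTitleCase('JANE DOE') -> 'Jane Doe'
    //   toTitleCase('')         -> ''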

    const processMetadataBatch = async (batchIds) => {
      const [orders] = await prodConnection.query(`
        SELECT
          o.order_id,
          o.date_placed as date,
          o.order_cid as customer,
          CONCAT(COALESCE(u.firstname, ''), ' ', COALESCE(u.lastname, '')) as customer_name,
          o.order_status as status,
          CASE WHEN o.date_cancelled != '0000-00-00 00:00:00' THEN 1 ELSE 0 END as canceled,
          o.summary_discount,
          o.summary_subtotal,
          o.summary_discount_subtotal
        FROM _order o
        LEFT JOIN users u ON o.order_cid = u.cid
        WHERE o.order_id IN (?)
      `, [batchIds]);

      // Process in sub-batches for PostgreSQL
      await localConnection.beginTransaction();
      try {
        for (let j = 0; j < orders.length; j += PG_BATCH_SIZE) {
          const subBatch = orders.slice(j, j + PG_BATCH_SIZE);
          if (subBatch.length === 0) continue;

          const placeholders = subBatch.map((_, idx) =>
            `($${idx * 9 + 1}, $${idx * 9 + 2}, $${idx * 9 + 3}, $${idx * 9 + 4}, $${idx * 9 + 5}, $${idx * 9 + 6}, $${idx * 9 + 7}, $${idx * 9 + 8}, $${idx * 9 + 9})`
          ).join(",");

          const values = subBatch.flatMap(order => [
            order.order_id,
            new Date(order.date), // Convert to TIMESTAMP WITH TIME ZONE
            order.customer,
            toTitleCase(order.customer_name) || '',
            order.status.toString(), // Convert status to TEXT
            order.canceled,
            order.summary_discount || 0,
            order.summary_subtotal || 0,
            order.summary_discount_subtotal || 0
          ]);

          await localConnection.query(`
            INSERT INTO temp_order_meta (
              order_id, date, customer, customer_name, status, canceled,
              summary_discount, summary_subtotal, summary_discount_subtotal
            )
            VALUES ${placeholders}
            ON CONFLICT (order_id) DO UPDATE SET
              date = EXCLUDED.date,
              customer = EXCLUDED.customer,
              customer_name = EXCLUDED.customer_name,
              status = EXCLUDED.status,
              canceled = EXCLUDED.canceled,
              summary_discount = EXCLUDED.summary_discount,
              summary_subtotal = EXCLUDED.summary_subtotal,
              summary_discount_subtotal = EXCLUDED.summary_discount_subtotal
          `, values);
        }
        await localConnection.commit();
      } catch (error) {
        await localConnection.rollback();
        throw error;
      }
    };

    const processDiscountsBatch = async (batchIds) => {
      // First, load main discount records
      const [mainDiscounts] = await prodConnection.query(`
        SELECT order_id, discount_id, discount_amount_subtotal
        FROM order_discounts
        WHERE order_id IN (?)
      `, [batchIds]);

      if (mainDiscounts.length > 0) {
        await localConnection.beginTransaction();
        try {
          for (let j = 0; j < mainDiscounts.length; j += PG_BATCH_SIZE) {
            const subBatch = mainDiscounts.slice(j, j + PG_BATCH_SIZE);
            if (subBatch.length === 0) continue;

            const placeholders = subBatch.map((_, idx) =>
              `($${idx * 3 + 1}, $${idx * 3 + 2}, $${idx * 3 + 3})`
            ).join(",");

            const values = subBatch.flatMap(d => [
              d.order_id,
              d.discount_id,
              d.discount_amount_subtotal || 0
            ]);

            await localConnection.query(`
              INSERT INTO temp_main_discounts (order_id, discount_id, discount_amount_subtotal)
              VALUES ${placeholders}
              ON CONFLICT (order_id, discount_id) DO UPDATE SET
                discount_amount_subtotal = EXCLUDED.discount_amount_subtotal
            `, values);
          }
          await localConnection.commit();
        } catch (error) {
          await localConnection.rollback();
          throw error;
        }
      }

      // Then, load item discount records
      const [discounts] = await prodConnection.query(`
        SELECT order_id, pid, discount_id, amount
        FROM order_discount_items
        WHERE order_id IN (?)
      `, [batchIds]);

      if (discounts.length === 0) return;

      // Process in memory to handle potential duplicates
      const discountMap = new Map();
      for (const d of discounts) {
        const key = `${d.order_id}-${d.pid}-${d.discount_id}`;
        discountMap.set(key, d);
      }

      const uniqueDiscounts = Array.from(discountMap.values());

      await localConnection.beginTransaction();
      try {
        for (let j = 0; j < uniqueDiscounts.length; j += PG_BATCH_SIZE) {
          const subBatch = uniqueDiscounts.slice(j, j + PG_BATCH_SIZE);
          if (subBatch.length === 0) continue;

          const placeholders = subBatch.map((_, idx) =>
            `($${idx * 4 + 1}, $${idx * 4 + 2}, $${idx * 4 + 3}, $${idx * 4 + 4})`
          ).join(",");

          const values = subBatch.flatMap(d => [
            d.order_id,
            d.pid,
            d.discount_id,
            d.amount || 0
          ]);

          await localConnection.query(`
            INSERT INTO temp_item_discounts (order_id, pid, discount_id, amount)
            VALUES ${placeholders}
            ON CONFLICT (order_id, pid, discount_id) DO UPDATE SET
              amount = EXCLUDED.amount
          `, values);
        }

        // Create aggregated view with a simpler, safer query that avoids duplicates
        await localConnection.query(`
          TRUNCATE temp_order_discounts;

          INSERT INTO temp_order_discounts (order_id, pid, discount)
          SELECT order_id, pid, SUM(amount) as discount
          FROM temp_item_discounts
          GROUP BY order_id, pid
        `);

        await localConnection.commit();
      } catch (error) {
        await localConnection.rollback();
        throw error;
      }
    };

    const processTaxesBatch = async (batchIds) => {
      // Optimized tax query to avoid subquery
      const [taxes] = await prodConnection.query(`
        SELECT oti.order_id, otip.pid, otip.item_taxes_to_collect as tax
        FROM (
          SELECT order_id, MAX(taxinfo_id) as latest_taxinfo_id
          FROM order_tax_info
          WHERE order_id IN (?)
          GROUP BY order_id
        ) latest_info
        JOIN order_tax_info oti ON oti.order_id = latest_info.order_id
          AND oti.taxinfo_id = latest_info.latest_taxinfo_id
        JOIN order_tax_info_products otip ON oti.taxinfo_id = otip.taxinfo_id
      `, [batchIds]);

      if (taxes.length === 0) return;

      await localConnection.beginTransaction();
      try {
        for (let j = 0; j < taxes.length; j += PG_BATCH_SIZE) {
          const subBatch = taxes.slice(j, j + PG_BATCH_SIZE);
          if (subBatch.length === 0) continue;

          const placeholders = subBatch.map((_, idx) =>
            `($${idx * 3 + 1}, $${idx * 3 + 2}, $${idx * 3 + 3})`
          ).join(",");

          const values = subBatch.flatMap(t => [
            t.order_id,
            t.pid,
            t.tax || 0
          ]);

          await localConnection.query(`
            INSERT INTO temp_order_taxes (order_id, pid, tax)
            VALUES ${placeholders}
            ON CONFLICT (order_id, pid) DO UPDATE SET
              tax = EXCLUDED.tax
          `, values);
        }
        await localConnection.commit();
      } catch (error) {
        await localConnection.rollback();
        throw error;
      }
    };

    const processCostsBatch = async (batchIds) => {
      // Modified query to ensure one row per order_id/pid by using a subquery
      const [costs] = await prodConnection.query(`
        SELECT
          oc.orderid as order_id,
          oc.pid,
          oc.costeach
        FROM order_costs oc
        INNER JOIN (
          SELECT
            orderid,
            pid,
            MAX(id) as max_id
          FROM order_costs
          WHERE orderid IN (?)
            AND pending = 0
          GROUP BY orderid, pid
        ) latest ON oc.orderid = latest.orderid AND oc.pid = latest.pid AND oc.id = latest.max_id
      `, [batchIds]);

      if (costs.length === 0) return;

      await localConnection.beginTransaction();
      try {
        for (let j = 0; j < costs.length; j += PG_BATCH_SIZE) {
          const subBatch = costs.slice(j, j + PG_BATCH_SIZE);
          if (subBatch.length === 0) continue;

          const placeholders = subBatch.map((_, idx) =>
            `($${idx * 3 + 1}, $${idx * 3 + 2}, $${idx * 3 + 3})`
          ).join(",");

          const values = subBatch.flatMap(c => [
            c.order_id,
            c.pid,
            c.costeach || 0
          ]);

          await localConnection.query(`
            INSERT INTO temp_order_costs (order_id, pid, costeach)
            VALUES ${placeholders}
            ON CONFLICT (order_id, pid) DO UPDATE SET
              costeach = EXCLUDED.costeach
          `, values);
        }
        await localConnection.commit();
      } catch (error) {
        await localConnection.rollback();
        throw error;
      }
    };

    // Process all data types SEQUENTIALLY for each batch - not in parallel
    for (let i = 0; i < orderIds.length; i += METADATA_BATCH_SIZE) {
      const batchIds = orderIds.slice(i, i + METADATA_BATCH_SIZE);

      // Run these sequentially instead of in parallel to avoid transaction conflicts
      await processMetadataBatch(batchIds);
      await processDiscountsBatch(batchIds);
      await processTaxesBatch(batchIds);
      await processCostsBatch(batchIds);

      processedCount = i + batchIds.length;
      outputProgress({
        status: "running",
        operation: "Orders import",
        message: `Loading order data: ${processedCount} of ${totalUniqueOrders}`,
        current: processedCount,
        total: totalUniqueOrders,
        elapsed: formatElapsedTime(startTime),
        remaining: estimateRemaining(startTime, processedCount, totalUniqueOrders),
        rate: calculateRate(startTime, processedCount)
      });
    }

    // Pre-check all products at once
    const allOrderPids = [...new Set(orderItems.map(item => item.prod_pid))];
    console.log('Orders: Checking', allOrderPids.length, 'unique products');

    const [existingProducts] = allOrderPids.length > 0 ? await localConnection.query(
      "SELECT pid FROM products WHERE pid = ANY($1::bigint[])",
      [allOrderPids]
    ) : [{ rows: [] }]; // fallback shaped like a query result so the .rows access below is safe

    const existingPids = new Set(existingProducts.rows.map(p => p.pid));

    // Process in smaller batches
    for (let i = 0; i < orderIds.length; i += 2000) { // Increased from 1000 to 2000
      const batchIds = orderIds.slice(i, i + 2000);

      // Get combined data for this batch in sub-batches
      const PG_BATCH_SIZE = 200; // Increased from 100 to 200
      for (let j = 0; j < batchIds.length; j += PG_BATCH_SIZE) {
        const subBatchIds = batchIds.slice(j, j + PG_BATCH_SIZE);

        // Start a transaction for this sub-batch
        await localConnection.beginTransaction();
        try {
          const [orders] = await localConnection.query(`
            WITH order_totals AS (
              SELECT
                oi.order_id,
                oi.pid,
                -- Instead of using ARRAY_AGG which can cause duplicate issues, use SUM with a CASE
                SUM(CASE
                  WHEN COALESCE(md.discount_amount_subtotal, 0) > 0 THEN id.amount
                  ELSE 0
                END) as promo_discount_sum,
                COALESCE(ot.tax, 0) as total_tax,
                COALESCE(oc.costeach, oi.price * 0.5) as costeach
              FROM temp_order_items oi
              LEFT JOIN temp_item_discounts id ON oi.order_id = id.order_id AND oi.pid = id.pid
              LEFT JOIN temp_main_discounts md ON id.order_id = md.order_id AND id.discount_id = md.discount_id
              LEFT JOIN temp_order_taxes ot ON oi.order_id = ot.order_id AND oi.pid = ot.pid
              LEFT JOIN temp_order_costs oc ON oi.order_id = oc.order_id AND oi.pid = oc.pid
              WHERE oi.order_id = ANY($1)
              GROUP BY oi.order_id, oi.pid, ot.tax, oc.costeach
            )
            SELECT
              oi.order_id as order_number,
              oi.pid::bigint as pid,
              oi.sku,
              om.date,
              oi.price,
              oi.quantity,
              (
                -- Part 1: Sale Savings for the Line
                (oi.base_discount * oi.quantity)
                +
                -- Part 2: Prorated Points Discount (if applicable)
                CASE
                  WHEN om.summary_discount_subtotal > 0 AND om.summary_subtotal > 0 THEN
                    COALESCE(ROUND((om.summary_discount_subtotal * (oi.price * oi.quantity)) / NULLIF(om.summary_subtotal, 0), 4), 0)
                  ELSE 0
                END
                +
                -- Part 3: Specific Item-Level Discount (only if parent discount affected subtotal)
                COALESCE(ot.promo_discount_sum, 0)
              )::NUMERIC(14, 4) as discount,
              COALESCE(ot.total_tax, 0)::NUMERIC(14, 4) as tax,
              false as tax_included,
              0 as shipping,
              om.customer,
              om.customer_name,
              om.status,
              om.canceled,
              COALESCE(ot.costeach, oi.price * 0.5)::NUMERIC(14, 4) as costeach
            FROM temp_order_items oi
            JOIN temp_order_meta om ON oi.order_id = om.order_id
            LEFT JOIN order_totals ot ON oi.order_id = ot.order_id AND oi.pid = ot.pid
            WHERE oi.order_id = ANY($1)
            ORDER BY oi.order_id, oi.pid
          `, [subBatchIds]);
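
          // Worked example of the three-part discount above (numbers are illustrative):
          // a line with base_discount 2.00, quantity 3, price 10.00 on an order with
          // summary_discount_subtotal 5.00 against summary_subtotal 50.00, plus a 1.50
          // item-level promo, yields 2.00*3 + 5.00*(30.00/50.00) + 1.50 = 10.50.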

          // Filter orders and track missing products
          const validOrders = [];
          const processedOrderItems = new Set();
          const processedOrders = new Set();

          for (const order of orders.rows) {
            if (!existingPids.has(order.pid)) {
              missingProducts.add(order.pid);
              skippedOrders.add(order.order_number);
              continue;
            }
            validOrders.push(order);
            processedOrderItems.add(`${order.order_number}-${order.pid}`);
            processedOrders.add(order.order_number);
          }

          // Process valid orders in smaller sub-batches
          const FINAL_BATCH_SIZE = 100; // Increased from 50 to 100
          for (let k = 0; k < validOrders.length; k += FINAL_BATCH_SIZE) {
            const subBatch = validOrders.slice(k, k + FINAL_BATCH_SIZE);

            const placeholders = subBatch.map((_, idx) => {
              const base = idx * 15; // 15 columns including costeach
              return `($${base + 1}, $${base + 2}, $${base + 3}, $${base + 4}, $${base + 5}, $${base + 6}, $${base + 7}, $${base + 8}, $${base + 9}, $${base + 10}, $${base + 11}, $${base + 12}, $${base + 13}, $${base + 14}, $${base + 15})`;
            }).join(',');

            const batchValues = subBatch.flatMap(o => [
              o.order_number,
              o.pid,
              o.sku || 'NO-SKU',
              o.date, // This is now a TIMESTAMP WITH TIME ZONE
              o.price,
              o.quantity,
              o.discount,
              o.tax,
              o.tax_included,
              o.shipping,
              o.customer,
              o.customer_name,
              o.status.toString(), // Convert status to TEXT
              o.canceled,
              o.costeach
            ]);

            const [result] = await localConnection.query(`
              WITH inserted_orders AS (
                INSERT INTO orders (
                  order_number, pid, sku, date, price, quantity, discount,
                  tax, tax_included, shipping, customer, customer_name,
                  status, canceled, costeach
                )
                VALUES ${placeholders}
                ON CONFLICT (order_number, pid) DO UPDATE SET
                  sku = EXCLUDED.sku,
                  date = EXCLUDED.date,
                  price = EXCLUDED.price,
                  quantity = EXCLUDED.quantity,
                  discount = EXCLUDED.discount,
                  tax = EXCLUDED.tax,
                  tax_included = EXCLUDED.tax_included,
                  shipping = EXCLUDED.shipping,
                  customer = EXCLUDED.customer,
                  customer_name = EXCLUDED.customer_name,
                  status = EXCLUDED.status,
                  canceled = EXCLUDED.canceled,
                  costeach = EXCLUDED.costeach
                WHERE -- Only update if at least one key field has changed
                  orders.price IS DISTINCT FROM EXCLUDED.price OR
                  orders.quantity IS DISTINCT FROM EXCLUDED.quantity OR
                  orders.discount IS DISTINCT FROM EXCLUDED.discount OR
                  orders.tax IS DISTINCT FROM EXCLUDED.tax OR
                  orders.status IS DISTINCT FROM EXCLUDED.status OR
                  orders.canceled IS DISTINCT FROM EXCLUDED.canceled OR
                  orders.costeach IS DISTINCT FROM EXCLUDED.costeach OR
                  orders.date IS DISTINCT FROM EXCLUDED.date
                RETURNING xmax = 0 as inserted
              )
              SELECT
                COUNT(*) FILTER (WHERE inserted) as inserted,
                COUNT(*) FILTER (WHERE NOT inserted) as updated
              FROM inserted_orders
            `, batchValues);

            const { inserted, updated } = result.rows[0];
            recordsAdded += parseInt(inserted) || 0;
            recordsUpdated += parseInt(updated) || 0;
            importedCount += subBatch.length;
          }

          await localConnection.commit();

          cumulativeProcessedOrders += processedOrders.size;
          outputProgress({
            status: "running",
            operation: "Orders import",
            message: `Importing orders: ${cumulativeProcessedOrders} of ${totalUniqueOrders}`,
            current: cumulativeProcessedOrders,
            total: totalUniqueOrders,
            elapsed: formatElapsedTime(startTime),
            remaining: estimateRemaining(startTime, cumulativeProcessedOrders, totalUniqueOrders),
            rate: calculateRate(startTime, cumulativeProcessedOrders)
          });
        } catch (error) {
          await localConnection.rollback();
          throw error;
        }
      }
    }

    // Start a transaction for updating sync status and dropping temp tables
    await localConnection.beginTransaction();
    try {
      // Update sync status
      await localConnection.query(`
        INSERT INTO sync_status (table_name, last_sync_timestamp)
        VALUES ('orders', NOW())
        ON CONFLICT (table_name) DO UPDATE SET
          last_sync_timestamp = NOW()
      `);

      // Cleanup temporary tables
      await localConnection.query(`
        DROP TABLE IF EXISTS temp_order_items;
        DROP TABLE IF EXISTS temp_order_meta;
        DROP TABLE IF EXISTS temp_order_discounts;
        DROP TABLE IF EXISTS temp_order_taxes;
        DROP TABLE IF EXISTS temp_order_costs;
        DROP TABLE IF EXISTS temp_main_discounts;
        DROP TABLE IF EXISTS temp_item_discounts;
      `);

      // Commit final transaction
      await localConnection.commit();
    } catch (error) {
      await localConnection.rollback();
      throw error;
    }

    return {
      status: "complete",
      totalImported: Math.floor(importedCount) || 0,
      recordsAdded: parseInt(recordsAdded) || 0,
      recordsUpdated: parseInt(recordsUpdated) || 0,
      totalSkipped: skippedOrders.size || 0,
      missingProducts: missingProducts.size || 0,
      totalProcessed: orderItems.length, // Total order items in source
      incrementalUpdate,
      lastSyncTime,
      details: {
        uniqueOrdersProcessed: cumulativeProcessedOrders,
        totalOrderItems: orderItems.length,
        skippedDueToMissingProducts: skippedOrders.size,
        missingProductIds: Array.from(missingProducts).slice(0, 100) // First 100 for debugging
      }
    };
  } catch (error) {
    console.error("Error during orders import:", error);
    throw error;
  }
}

module.exports = importOrders;
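
// A hedged invocation sketch (connection setup is assumed to happen elsewhere):
//   const summary = await importOrders(prodConnection, localConnection, false); // full 5-year sync
//   console.log(summary.recordsAdded, summary.recordsUpdated, summary.totalSkipped);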
@@ -1,950 +0,0 @@
const { outputProgress, formatElapsedTime, estimateRemaining, calculateRate } = require('../metrics-new/utils/progress');
const BATCH_SIZE = 1000; // Smaller batch size for better progress tracking
const MAX_RETRIES = 3;
const RETRY_DELAY = 5000; // 5 seconds
const dotenv = require("dotenv");
const path = require("path");
dotenv.config({ path: path.join(__dirname, "../../.env") });

// Utility functions
const imageUrlBase = process.env.PRODUCT_IMAGE_URL_BASE || 'https://sbing.com/i/products/0000/';
const getImageUrls = (pid, iid = 1) => {
  const paddedPid = pid.toString().padStart(6, '0');
  // Use padded PID only for the first 3 digits
  const prefix = paddedPid.slice(0, 3);
  // Use the actual pid for the rest of the URL
  const basePath = `${imageUrlBase}${prefix}/${pid}`;
  return {
    image: `${basePath}-t-${iid}.jpg`,
    image_175: `${basePath}-175x175-${iid}.jpg`,
    image_full: `${basePath}-o-${iid}.jpg`
  };
};
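
// Worked example of the padding scheme above (pid 9876 and iid 2 are illustrative,
// and imageUrlBase is assumed to be the default): padStart gives '009876', so the
// prefix is '009' while the path keeps the raw pid:
//   getImageUrls(9876, 2).image -> 'https://sbing.com/i/products/0000/009/9876-t-2.jpg'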

// Add helper function for retrying operations with exponential backoff
async function withRetry(operation, errorMessage) {
  let lastError;
  for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
    try {
      return await operation();
    } catch (error) {
      lastError = error;
      console.error(`${errorMessage} (Attempt ${attempt}/${MAX_RETRIES}):`, error);
      if (attempt < MAX_RETRIES) {
        const backoffTime = RETRY_DELAY * Math.pow(2, attempt - 1);
        await new Promise(resolve => setTimeout(resolve, backoffTime));
      }
    }
  }
  throw lastError;
}
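
// Usage sketch (the wrapped query is illustrative, not from this file); with
// MAX_RETRIES = 3 and RETRY_DELAY = 5000, failures wait 5s and then 10s between attempts:
//   const [rows] = await withRetry(
//     () => prodConnection.query('SELECT 1'),
//     'Error running health check'
//   );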

// Helper to normalize MySQL zero-dates and invalid dates to null
function validateDate(mysqlDate) {
  if (!mysqlDate || mysqlDate === '0000-00-00' || mysqlDate === '0000-00-00 00:00:00') {
    return null;
  }
  // Check if the date is valid
  const date = new Date(mysqlDate);
  return isNaN(date.getTime()) ? null : mysqlDate;
}
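
// Examples of the normalization above (inputs are illustrative):
//   validateDate('0000-00-00')          -> null  (MySQL zero date)
//   validateDate('2024-03-01 10:00:00') -> '2024-03-01 10:00:00'
//   validateDate('not-a-date')          -> null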
|
||||
async function setupTemporaryTables(connection) {
|
||||
// Drop the table if it exists
|
||||
await connection.query('DROP TABLE IF EXISTS temp_products');
|
||||
|
||||
// Create the temporary table
|
||||
await connection.query(`
|
||||
CREATE TEMP TABLE temp_products (
|
||||
pid BIGINT NOT NULL,
|
||||
title TEXT,
|
||||
description TEXT,
|
||||
sku TEXT,
|
||||
stock_quantity INTEGER DEFAULT 0,
|
||||
preorder_count INTEGER DEFAULT 0,
|
||||
notions_inv_count INTEGER DEFAULT 0,
|
||||
price NUMERIC(14, 4) NOT NULL DEFAULT 0,
|
||||
regular_price NUMERIC(14, 4) NOT NULL DEFAULT 0,
|
||||
cost_price NUMERIC(14, 4),
|
||||
vendor TEXT,
|
||||
vendor_reference TEXT,
|
||||
notions_reference TEXT,
|
||||
brand TEXT,
|
||||
line TEXT,
|
||||
subline TEXT,
|
||||
artist TEXT,
|
||||
categories TEXT,
|
||||
created_at TIMESTAMP WITH TIME ZONE,
|
||||
first_received TIMESTAMP WITH TIME ZONE,
|
||||
landing_cost_price NUMERIC(14, 4),
|
||||
barcode TEXT,
|
||||
harmonized_tariff_code TEXT,
|
||||
updated_at TIMESTAMP WITH TIME ZONE,
|
||||
visible BOOLEAN,
|
||||
managing_stock BOOLEAN DEFAULT true,
|
||||
replenishable BOOLEAN,
|
||||
permalink TEXT,
|
||||
moq INTEGER DEFAULT 1,
|
||||
uom INTEGER DEFAULT 1,
|
||||
rating NUMERIC(14, 4),
|
||||
reviews INTEGER,
|
||||
weight NUMERIC(14, 4),
|
||||
length NUMERIC(14, 4),
|
||||
width NUMERIC(14, 4),
|
||||
height NUMERIC(14, 4),
|
||||
country_of_origin TEXT,
|
||||
location TEXT,
|
||||
total_sold INTEGER,
|
||||
baskets INTEGER,
|
||||
notifies INTEGER,
|
||||
date_last_sold TIMESTAMP WITH TIME ZONE,
|
||||
primary_iid INTEGER,
|
||||
image TEXT,
|
||||
image_175 TEXT,
|
||||
image_full TEXT,
|
||||
options TEXT,
|
||||
tags TEXT,
|
||||
needs_update BOOLEAN DEFAULT TRUE,
|
||||
PRIMARY KEY (pid)
|
||||
)`);
|
||||
|
||||
// Create the index
|
||||
await connection.query('CREATE INDEX idx_temp_products_needs_update ON temp_products (needs_update)');
|
||||
}
|
||||
|
||||
async function cleanupTemporaryTables(connection) {
|
||||
await connection.query('DROP TABLE IF EXISTS temp_products');
|
||||
}
|
||||
|
||||
async function importMissingProducts(prodConnection, localConnection, missingPids) {
|
||||
if (!missingPids || missingPids.length === 0) {
|
||||
return {
|
||||
status: "complete",
|
||||
recordsAdded: 0,
|
||||
message: "No missing products to import"
|
||||
};
|
||||
}
|
||||
|
||||
try {
|
||||
// Setup temporary tables
|
||||
await setupTemporaryTables(localConnection);
|
||||
|
||||
// Get product data from production - Keep MySQL compatible
|
||||
const [prodData] = await prodConnection.query(`
|
||||
SELECT
|
||||
p.pid,
|
||||
p.description AS title,
|
||||
p.notes AS description,
|
||||
p.itemnumber AS sku,
|
||||
p.date_created,
|
||||
p.datein AS first_received,
|
||||
p.location,
|
||||
p.upc AS barcode,
|
||||
p.harmonized_tariff_code,
|
||||
p.stamp AS updated_at,
|
||||
CASE WHEN si.show + si.buyable > 0 THEN 1 ELSE 0 END AS visible,
|
||||
CASE
|
||||
WHEN p.reorder < 0 THEN 0
|
||||
WHEN p.date_created >= DATE_SUB(CURRENT_DATE, INTERVAL 1 YEAR) THEN 1
|
||||
WHEN COALESCE(pnb.inventory, 0) > 0 THEN 1
|
||||
WHEN (
|
||||
(COALESCE(pls.date_sold, '0000-00-00') = '0000-00-00' OR pls.date_sold <= DATE_SUB(CURRENT_DATE, INTERVAL 5 YEAR))
|
||||
AND (p.datein = '0000-00-00 00:00:00' OR p.datein <= DATE_SUB(CURRENT_TIMESTAMP, INTERVAL 5 YEAR))
|
||||
AND (p.date_refill = '0000-00-00 00:00:00' OR p.date_refill <= DATE_SUB(CURRENT_TIMESTAMP, INTERVAL 5 YEAR))
|
||||
) THEN 0
|
||||
ELSE 1
|
||||
END AS replenishable,
|
||||
COALESCE(si.available_local, 0) as stock_quantity,
|
||||
0 as pending_qty,
|
||||
COALESCE(ci.onpreorder, 0) as preorder_count,
|
||||
COALESCE(pnb.inventory, 0) as notions_inv_count,
|
||||
COALESCE(pcp.price_each, 0) as price,
|
||||
COALESCE(p.sellingprice, 0) AS regular_price,
|
||||
CASE
|
||||
WHEN EXISTS (SELECT 1 FROM product_inventory WHERE pid = p.pid AND count > 0)
|
||||
THEN (
|
||||
SELECT ROUND(SUM(costeach * count) / SUM(count), 5)
|
||||
FROM product_inventory
|
||||
WHERE pid = p.pid AND count > 0
|
||||
)
|
||||
ELSE (SELECT costeach FROM product_inventory WHERE pid = p.pid ORDER BY daterec DESC LIMIT 1)
|
||||
END AS cost_price,
|
||||
NULL as landing_cost_price,
|
||||
s.companyname AS vendor,
|
||||
CASE
|
||||
WHEN s.companyname = 'Notions' THEN sid.notions_itemnumber
|
||||
ELSE sid.supplier_itemnumber
|
||||
END AS vendor_reference,
|
||||
sid.notions_itemnumber AS notions_reference,
|
||||
CONCAT('https://www.acherryontop.com/shop/product/', p.pid) AS permalink,
|
||||
pc1.name AS brand,
|
||||
pc2.name AS line,
|
||||
pc3.name AS subline,
|
||||
pc4.name AS artist,
|
||||
COALESCE(CASE
|
||||
WHEN sid.supplier_id = 92 THEN sid.notions_qty_per_unit
|
||||
ELSE sid.supplier_qty_per_unit
|
||||
END, sid.notions_qty_per_unit) AS moq,
|
||||
p.rating,
|
||||
p.rating_votes AS reviews,
|
||||
p.weight,
|
||||
p.length,
|
||||
p.width,
|
||||
p.height,
|
||||
p.country_of_origin,
|
||||
(SELECT COUNT(*) FROM mybasket mb WHERE mb.item = p.pid AND mb.qty > 0) AS baskets,
|
||||
(SELECT COUNT(*) FROM product_notify pn WHERE pn.pid = p.pid) AS notifies,
|
||||
(SELECT COALESCE(SUM(oi.qty_ordered), 0)
|
||||
FROM order_items oi
|
||||
JOIN _order o ON oi.order_id = o.order_id
|
||||
WHERE oi.prod_pid = p.pid AND o.order_status >= 20) AS total_sold,
|
||||
pls.date_sold as date_last_sold,
|
||||
(SELECT iid FROM product_images WHERE pid = p.pid AND \`order\` = 255 LIMIT 1) AS primary_iid,
|
||||
GROUP_CONCAT(DISTINCT CASE
|
||||
WHEN pc.cat_id IS NOT NULL
|
||||
AND pc.type IN (10, 20, 11, 21, 12, 13)
|
||||
AND pci.cat_id NOT IN (16, 17)
|
||||
THEN pci.cat_id
|
||||
END) as category_ids
|
||||
FROM products p
|
||||
LEFT JOIN shop_inventory si ON p.pid = si.pid AND si.store = 0
|
||||
LEFT JOIN current_inventory ci ON p.pid = ci.pid
|
||||
LEFT JOIN product_notions_b2b pnb ON p.pid = pnb.pid
|
||||
LEFT JOIN product_current_prices pcp ON p.pid = pcp.pid AND pcp.active = 1
|
||||
LEFT JOIN supplier_item_data sid ON p.pid = sid.pid
|
||||
LEFT JOIN suppliers s ON sid.supplier_id = s.supplierid
|
||||
LEFT JOIN product_category_index pci ON p.pid = pci.pid
|
||||
LEFT JOIN product_categories pc ON pci.cat_id = pc.cat_id
|
||||
LEFT JOIN product_categories pc1 ON p.company = pc1.cat_id
|
||||
LEFT JOIN product_categories pc2 ON p.line = pc2.cat_id
|
||||
LEFT JOIN product_categories pc3 ON p.subline = pc3.cat_id
|
||||
LEFT JOIN product_categories pc4 ON p.artist = pc4.cat_id
|
||||
LEFT JOIN product_last_sold pls ON p.pid = pls.pid
|
||||
WHERE p.pid IN (?)
|
||||
GROUP BY p.pid
|
||||
`, [missingPids]);
|
||||
|
||||
if (!prodData || prodData.length === 0) {
|
||||
return {
|
||||
status: "complete",
|
||||
recordsAdded: 0,
|
||||
message: "No products found in production database"
|
||||
};
|
||||
}
|
||||
|
||||
// Process in batches
|
||||
let recordsAdded = 0;
|
||||
for (let i = 0; i < prodData.length; i += BATCH_SIZE) {
|
||||
const batch = prodData.slice(i, i + BATCH_SIZE);
|
||||
|
||||
const placeholders = batch.map((_, idx) => {
|
||||
const base = idx * 48; // 48 columns
|
||||
return `(${Array.from({ length: 48 }, (_, i) => `$${base + i + 1}`).join(', ')})`;
|
||||
}).join(',');
|
||||
|
||||
const values = batch.flatMap(row => {
|
||||
const imageUrls = getImageUrls(row.pid, row.primary_iid || 1);
|
||||
return [
|
||||
row.pid,
|
||||
row.title,
|
||||
row.description,
|
||||
row.sku || '',
|
||||
row.stock_quantity > 5000 ? 0 : Math.max(0, row.stock_quantity),
|
||||
row.preorder_count,
|
||||
row.notions_inv_count,
|
||||
row.price,
|
||||
row.regular_price,
|
||||
row.cost_price,
|
||||
row.vendor,
|
||||
row.vendor_reference,
|
||||
row.notions_reference,
|
||||
row.brand,
|
||||
row.line,
|
||||
row.subline,
|
||||
row.artist,
|
||||
row.category_ids,
|
||||
validateDate(row.date_created),
|
||||
validateDate(row.first_received),
|
||||
row.landing_cost_price,
|
||||
row.barcode,
|
||||
row.harmonized_tariff_code,
|
||||
validateDate(row.updated_at),
|
||||
row.visible,
|
||||
true,
|
||||
row.replenishable,
|
||||
row.permalink,
|
||||
Math.max(1, Math.round(row.moq || 1)),
|
||||
1,
|
||||
row.rating,
|
||||
row.reviews,
|
||||
row.weight,
|
||||
row.length,
|
||||
row.width,
|
||||
row.height,
|
||||
row.country_of_origin,
|
||||
row.location,
|
||||
row.total_sold,
|
||||
row.baskets,
|
||||
row.notifies,
|
||||
validateDate(row.date_last_sold),
|
||||
row.primary_iid,
|
||||
imageUrls.image,
|
||||
imageUrls.image_175,
|
||||
imageUrls.image_full,
|
||||
null,
|
||||
null
|
||||
];
|
||||
});
|
||||
|
||||
const [result] = await localConnection.query(`
|
||||
WITH inserted_products AS (
|
||||
INSERT INTO products (
|
||||
pid, title, description, sku, stock_quantity, preorder_count, notions_inv_count,
|
||||
price, regular_price, cost_price, vendor, vendor_reference, notions_reference,
|
||||
brand, line, subline, artist, categories, created_at, first_received,
|
||||
landing_cost_price, barcode, harmonized_tariff_code, updated_at, visible,
|
||||
managing_stock, replenishable, permalink, moq, uom, rating, reviews,
|
||||
weight, length, width, height, country_of_origin, location, total_sold,
|
||||
baskets, notifies, date_last_sold, primary_iid, image, image_175, image_full, options, tags
|
||||
)
|
||||
VALUES ${placeholders}
|
||||
ON CONFLICT (pid) DO NOTHING
|
||||
RETURNING pid
|
||||
)
|
||||
SELECT COUNT(*) as inserted FROM inserted_products
|
||||
`, values);
|
||||
|
||||
recordsAdded += parseInt(result.rows[0].inserted, 10) || 0;
|
||||
}
|
||||
|
||||
return {
|
||||
status: "complete",
|
||||
recordsAdded,
|
||||
message: `Successfully imported ${recordsAdded} missing products`
|
||||
};
|
||||
} catch (error) {
|
||||
console.error('Error importing missing products:', error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
async function materializeCalculations(prodConnection, localConnection, incrementalUpdate = true, lastSyncTime = '1970-01-01', startTime = Date.now()) {
|
||||
outputProgress({
|
||||
status: "running",
|
||||
operation: "Products import",
|
||||
message: "Fetching product data from production"
|
||||
});
|
||||
|
||||
// Get all product data in a single optimized query - Keep MySQL compatible
|
||||
const [prodData] = await prodConnection.query(`
|
||||
SELECT
|
||||
p.pid,
|
||||
p.description AS title,
|
||||
p.notes AS description,
|
||||
p.itemnumber AS sku,
|
||||
p.date_created,
|
||||
p.datein AS first_received,
|
||||
p.location,
|
||||
p.upc AS barcode,
|
||||
p.harmonized_tariff_code,
|
||||
p.stamp AS updated_at,
|
||||
CASE WHEN si.show + si.buyable > 0 THEN 1 ELSE 0 END AS visible,
|
||||
CASE
|
||||
WHEN p.reorder < 0 THEN 0
|
||||
WHEN p.date_created >= DATE_SUB(CURRENT_DATE, INTERVAL 1 YEAR) THEN 1
|
||||
WHEN COALESCE(pnb.inventory, 0) > 0 THEN 1
|
||||
WHEN (
|
||||
(COALESCE(pls.date_sold, '0000-00-00') = '0000-00-00' OR pls.date_sold <= DATE_SUB(CURRENT_DATE, INTERVAL 5 YEAR))
|
||||
AND (p.datein = '0000-00-00 00:00:00' OR p.datein <= DATE_SUB(CURRENT_TIMESTAMP, INTERVAL 5 YEAR))
|
||||
AND (p.date_refill = '0000-00-00 00:00:00' OR p.date_refill <= DATE_SUB(CURRENT_TIMESTAMP, INTERVAL 5 YEAR))
|
||||
) THEN 0
|
||||
ELSE 1
|
||||
END AS replenishable,
|
||||
COALESCE(si.available_local, 0) as stock_quantity,
|
||||
0 as pending_qty,
|
||||
COALESCE(ci.onpreorder, 0) as preorder_count,
|
||||
COALESCE(pnb.inventory, 0) as notions_inv_count,
|
||||
COALESCE(pcp.price_each, 0) as price,
|
||||
COALESCE(p.sellingprice, 0) AS regular_price,
|
||||
CASE
|
||||
WHEN EXISTS (SELECT 1 FROM product_inventory WHERE pid = p.pid AND count > 0)
|
||||
THEN (
|
||||
SELECT ROUND(SUM(costeach * count) / SUM(count), 5)
|
||||
FROM product_inventory
|
||||
WHERE pid = p.pid AND count > 0
|
||||
)
|
||||
ELSE (SELECT costeach FROM product_inventory WHERE pid = p.pid ORDER BY daterec DESC LIMIT 1)
|
||||
END AS cost_price,
|
||||
NULL as landing_cost_price,
|
||||
s.companyname AS vendor,
|
||||
CASE
|
||||
WHEN s.companyname = 'Notions' THEN sid.notions_itemnumber
|
||||
ELSE sid.supplier_itemnumber
|
||||
END AS vendor_reference,
|
||||
sid.notions_itemnumber AS notions_reference,
|
||||
CONCAT('https://www.acherryontop.com/shop/product/', p.pid) AS permalink,
|
||||
pc1.name AS brand,
|
||||
pc2.name AS line,
|
||||
pc3.name AS subline,
|
||||
pc4.name AS artist,
|
||||
COALESCE(CASE
|
||||
WHEN sid.supplier_id = 92 THEN sid.notions_qty_per_unit
|
||||
ELSE sid.supplier_qty_per_unit
|
||||
END, sid.notions_qty_per_unit) AS moq,
|
||||
p.rating,
|
||||
p.rating_votes AS reviews,
|
||||
p.weight,
|
||||
p.length,
|
||||
p.width,
|
||||
p.height,
|
||||
p.country_of_origin,
|
||||
(SELECT COUNT(*) FROM mybasket mb WHERE mb.item = p.pid AND mb.qty > 0) AS baskets,
|
||||
(SELECT COUNT(*) FROM product_notify pn WHERE pn.pid = p.pid) AS notifies,
|
||||
(SELECT COALESCE(SUM(oi.qty_ordered), 0)
|
||||
FROM order_items oi
|
||||
JOIN _order o ON oi.order_id = o.order_id
|
||||
WHERE oi.prod_pid = p.pid AND o.order_status >= 20) AS total_sold,
|
||||
pls.date_sold as date_last_sold,
|
||||
(SELECT iid FROM product_images WHERE pid = p.pid AND \`order\` = 255 LIMIT 1) AS primary_iid,
|
||||
GROUP_CONCAT(DISTINCT CASE
|
||||
WHEN pc.cat_id IS NOT NULL
|
||||
AND pc.type IN (10, 20, 11, 21, 12, 13)
|
||||
AND pci.cat_id NOT IN (16, 17)
|
||||
THEN pci.cat_id
|
||||
END) as category_ids
|
||||
FROM products p
|
||||
LEFT JOIN shop_inventory si ON p.pid = si.pid AND si.store = 0
|
||||
LEFT JOIN current_inventory ci ON p.pid = ci.pid
|
||||
LEFT JOIN product_notions_b2b pnb ON p.pid = pnb.pid
|
||||
LEFT JOIN product_current_prices pcp ON p.pid = pcp.pid AND pcp.active = 1
|
||||
LEFT JOIN supplier_item_data sid ON p.pid = sid.pid
|
||||
LEFT JOIN suppliers s ON sid.supplier_id = s.supplierid
|
||||
LEFT JOIN product_category_index pci ON p.pid = pci.pid
|
||||
LEFT JOIN product_categories pc ON pci.cat_id = pc.cat_id
|
||||
LEFT JOIN product_categories pc1 ON p.company = pc1.cat_id
|
||||
LEFT JOIN product_categories pc2 ON p.line = pc2.cat_id
|
||||
LEFT JOIN product_categories pc3 ON p.subline = pc3.cat_id
|
||||
LEFT JOIN product_categories pc4 ON p.artist = pc4.cat_id
|
||||
LEFT JOIN product_last_sold pls ON p.pid = pls.pid
|
||||
WHERE ${incrementalUpdate ? `
|
||||
p.stamp > ? OR
|
||||
ci.stamp > ? OR
|
||||
pcp.date_deactive > ? OR
|
||||
pcp.date_active > ? OR
|
||||
pnb.date_updated > ?
|
||||
-- Add condition for product_images changes if needed for incremental updates
|
||||
-- OR EXISTS (SELECT 1 FROM product_images pi WHERE pi.pid = p.pid AND pi.stamp > ?)
|
||||
` : 'TRUE'}
|
||||
GROUP BY p.pid
|
||||
`, incrementalUpdate ? [lastSyncTime, lastSyncTime, lastSyncTime, lastSyncTime, lastSyncTime /*, lastSyncTime */] : []);
|
||||
|
||||
outputProgress({
|
||||
status: "running",
|
||||
operation: "Products import",
|
||||
message: `Processing ${prodData.length} product records`
|
||||
});
|
||||
|
||||
// Insert all product data into temp table in batches
|
||||
for (let i = 0; i < prodData.length; i += BATCH_SIZE) {
|
||||
const batch = prodData.slice(i, Math.min(i + BATCH_SIZE, prodData.length));
|
||||
|
||||
await withRetry(async () => {
|
||||
const placeholders = batch.map((_, idx) => {
|
||||
const base = idx * 48; // 48 columns
|
||||
return `(${Array.from({ length: 48 }, (_, i) => `$${base + i + 1}`).join(', ')})`;
|
      }).join(',');

      const values = batch.flatMap(row => {
        const imageUrls = getImageUrls(row.pid, row.primary_iid || 1);
        return [
          row.pid, row.title, row.description, row.sku || '',
          row.stock_quantity > 5000 ? 0 : Math.max(0, row.stock_quantity),
          row.preorder_count, row.notions_inv_count,
          row.price, row.regular_price, row.cost_price,
          row.vendor, row.vendor_reference, row.notions_reference,
          row.brand, row.line, row.subline, row.artist, row.category_ids,
          validateDate(row.date_created), validateDate(row.first_received),
          row.landing_cost_price, row.barcode, row.harmonized_tariff_code,
          validateDate(row.updated_at), row.visible,
          true, // managing_stock
          row.replenishable, row.permalink,
          Math.max(1, Math.round(row.moq || 1)),
          1, // uom (default unit of measure)
          row.rating, row.reviews,
          row.weight, row.length, row.width, row.height,
          row.country_of_origin, row.location,
          row.total_sold, row.baskets, row.notifies,
          validateDate(row.date_last_sold), row.primary_iid,
          imageUrls.image, imageUrls.image_175, imageUrls.image_full,
          null, // options (not populated at this stage)
          null  // tags (not populated at this stage)
        ];
      });

      await localConnection.query(`
        INSERT INTO temp_products (
          pid, title, description, sku, stock_quantity, preorder_count, notions_inv_count,
          price, regular_price, cost_price, vendor, vendor_reference, notions_reference,
          brand, line, subline, artist, categories, created_at, first_received,
          landing_cost_price, barcode, harmonized_tariff_code, updated_at, visible,
          managing_stock, replenishable, permalink, moq, uom, rating, reviews,
          weight, length, width, height, country_of_origin, location, total_sold,
          baskets, notifies, date_last_sold, primary_iid, image, image_175, image_full, options, tags
        ) VALUES ${placeholders}
        ON CONFLICT (pid) DO UPDATE SET
          title = EXCLUDED.title, description = EXCLUDED.description, sku = EXCLUDED.sku,
          stock_quantity = EXCLUDED.stock_quantity, preorder_count = EXCLUDED.preorder_count,
          notions_inv_count = EXCLUDED.notions_inv_count, price = EXCLUDED.price,
          regular_price = EXCLUDED.regular_price, cost_price = EXCLUDED.cost_price,
          vendor = EXCLUDED.vendor, vendor_reference = EXCLUDED.vendor_reference,
          notions_reference = EXCLUDED.notions_reference, brand = EXCLUDED.brand,
          line = EXCLUDED.line, subline = EXCLUDED.subline, artist = EXCLUDED.artist,
          created_at = EXCLUDED.created_at, first_received = EXCLUDED.first_received,
          landing_cost_price = EXCLUDED.landing_cost_price, barcode = EXCLUDED.barcode,
          harmonized_tariff_code = EXCLUDED.harmonized_tariff_code, updated_at = EXCLUDED.updated_at,
          visible = EXCLUDED.visible, managing_stock = EXCLUDED.managing_stock,
          replenishable = EXCLUDED.replenishable, permalink = EXCLUDED.permalink,
          moq = EXCLUDED.moq, uom = EXCLUDED.uom, rating = EXCLUDED.rating,
          reviews = EXCLUDED.reviews, weight = EXCLUDED.weight, length = EXCLUDED.length,
          width = EXCLUDED.width, height = EXCLUDED.height,
          country_of_origin = EXCLUDED.country_of_origin, location = EXCLUDED.location,
          total_sold = EXCLUDED.total_sold, baskets = EXCLUDED.baskets,
          notifies = EXCLUDED.notifies, date_last_sold = EXCLUDED.date_last_sold,
          primary_iid = EXCLUDED.primary_iid, image = EXCLUDED.image,
          image_175 = EXCLUDED.image_175, image_full = EXCLUDED.image_full,
          options = EXCLUDED.options, tags = EXCLUDED.tags
        RETURNING
          xmax = 0 as inserted
      `, values);
    }, `Error inserting batch ${i} to ${i + batch.length}`);
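
    // Note: `RETURNING xmax = 0 AS inserted` leans on a PostgreSQL internal —
    // xmax is the system column holding the deleting/locking transaction id,
    // so it is 0 for a freshly inserted row and non-zero for a row rewritten
    // by ON CONFLICT DO UPDATE. A minimal sketch of consuming that flag
    // (assuming the pg-style result shape used throughout this file):
    //
    //   const { rows } = await pool.query(upsertSql, values);
    //   const added = rows.filter(r => r.inserted).length;
    //   const updated = rows.length - added;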

    outputProgress({
      status: "running",
      operation: "Products import",
      message: `Imported ${i + batch.length} of ${prodData.length} products`,
      current: i + batch.length,
      total: prodData.length,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, i + batch.length, prodData.length),
      rate: calculateRate(startTime, i + batch.length)
    });
  }

  outputProgress({
    status: "running",
    operation: "Products import",
    message: "Finished materializing calculations"
  });

  // Identify which products actually need updating
  outputProgress({
    status: "running",
    operation: "Products import",
    message: "Identifying changed products"
  });

  // Mark products that haven't changed as needs_update = false
  await localConnection.query(`
    UPDATE temp_products t
    SET needs_update = FALSE
    FROM products p
    WHERE t.pid = p.pid
      AND t.title IS NOT DISTINCT FROM p.title
      AND t.description IS NOT DISTINCT FROM p.description
      AND t.sku IS NOT DISTINCT FROM p.sku
      AND t.stock_quantity = p.stock_quantity
      AND t.price = p.price
      AND t.regular_price = p.regular_price
      AND t.cost_price IS NOT DISTINCT FROM p.cost_price
      AND t.vendor IS NOT DISTINCT FROM p.vendor
      AND t.brand IS NOT DISTINCT FROM p.brand
      AND t.visible = p.visible
      AND t.replenishable = p.replenishable
      AND t.barcode IS NOT DISTINCT FROM p.barcode
      AND t.updated_at IS NOT DISTINCT FROM p.updated_at
      AND t.total_sold IS NOT DISTINCT FROM p.total_sold
      -- Only the fields most likely to change are compared; checking every
      -- column would cost more than the occasional redundant update it avoids.
  `);
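
  // `IS NOT DISTINCT FROM` is PostgreSQL's null-safe equality: unlike `=`,
  // which yields NULL when either side is NULL (filtering the row out), it
  // treats two NULLs as equal. Illustration of the difference:
  //
  //   SELECT NULL = NULL;                     -- NULL (unknown)
  //   SELECT NULL IS NOT DISTINCT FROM NULL;  -- true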

  // Get count of products that need updating
  const [countResult] = await localConnection.query(`
    SELECT
      COUNT(*) FILTER (WHERE needs_update = true) as update_count,
      COUNT(*) FILTER (WHERE needs_update = false) as skip_count,
      COUNT(*) as total_count
    FROM temp_products
  `);

  outputProgress({
    status: "running",
    operation: "Products import",
    message: `Found ${countResult.rows[0].update_count} products that need updating, ${countResult.rows[0].skip_count} unchanged`
  });

  // Return the total products processed
  return {
    totalProcessed: prodData.length,
    needsUpdate: parseInt(countResult.rows[0].update_count, 10),
    skipped: parseInt(countResult.rows[0].skip_count, 10)
  };
}

async function importProducts(prodConnection, localConnection, incrementalUpdate = true) {
  const startTime = Date.now();
  let lastSyncTime = '1970-01-01';

  try {
    // Get last sync time if doing an incremental update
    if (incrementalUpdate) {
      const [syncResult] = await localConnection.query(
        "SELECT last_sync_timestamp FROM sync_status WHERE table_name = 'products'"
      );
      if (syncResult.rows.length > 0) {
        lastSyncTime = syncResult.rows[0].last_sync_timestamp;
      }
    }

    // Start a transaction so the temporary tables persist across queries
    await localConnection.beginTransaction();

    try {
      // Setup temporary tables
      await setupTemporaryTables(localConnection);

      // Materialize calculations into the temp table
      const materializeResult = await materializeCalculations(prodConnection, localConnection, incrementalUpdate, lastSyncTime, startTime);

      // Get the list of products that need updating
      const [products] = await localConnection.query(`
        SELECT
          t.pid, t.title, t.description, t.sku, t.stock_quantity, t.preorder_count,
          t.notions_inv_count, t.price, t.regular_price, t.cost_price, t.vendor,
          t.vendor_reference, t.notions_reference, t.brand, t.line, t.subline,
          t.artist, t.categories, t.created_at, t.first_received, t.landing_cost_price,
          t.barcode, t.harmonized_tariff_code, t.updated_at, t.visible, t.managing_stock,
          t.replenishable, t.permalink, t.moq, t.rating, t.reviews, t.weight, t.length,
          t.width, t.height, t.country_of_origin, t.location, t.total_sold, t.baskets,
          t.notifies, t.date_last_sold, t.primary_iid, t.image, t.image_175, t.image_full,
          t.options, t.tags
        FROM temp_products t
        WHERE t.needs_update = true
      `);

      // Process products in batches
      let recordsAdded = 0;
      let recordsUpdated = 0;

      for (let i = 0; i < products.rows.length; i += BATCH_SIZE) {
        const batch = products.rows.slice(i, i + BATCH_SIZE);

        const placeholders = batch.map((_, idx) => {
          const base = idx * 47; // 47 columns per row
          return `(${Array.from({ length: 47 }, (_, j) => `$${base + j + 1}`).join(', ')})`;
        }).join(',');
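
        // The generated string is a flat list of numbered pg placeholders, one
        // parenthesized tuple per row. For example, two rows of three columns
        // would produce (illustrative only):
        //
        //   ($1, $2, $3),($4, $5, $6)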

        const values = batch.flatMap(row => {
          const imageUrls = getImageUrls(row.pid, row.primary_iid || 1);
          return [
            row.pid, row.title, row.description, row.sku || '',
            row.stock_quantity > 5000 ? 0 : Math.max(0, row.stock_quantity),
            row.preorder_count, row.notions_inv_count,
            row.price, row.regular_price, row.cost_price,
            row.vendor, row.vendor_reference, row.notions_reference,
            row.brand, row.line, row.subline, row.artist, row.categories,
            validateDate(row.created_at), validateDate(row.first_received),
            row.landing_cost_price, row.barcode, row.harmonized_tariff_code,
            validateDate(row.updated_at), row.visible, row.managing_stock,
            row.replenishable, row.permalink, row.moq,
            1, // uom (default unit of measure)
            row.rating, row.reviews,
            row.weight, row.length, row.width, row.height,
            row.country_of_origin, row.location,
            row.total_sold, row.baskets, row.notifies,
            validateDate(row.date_last_sold),
            imageUrls.image, imageUrls.image_175, imageUrls.image_full,
            row.options, row.tags
          ];
        });

        const [result] = await localConnection.query(`
          WITH upserted AS (
            INSERT INTO products (
              pid, title, description, sku, stock_quantity, preorder_count, notions_inv_count,
              price, regular_price, cost_price, vendor, vendor_reference, notions_reference,
              brand, line, subline, artist, categories, created_at, first_received,
              landing_cost_price, barcode, harmonized_tariff_code, updated_at, visible,
              managing_stock, replenishable, permalink, moq, uom, rating, reviews,
              weight, length, width, height, country_of_origin, location, total_sold,
              baskets, notifies, date_last_sold, image, image_175, image_full, options, tags
            )
            VALUES ${placeholders}
            ON CONFLICT (pid) DO UPDATE SET
              title = EXCLUDED.title, description = EXCLUDED.description, sku = EXCLUDED.sku,
              stock_quantity = EXCLUDED.stock_quantity, preorder_count = EXCLUDED.preorder_count,
              notions_inv_count = EXCLUDED.notions_inv_count, price = EXCLUDED.price,
              regular_price = EXCLUDED.regular_price, cost_price = EXCLUDED.cost_price,
              vendor = EXCLUDED.vendor, vendor_reference = EXCLUDED.vendor_reference,
              notions_reference = EXCLUDED.notions_reference, brand = EXCLUDED.brand,
              line = EXCLUDED.line, subline = EXCLUDED.subline, artist = EXCLUDED.artist,
              created_at = EXCLUDED.created_at, first_received = EXCLUDED.first_received,
              landing_cost_price = EXCLUDED.landing_cost_price, barcode = EXCLUDED.barcode,
              harmonized_tariff_code = EXCLUDED.harmonized_tariff_code, updated_at = EXCLUDED.updated_at,
              visible = EXCLUDED.visible, managing_stock = EXCLUDED.managing_stock,
              replenishable = EXCLUDED.replenishable, permalink = EXCLUDED.permalink,
              moq = EXCLUDED.moq, uom = EXCLUDED.uom, rating = EXCLUDED.rating,
              reviews = EXCLUDED.reviews, weight = EXCLUDED.weight, length = EXCLUDED.length,
              width = EXCLUDED.width, height = EXCLUDED.height,
              country_of_origin = EXCLUDED.country_of_origin, location = EXCLUDED.location,
              total_sold = EXCLUDED.total_sold, baskets = EXCLUDED.baskets,
              notifies = EXCLUDED.notifies, date_last_sold = EXCLUDED.date_last_sold,
              image = EXCLUDED.image, image_175 = EXCLUDED.image_175,
              image_full = EXCLUDED.image_full, options = EXCLUDED.options, tags = EXCLUDED.tags
            RETURNING
              xmax = 0 as inserted
          )
          SELECT
            COUNT(*) FILTER (WHERE inserted) as inserted,
            COUNT(*) FILTER (WHERE NOT inserted) as updated
          FROM upserted
        `, values);

        recordsAdded += parseInt(result.rows[0].inserted, 10) || 0;
        recordsUpdated += parseInt(result.rows[0].updated, 10) || 0;

        // Process category relationships in batches
        const allCategories = [];
        for (const row of batch) {
          if (row.categories) {
            const categoryIds = row.categories.split(',').filter(id => id && id.trim());
            categoryIds.forEach(catId => {
              allCategories.push([row.pid, parseInt(catId.trim(), 10)]);
            });
          }
        }

        // If we have categories to process
        if (allCategories.length > 0) {
          // All products in this batch
          const productIds = batch.map(p => p.pid);

          // Delete all existing relationships for products in this batch
          await localConnection.query(
            'DELETE FROM product_categories WHERE pid = ANY($1)',
            [productIds]
          );

          // Insert all new relationships in one batch
          const catPlaceholders = allCategories.map((_, idx) =>
            `($${idx * 2 + 1}, $${idx * 2 + 2})`
          ).join(',');

          const catValues = allCategories.flat();

          await localConnection.query(`
            INSERT INTO product_categories (pid, cat_id)
            VALUES ${catPlaceholders}
            ON CONFLICT (pid, cat_id) DO NOTHING
          `, catValues);
        }
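
        // Category links are synced with a delete-then-reinsert pass: wiping
        // all rows for the batch's pids first means categories removed
        // upstream disappear locally, while ON CONFLICT DO NOTHING makes the
        // reinsert idempotent. A rough single-product equivalent (pid 1234
        // and cat_id 56 are hypothetical):
        //
        //   await localConnection.query('DELETE FROM product_categories WHERE pid = ANY($1)', [[1234]]);
        //   await localConnection.query(
        //     'INSERT INTO product_categories (pid, cat_id) VALUES ($1, $2) ON CONFLICT (pid, cat_id) DO NOTHING',
        //     [1234, 56]
        //   );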

        outputProgress({
          status: "running",
          operation: "Products import",
          message: `Processing products: ${i + batch.length} of ${products.rows.length}`,
          current: i + batch.length,
          total: products.rows.length,
          elapsed: formatElapsedTime(startTime),
          remaining: estimateRemaining(startTime, i + batch.length, products.rows.length),
          rate: calculateRate(startTime, i + batch.length)
        });
      }

      // Cleanup temporary tables
      await cleanupTemporaryTables(localConnection);

      // Commit the transaction
      await localConnection.commit();

      // Update sync status
      await localConnection.query(`
        INSERT INTO sync_status (table_name, last_sync_timestamp)
        VALUES ('products', NOW())
        ON CONFLICT (table_name) DO UPDATE SET
          last_sync_timestamp = NOW()
      `);

      return {
        status: 'complete',
        recordsAdded,
        recordsUpdated,
        totalRecords: products.rows.length,
        totalProcessed: materializeResult.totalProcessed,
        duration: formatElapsedTime(startTime),
        needsUpdate: materializeResult.needsUpdate,
        skippedUnchanged: materializeResult.skipped
      };
    } catch (error) {
      // Rollback on error
      await localConnection.rollback();
      throw error;
    }
  } catch (error) {
    console.error('Error in importProducts:', error);
    throw error;
  }
}

module.exports = {
  importProducts,
  importMissingProducts,
  setupTemporaryTables,
  cleanupTemporaryTables,
  materializeCalculations
};
@@ -1,884 +0,0 @@
const { outputProgress, formatElapsedTime, estimateRemaining, calculateRate } = require('../metrics-new/utils/progress');

/**
 * Validates a date from MySQL before inserting it into PostgreSQL.
 * @param {string|Date|null} mysqlDate - Date string or object from MySQL
 * @returns {string|Date|null} The original value if valid, or null if invalid
 */
function validateDate(mysqlDate) {
  // Handle null, undefined, or empty values
  if (!mysqlDate) {
    return null;
  }

  // Convert to string if it's not already
  const dateStr = String(mysqlDate);

  // Handle MySQL zero dates ('0000-00-00', '0000-00-00 00:00:00', etc.)
  if (dateStr === '' || dateStr.includes('0000-00-00')) {
    return null;
  }

  // Check if the date is valid
  const date = new Date(mysqlDate);

  // If the date is invalid or suspiciously old (pre-1970), return null
  if (isNaN(date.getTime()) || date.getFullYear() < 1970) {
    return null;
  }

  return mysqlDate;
}
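
// Illustrative inputs and outputs (not exhaustive):
//
//   validateDate('0000-00-00 00:00:00')  // => null (MySQL zero date)
//   validateDate('1965-03-01')           // => null (pre-1970, treated as bogus)
//   validateDate('2024-06-01 12:00:00')  // => '2024-06-01 12:00:00' (passed through unchanged)
//   validateDate(null)                   // => null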

/**
 * Imports purchase orders and receivings from a production MySQL database to a local PostgreSQL database.
 * Handles these as separate data streams without complex FIFO allocation.
 *
 * @param {object} prodConnection - A MySQL connection to the production DB
 * @param {object} localConnection - A PostgreSQL connection to the local DB
 * @param {boolean} incrementalUpdate - Set to false for a full sync; true for incremental
 * @returns {object} Information about the sync operation
 */
async function importPurchaseOrders(prodConnection, localConnection, incrementalUpdate = true) {
  const startTime = Date.now();
  let poRecordsAdded = 0;
  let poRecordsUpdated = 0;
  let poRecordsDeleted = 0;
  let receivingRecordsAdded = 0;
  let receivingRecordsUpdated = 0;
  let receivingRecordsDeleted = 0;
  let totalProcessed = 0;
  let offset = 0; // shared batching cursor for the PO and receivings loops below

  // Batch size constants
  const PO_BATCH_SIZE = 500;
  const INSERT_BATCH_SIZE = 100;

  try {
    // Begin transaction for the entire import process
    await localConnection.beginTransaction();

    // Get last sync info
    const [syncInfo] = await localConnection.query(
      "SELECT last_sync_timestamp FROM sync_status WHERE table_name = 'purchase_orders'"
    );
    const lastSyncTime = syncInfo?.rows?.[0]?.last_sync_timestamp || '1970-01-01';

    console.log('Purchase Orders: Using last sync time:', lastSyncTime);

    // Create temp tables for processing
    await localConnection.query(`
      DROP TABLE IF EXISTS temp_purchase_orders;
      DROP TABLE IF EXISTS temp_receivings;
      DROP TABLE IF EXISTS employee_names;
      DROP TABLE IF EXISTS temp_supplier_names;

      -- Temporary table for purchase orders
      CREATE TEMP TABLE temp_purchase_orders (
        po_id TEXT NOT NULL,
        pid BIGINT NOT NULL,
        sku TEXT,
        name TEXT,
        vendor TEXT,
        date TIMESTAMP WITH TIME ZONE,
        expected_date DATE,
        status TEXT,
        notes TEXT,
        long_note TEXT,
        ordered INTEGER,
        po_cost_price NUMERIC(14, 4),
        supplier_id INTEGER,
        date_created TIMESTAMP WITH TIME ZONE,
        date_ordered TIMESTAMP WITH TIME ZONE,
        PRIMARY KEY (po_id, pid)
      );

      -- Temporary table for receivings
      CREATE TEMP TABLE temp_receivings (
        receiving_id TEXT NOT NULL,
        pid BIGINT NOT NULL,
        sku TEXT,
        name TEXT,
        vendor TEXT,
        qty_each INTEGER,
        qty_each_orig INTEGER,
        cost_each NUMERIC(14, 5),
        cost_each_orig NUMERIC(14, 5),
        received_by INTEGER,
        received_by_name TEXT,
        received_date TIMESTAMP WITH TIME ZONE,
        receiving_created_date TIMESTAMP WITH TIME ZONE,
        supplier_id INTEGER,
        status TEXT,
        PRIMARY KEY (receiving_id, pid)
      );

      -- Temporary table for employee names
      CREATE TEMP TABLE employee_names (
        employeeid INTEGER PRIMARY KEY,
        firstname TEXT,
        lastname TEXT
      );

      -- Create indexes for efficient joins
      CREATE INDEX idx_temp_po_pid ON temp_purchase_orders(pid);
      CREATE INDEX idx_temp_receiving_pid ON temp_receivings(pid);
    `);

    // Map numeric status codes to text values
    const poStatusMap = {
      0: 'canceled',
      1: 'created',
      10: 'electronically_ready_send',
      11: 'ordered',
      12: 'preordered',
      13: 'electronically_sent',
      15: 'receiving_started',
      50: 'done'
    };

    const receivingStatusMap = {
      0: 'canceled',
      1: 'created',
      30: 'partial_received',
      40: 'full_received',
      50: 'paid'
    };
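
    // Lookups fall back to 'created' at the point of use (e.g.
    // `poStatusMap[po.status] || 'created'` below), so an unrecognized
    // upstream status code degrades to a sane default instead of aborting
    // the import.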

    // Get the time window for data retrieval
    const yearInterval = incrementalUpdate ? 1 : 5;

    // Fetch employee data from production
    outputProgress({
      status: "running",
      operation: "Purchase orders import",
      message: "Fetching employee data"
    });

    const [employees] = await prodConnection.query(`
      SELECT
        employeeid,
        firstname,
        lastname
      FROM employees
    `);

    // Insert employee data into the temp table
    if (employees.length > 0) {
      const employeeValues = employees.flatMap(emp => [
        emp.employeeid,
        emp.firstname || '',
        emp.lastname || ''
      ]);

      const placeholders = employees.map((_, idx) => {
        const base = idx * 3;
        return `($${base + 1}, $${base + 2}, $${base + 3})`;
      }).join(',');

      await localConnection.query(`
        INSERT INTO employee_names (employeeid, firstname, lastname)
        VALUES ${placeholders}
        ON CONFLICT (employeeid) DO UPDATE SET
          firstname = EXCLUDED.firstname,
          lastname = EXCLUDED.lastname
      `, employeeValues);
    }

    // Build a supplier-name mapping before the PO import
    outputProgress({
      status: "running",
      operation: "Purchase orders import",
      message: "Fetching supplier data for vendor mapping"
    });

    // Fetch supplier data from production and store it in a temp table
    const [suppliers] = await prodConnection.query(`
      SELECT
        supplierid,
        companyname
      FROM suppliers
      WHERE companyname IS NOT NULL AND companyname != ''
    `);

    if (suppliers.length > 0) {
      // Create temp table for supplier names
      await localConnection.query(`
        DROP TABLE IF EXISTS temp_supplier_names;
        CREATE TEMP TABLE temp_supplier_names (
          supplier_id INTEGER PRIMARY KEY,
          company_name TEXT NOT NULL
        );
      `);

      // Insert supplier data in batches
      for (let i = 0; i < suppliers.length; i += INSERT_BATCH_SIZE) {
        const batch = suppliers.slice(i, i + INSERT_BATCH_SIZE);

        const placeholders = batch.map((_, idx) => {
          const base = idx * 2;
          return `($${base + 1}, $${base + 2})`;
        }).join(',');

        const values = batch.flatMap(s => [
          s.supplierid,
          s.companyname || 'Unnamed Supplier'
        ]);

        await localConnection.query(`
          INSERT INTO temp_supplier_names (supplier_id, company_name)
          VALUES ${placeholders}
          ON CONFLICT (supplier_id) DO UPDATE SET
            company_name = EXCLUDED.company_name
        `, values);
      }
    }

    // 1. Fetch and process purchase orders
    outputProgress({
      status: "running",
      operation: "Purchase orders import",
      message: "Fetching purchase orders"
    });

    const [poCount] = await prodConnection.query(`
      SELECT COUNT(*) as total
      FROM po p
      WHERE p.date_created >= DATE_SUB(CURRENT_DATE, INTERVAL ${yearInterval} YEAR)
      ${incrementalUpdate ? `
        AND (
          p.date_updated > ?
          OR p.date_ordered > ?
          OR p.date_estin > ?
        )
      ` : ''}
    `, incrementalUpdate ? [lastSyncTime, lastSyncTime, lastSyncTime] : []);

    const totalPOs = poCount[0].total;
    console.log(`Found ${totalPOs} relevant purchase orders`);

    // Skip processing if there are no POs to process
    if (totalPOs === 0) {
      console.log('No purchase orders to process, skipping PO import step');
    } else {
      // Fetch and process POs in batches
      let allPOsProcessed = false;

      while (!allPOsProcessed) {
        const [poList] = await prodConnection.query(`
          SELECT
            p.po_id,
            p.supplier_id,
            s.companyname AS vendor,
            p.status,
            p.notes AS long_note,
            p.short_note AS notes,
            p.date_created,
            p.date_ordered,
            p.date_estin
          FROM po p
          LEFT JOIN suppliers s ON p.supplier_id = s.supplierid
          WHERE p.date_created >= DATE_SUB(CURRENT_DATE, INTERVAL ${yearInterval} YEAR)
          ${incrementalUpdate ? `
            AND (
              p.date_updated > ?
              OR p.date_ordered > ?
              OR p.date_estin > ?
            )
          ` : ''}
          ORDER BY p.po_id
          LIMIT ${PO_BATCH_SIZE} OFFSET ${offset}
        `, incrementalUpdate ? [lastSyncTime, lastSyncTime, lastSyncTime] : []);

        if (poList.length === 0) {
          allPOsProcessed = true;
          break;
        }

        // Get products for these POs
        const poIds = poList.map(po => po.po_id);

        const [poProducts] = await prodConnection.query(`
          SELECT
            pp.po_id,
            pp.pid,
            pp.qty_each,
            pp.cost_each,
            COALESCE(p.itemnumber, 'NO-SKU') AS sku,
            COALESCE(p.description, 'Unknown Product') AS name
          FROM po_products pp
          LEFT JOIN products p ON pp.pid = p.pid
          WHERE pp.po_id IN (?)
        `, [poIds]);

        // Build complete PO records
        const completePOs = [];
        for (const product of poProducts) {
          const po = poList.find(p => p.po_id == product.po_id);
          if (!po) continue;

          completePOs.push({
            po_id: po.po_id.toString(),
            pid: product.pid,
            sku: product.sku,
            name: product.name,
            vendor: po.vendor || 'Unknown Vendor',
            date: validateDate(po.date_ordered) || validateDate(po.date_created),
            expected_date: validateDate(po.date_estin),
            status: poStatusMap[po.status] || 'created',
            notes: po.notes || '',
            long_note: po.long_note || '',
            ordered: product.qty_each,
            po_cost_price: product.cost_each,
            supplier_id: po.supplier_id,
            date_created: validateDate(po.date_created),
            date_ordered: validateDate(po.date_ordered)
          });
        }

        // Insert PO data in batches
        for (let i = 0; i < completePOs.length; i += INSERT_BATCH_SIZE) {
          const batch = completePOs.slice(i, i + INSERT_BATCH_SIZE);

          const placeholders = batch.map((_, idx) => {
            const base = idx * 15; // 15 columns per row
            return `(${Array.from({ length: 15 }, (_, j) => `$${base + j + 1}`).join(', ')})`;
          }).join(',');

          const values = batch.flatMap(po => [
            po.po_id,
            po.pid,
            po.sku,
            po.name,
            po.vendor,
            po.date,
            po.expected_date,
            po.status,
            po.notes,
            po.long_note,
            po.ordered,
            po.po_cost_price,
            po.supplier_id,
            po.date_created,
            po.date_ordered
          ]);

          await localConnection.query(`
            INSERT INTO temp_purchase_orders (
              po_id, pid, sku, name, vendor, date, expected_date, status, notes, long_note,
              ordered, po_cost_price, supplier_id, date_created, date_ordered
            )
            VALUES ${placeholders}
            ON CONFLICT (po_id, pid) DO UPDATE SET
              sku = EXCLUDED.sku,
              name = EXCLUDED.name,
              vendor = EXCLUDED.vendor,
              date = EXCLUDED.date,
              expected_date = EXCLUDED.expected_date,
              status = EXCLUDED.status,
              notes = EXCLUDED.notes,
              long_note = EXCLUDED.long_note,
              ordered = EXCLUDED.ordered,
              po_cost_price = EXCLUDED.po_cost_price,
              supplier_id = EXCLUDED.supplier_id,
              date_created = EXCLUDED.date_created,
              date_ordered = EXCLUDED.date_ordered
          `, values);
        }

        offset += poList.length;
        totalProcessed += completePOs.length;

        outputProgress({
          status: "running",
          operation: "Purchase orders import",
          message: `Processed ${offset} of ${totalPOs} purchase orders (${totalProcessed} line items)`,
          current: offset,
          total: totalPOs,
          elapsed: formatElapsedTime(startTime),
          remaining: estimateRemaining(startTime, offset, totalPOs),
          rate: calculateRate(startTime, offset)
        });

        if (poList.length < PO_BATCH_SIZE) {
          allPOsProcessed = true;
        }
      }
    }
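
    // Both fetch loops (POs above, receivings below) walk the production
    // tables with ORDER BY + LIMIT/OFFSET pages and stop early on a short
    // page. The shape, stripped to its skeleton (fetchPage/processPage are
    // placeholders for the concrete queries used here):
    //
    //   let done = false;
    //   while (!done) {
    //     const page = await fetchPage(offset, PO_BATCH_SIZE);
    //     if (page.length === 0) break;
    //     await processPage(page);
    //     offset += page.length;
    //     done = page.length < PO_BATCH_SIZE;
    //   }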

    // 2. Next, fetch all relevant receivings
    outputProgress({
      status: "running",
      operation: "Purchase orders import",
      message: "Fetching receivings data"
    });

    const [receivingCount] = await prodConnection.query(`
      SELECT COUNT(*) as total
      FROM receivings r
      WHERE r.date_created >= DATE_SUB(CURRENT_DATE, INTERVAL ${yearInterval} YEAR)
      ${incrementalUpdate ? `
        AND (
          r.date_updated > ?
          OR r.date_created > ?
        )
      ` : ''}
    `, incrementalUpdate ? [lastSyncTime, lastSyncTime] : []);

    const totalReceivings = receivingCount[0].total;
    console.log(`Found ${totalReceivings} relevant receivings`);

    // Skip processing if there are no receivings to process
    if (totalReceivings === 0) {
      console.log('No receivings to process, skipping receivings import step');
    } else {
      // Fetch and process receivings in batches
      offset = 0; // Reset the shared offset for receivings
      let allReceivingsProcessed = false;

      while (!allReceivingsProcessed) {
        const [receivingList] = await prodConnection.query(`
          SELECT
            r.receiving_id,
            r.supplier_id,
            r.status,
            r.notes,
            r.shipping,
            r.total_amount,
            r.hold,
            r.for_storefront,
            r.date_created,
            r.date_paid,
            r.date_checked
          FROM receivings r
          WHERE r.date_created >= DATE_SUB(CURRENT_DATE, INTERVAL ${yearInterval} YEAR)
          ${incrementalUpdate ? `
            AND (
              r.date_updated > ?
              OR r.date_created > ?
            )
          ` : ''}
          ORDER BY r.receiving_id
          LIMIT ${PO_BATCH_SIZE} OFFSET ${offset}
        `, incrementalUpdate ? [lastSyncTime, lastSyncTime] : []);

        if (receivingList.length === 0) {
          allReceivingsProcessed = true;
          break;
        }

        // Get products for these receivings
        const receivingIds = receivingList.map(r => r.receiving_id);

        const [receivingProducts] = await prodConnection.query(`
          SELECT
            rp.receiving_id,
            rp.pid,
            rp.qty_each,
            rp.qty_each_orig,
            rp.cost_each,
            rp.cost_each_orig,
            rp.received_by,
            rp.received_date,
            r.date_created as receiving_created_date,
            COALESCE(p.itemnumber, 'NO-SKU') AS sku,
            COALESCE(p.description, 'Unknown Product') AS name
          FROM receivings_products rp
          JOIN receivings r ON rp.receiving_id = r.receiving_id
          LEFT JOIN products p ON rp.pid = p.pid
          WHERE rp.receiving_id IN (?)
        `, [receivingIds]);

        // Build complete receiving records
        const completeReceivings = [];
        for (const product of receivingProducts) {
          const receiving = receivingList.find(r => r.receiving_id == product.receiving_id);
          if (!receiving) continue;

          // Get employee name if available
          let receivedByName = null;
          if (product.received_by) {
            const [employeeResult] = await localConnection.query(`
              SELECT CONCAT(firstname, ' ', lastname) as full_name
              FROM employee_names
              WHERE employeeid = $1
            `, [product.received_by]);

            if (employeeResult.rows.length > 0) {
              receivedByName = employeeResult.rows[0].full_name;
            }
          }

          // Get vendor name if available
          let vendorName = 'Unknown Vendor';
          if (receiving.supplier_id) {
            const [vendorResult] = await localConnection.query(`
              SELECT company_name
              FROM temp_supplier_names
              WHERE supplier_id = $1
            `, [receiving.supplier_id]);

            if (vendorResult.rows.length > 0) {
              vendorName = vendorResult.rows[0].company_name;
            }
          }

          completeReceivings.push({
            receiving_id: receiving.receiving_id.toString(),
            pid: product.pid,
            sku: product.sku,
            name: product.name,
            vendor: vendorName,
            qty_each: product.qty_each,
            qty_each_orig: product.qty_each_orig,
            cost_each: product.cost_each,
            cost_each_orig: product.cost_each_orig,
            received_by: product.received_by,
            received_by_name: receivedByName,
            received_date: validateDate(product.received_date) || validateDate(product.receiving_created_date),
            receiving_created_date: validateDate(product.receiving_created_date),
            supplier_id: receiving.supplier_id,
            status: receivingStatusMap[receiving.status] || 'created'
          });
        }

        // Insert receiving data in batches
        for (let i = 0; i < completeReceivings.length; i += INSERT_BATCH_SIZE) {
          const batch = completeReceivings.slice(i, i + INSERT_BATCH_SIZE);

          const placeholders = batch.map((_, idx) => {
            const base = idx * 15; // 15 columns per row
            return `(${Array.from({ length: 15 }, (_, j) => `$${base + j + 1}`).join(', ')})`;
          }).join(',');

          const values = batch.flatMap(r => [
            r.receiving_id,
            r.pid,
            r.sku,
            r.name,
            r.vendor,
            r.qty_each,
            r.qty_each_orig,
            r.cost_each,
            r.cost_each_orig,
            r.received_by,
            r.received_by_name,
            r.received_date,
            r.receiving_created_date,
            r.supplier_id,
            r.status
          ]);

          await localConnection.query(`
            INSERT INTO temp_receivings (
              receiving_id, pid, sku, name, vendor, qty_each, qty_each_orig,
              cost_each, cost_each_orig, received_by, received_by_name,
              received_date, receiving_created_date, supplier_id, status
            )
            VALUES ${placeholders}
            ON CONFLICT (receiving_id, pid) DO UPDATE SET
              sku = EXCLUDED.sku,
              name = EXCLUDED.name,
              vendor = EXCLUDED.vendor,
              qty_each = EXCLUDED.qty_each,
              qty_each_orig = EXCLUDED.qty_each_orig,
              cost_each = EXCLUDED.cost_each,
              cost_each_orig = EXCLUDED.cost_each_orig,
              received_by = EXCLUDED.received_by,
              received_by_name = EXCLUDED.received_by_name,
              received_date = EXCLUDED.received_date,
              receiving_created_date = EXCLUDED.receiving_created_date,
              supplier_id = EXCLUDED.supplier_id,
              status = EXCLUDED.status
          `, values);
        }

        offset += receivingList.length;
        totalProcessed += completeReceivings.length;

        outputProgress({
          status: "running",
          operation: "Purchase orders import",
          message: `Processed ${offset} of ${totalReceivings} receivings (${totalProcessed} line items total)`,
          current: offset,
          total: totalReceivings,
          elapsed: formatElapsedTime(startTime),
          remaining: estimateRemaining(startTime, offset, totalReceivings),
          rate: calculateRate(startTime, offset)
        });

        if (receivingList.length < PO_BATCH_SIZE) {
          allReceivingsProcessed = true;
        }
      }
    }

    // Filter out rows whose pid has no match in the local products table
    outputProgress({
      status: "running",
      operation: "Purchase orders import",
      message: "Validating product IDs before final import"
    });

    await localConnection.query(`
      -- Create temp table to store invalid PIDs
      DROP TABLE IF EXISTS temp_invalid_pids;
      CREATE TEMP TABLE temp_invalid_pids AS (
        -- Get all unique PIDs from our temp tables
        WITH all_pids AS (
          SELECT DISTINCT pid FROM temp_purchase_orders
          UNION
          SELECT DISTINCT pid FROM temp_receivings
        )
        -- Keep only those that don't exist in the products table
        SELECT p.pid
        FROM all_pids p
        WHERE NOT EXISTS (
          SELECT 1 FROM products WHERE pid = p.pid
        )
      );

      -- Remove purchase orders with invalid PIDs
      DELETE FROM temp_purchase_orders
      WHERE pid IN (SELECT pid FROM temp_invalid_pids);

      -- Remove receivings with invalid PIDs
      DELETE FROM temp_receivings
      WHERE pid IN (SELECT pid FROM temp_invalid_pids);
    `);
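
    // The filter above is a classic anti-join: collect every pid referenced
    // by the temp tables, keep only those with no match in products, then
    // delete the orphans. An equivalent one-step form (illustrative):
    //
    //   DELETE FROM temp_receivings tr
    //   WHERE NOT EXISTS (SELECT 1 FROM products p WHERE p.pid = tr.pid);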

    // Get the count of filtered items for reporting
    const [filteredResult] = await localConnection.query(`
      SELECT COUNT(*) as count FROM temp_invalid_pids
    `);
    const filteredCount = filteredResult.rows[0].count;

    if (filteredCount > 0) {
      console.log(`Filtered out ${filteredCount} items with invalid product IDs`);
    }

    // 3. Insert final purchase order records into the actual table
    outputProgress({
      status: "running",
      operation: "Purchase orders import",
      message: "Inserting final purchase order records"
    });

    // Create a temp table to track the PO IDs being processed
    await localConnection.query(`
      DROP TABLE IF EXISTS processed_po_ids;
      CREATE TEMP TABLE processed_po_ids AS (
        SELECT DISTINCT po_id FROM temp_purchase_orders
      );
    `);

    // Delete products that were removed from POs and count them
    const [poDeletedResult] = await localConnection.query(`
      WITH deleted AS (
        DELETE FROM purchase_orders
        WHERE po_id IN (SELECT po_id FROM processed_po_ids)
        AND NOT EXISTS (
          SELECT 1 FROM temp_purchase_orders tp
          WHERE purchase_orders.po_id = tp.po_id AND purchase_orders.pid = tp.pid
        )
        RETURNING po_id, pid
      )
      SELECT COUNT(*) as count FROM deleted
    `);

    poRecordsDeleted = poDeletedResult.rows[0]?.count || 0;
    console.log(`Deleted ${poRecordsDeleted} products that were removed from purchase orders`);

    const [poResult] = await localConnection.query(`
      INSERT INTO purchase_orders (
        po_id, vendor, date, expected_date, pid, sku, name,
        po_cost_price, status, notes, long_note,
        ordered, supplier_id, date_created, date_ordered
      )
      SELECT
        po_id,
        vendor,
        COALESCE(date, date_created, now()) as date,
        expected_date,
        pid,
        sku,
        name,
        po_cost_price,
        status,
        notes,
        long_note,
        ordered,
        supplier_id,
        date_created,
        date_ordered
      FROM temp_purchase_orders
      ON CONFLICT (po_id, pid) DO UPDATE SET
        vendor = EXCLUDED.vendor,
        date = EXCLUDED.date,
        expected_date = EXCLUDED.expected_date,
        sku = EXCLUDED.sku,
        name = EXCLUDED.name,
        po_cost_price = EXCLUDED.po_cost_price,
        status = EXCLUDED.status,
        notes = EXCLUDED.notes,
        long_note = EXCLUDED.long_note,
        ordered = EXCLUDED.ordered,
        supplier_id = EXCLUDED.supplier_id,
        date_created = EXCLUDED.date_created,
        date_ordered = EXCLUDED.date_ordered,
        updated = CURRENT_TIMESTAMP
      WHERE -- Only update if at least one key field has changed
        purchase_orders.ordered IS DISTINCT FROM EXCLUDED.ordered OR
        purchase_orders.po_cost_price IS DISTINCT FROM EXCLUDED.po_cost_price OR
        purchase_orders.status IS DISTINCT FROM EXCLUDED.status OR
        purchase_orders.expected_date IS DISTINCT FROM EXCLUDED.expected_date OR
        purchase_orders.date IS DISTINCT FROM EXCLUDED.date OR
        purchase_orders.vendor IS DISTINCT FROM EXCLUDED.vendor
      RETURNING (xmax = 0) as inserted
    `);

    poRecordsAdded = poResult.rows.filter(r => r.inserted).length;
    poRecordsUpdated = poResult.rows.filter(r => !r.inserted).length;

    // 4. Insert final receiving records into the actual table
    outputProgress({
      status: "running",
      operation: "Purchase orders import",
      message: "Inserting final receiving records"
    });

    // Create a temp table to track the receiving IDs being processed
    await localConnection.query(`
      DROP TABLE IF EXISTS processed_receiving_ids;
      CREATE TEMP TABLE processed_receiving_ids AS (
        SELECT DISTINCT receiving_id FROM temp_receivings
      );
    `);

    // Delete products that were removed from receivings and count them
    const [receivingDeletedResult] = await localConnection.query(`
      WITH deleted AS (
        DELETE FROM receivings
        WHERE receiving_id IN (SELECT receiving_id FROM processed_receiving_ids)
        AND NOT EXISTS (
          SELECT 1 FROM temp_receivings tr
          WHERE receivings.receiving_id = tr.receiving_id AND receivings.pid = tr.pid
        )
        RETURNING receiving_id, pid
      )
      SELECT COUNT(*) as count FROM deleted
    `);

    receivingRecordsDeleted = receivingDeletedResult.rows[0]?.count || 0;
    console.log(`Deleted ${receivingRecordsDeleted} products that were removed from receivings`);

    const [receivingsResult] = await localConnection.query(`
      INSERT INTO receivings (
        receiving_id, pid, sku, name, vendor, qty_each, qty_each_orig,
        cost_each, cost_each_orig, received_by, received_by_name,
        received_date, receiving_created_date, supplier_id, status
      )
      SELECT
        receiving_id,
        pid,
        sku,
        name,
        vendor,
        qty_each,
        qty_each_orig,
        cost_each,
        cost_each_orig,
        received_by,
        received_by_name,
        COALESCE(received_date, receiving_created_date, now()) as received_date,
        receiving_created_date,
        supplier_id,
        status
      FROM temp_receivings
      ON CONFLICT (receiving_id, pid) DO UPDATE SET
        sku = EXCLUDED.sku,
        name = EXCLUDED.name,
        vendor = EXCLUDED.vendor,
        qty_each = EXCLUDED.qty_each,
        qty_each_orig = EXCLUDED.qty_each_orig,
        cost_each = EXCLUDED.cost_each,
        cost_each_orig = EXCLUDED.cost_each_orig,
        received_by = EXCLUDED.received_by,
        received_by_name = EXCLUDED.received_by_name,
        received_date = EXCLUDED.received_date,
        receiving_created_date = EXCLUDED.receiving_created_date,
        supplier_id = EXCLUDED.supplier_id,
        status = EXCLUDED.status,
        updated = CURRENT_TIMESTAMP
      WHERE -- Only update if at least one key field has changed
        receivings.qty_each IS DISTINCT FROM EXCLUDED.qty_each OR
        receivings.cost_each IS DISTINCT FROM EXCLUDED.cost_each OR
        receivings.status IS DISTINCT FROM EXCLUDED.status OR
        receivings.received_date IS DISTINCT FROM EXCLUDED.received_date OR
        receivings.received_by IS DISTINCT FROM EXCLUDED.received_by
      RETURNING (xmax = 0) as inserted
    `);

    receivingRecordsAdded = receivingsResult.rows.filter(r => r.inserted).length;
    receivingRecordsUpdated = receivingsResult.rows.filter(r => !r.inserted).length;

    // Update sync status
    await localConnection.query(`
      INSERT INTO sync_status (table_name, last_sync_timestamp)
      VALUES ('purchase_orders', NOW())
      ON CONFLICT (table_name) DO UPDATE SET
        last_sync_timestamp = NOW()
    `);

    // Clean up temporary tables
    await localConnection.query(`
      DROP TABLE IF EXISTS temp_purchase_orders;
      DROP TABLE IF EXISTS temp_receivings;
      DROP TABLE IF EXISTS employee_names;
      DROP TABLE IF EXISTS temp_supplier_names;
      DROP TABLE IF EXISTS temp_invalid_pids;
      DROP TABLE IF EXISTS processed_po_ids;
      DROP TABLE IF EXISTS processed_receiving_ids;
    `);

    // Commit transaction
    await localConnection.commit();

    return {
      status: "complete",
      recordsAdded: poRecordsAdded + receivingRecordsAdded,
      recordsUpdated: poRecordsUpdated + receivingRecordsUpdated,
      recordsDeleted: poRecordsDeleted + receivingRecordsDeleted,
      poRecordsAdded,
      poRecordsUpdated,
      poRecordsDeleted,
      receivingRecordsAdded,
      receivingRecordsUpdated,
      receivingRecordsDeleted,
      totalRecords: totalProcessed
    };
  } catch (error) {
    console.error("Error during purchase orders import:", error);

    // Rollback transaction
    try {
      await localConnection.rollback();
    } catch (rollbackError) {
      console.error('Error during rollback:', rollbackError.message);
    }

    return {
      status: "error",
      error: error.message,
      recordsAdded: 0,
      recordsUpdated: 0,
      recordsDeleted: 0,
      totalRecords: 0
    };
  }
}

module.exports = importPurchaseOrders;
@@ -1,156 +0,0 @@
const mysql = require("mysql2/promise");
const { Client } = require("ssh2");
const { Pool } = require('pg');
const dotenv = require("dotenv");
const path = require("path");

// Helper function to set up the SSH tunnel
async function setupSshTunnel(sshConfig) {
  return new Promise((resolve, reject) => {
    const ssh = new Client();

    ssh.on('error', (err) => {
      console.error('SSH connection error:', err);
    });

    ssh.on('end', () => {
      console.log('SSH connection ended normally');
    });

    ssh.on('close', () => {
      console.log('SSH connection closed');
    });

    ssh
      .on("ready", () => {
        ssh.forwardOut(
          "127.0.0.1",
          0,
          sshConfig.prodDbConfig.host,
          sshConfig.prodDbConfig.port,
          (err, stream) => {
            if (err) return reject(err);
            resolve({ ssh, stream });
          }
        );
      })
      .connect(sshConfig.ssh);
  });
}
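
// A hedged usage sketch — the exact config shape (ssh credentials plus the
// prodDbConfig host/port) is whatever callers of setupConnections assemble;
// the host names below are placeholders:
//
//   const { ssh, stream } = await setupSshTunnel({
//     ssh: { host: 'bastion.example.com', username: 'deploy', privateKey },
//     prodDbConfig: { host: '10.0.0.5', port: 3306 }
//   });
//   // `stream` is then handed to mysql2 as the connection's transport.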

// Helper function to set up both database connections
async function setupConnections(sshConfig) {
  const tunnel = await setupSshTunnel(sshConfig);

  // Setup MySQL connection for production, routed through the SSH tunnel
  const prodConnection = await mysql.createConnection({
    ...sshConfig.prodDbConfig,
    stream: tunnel.stream,
  });

  // Setup PostgreSQL connection pool for local
  const localPool = new Pool(sshConfig.localDbConfig);

  // Test the PostgreSQL connection
  try {
    const client = await localPool.connect();
    await client.query('SELECT NOW()');
    client.release();
    console.log('PostgreSQL connection successful');
  } catch (err) {
    console.error('PostgreSQL connection error:', err);
    throw err;
  }

  // Create a wrapper around the PostgreSQL pool to match the mysql2 interface
  const localConnection = {
    _client: null,
    _transactionActive: false,

    query: async (text, params) => {
      // If we're not in a transaction, use the pool directly
      if (!localConnection._transactionActive) {
        const client = await localPool.connect();
        try {
          const result = await client.query(text, params);
          return [result];
        } finally {
          client.release();
        }
      }

      // If we're in a transaction, use the dedicated client
      if (!localConnection._client) {
        throw new Error('No active transaction client');
      }
      const result = await localConnection._client.query(text, params);
      return [result];
    },

    beginTransaction: async () => {
      if (localConnection._transactionActive) {
        throw new Error('Transaction already active');
      }
      localConnection._client = await localPool.connect();
      await localConnection._client.query('BEGIN');
      localConnection._transactionActive = true;
    },

    commit: async () => {
      if (!localConnection._transactionActive) {
        throw new Error('No active transaction to commit');
      }
      await localConnection._client.query('COMMIT');
      localConnection._client.release();
      localConnection._client = null;
      localConnection._transactionActive = false;
    },

    rollback: async () => {
      if (!localConnection._transactionActive) {
        throw new Error('No active transaction to rollback');
      }
      await localConnection._client.query('ROLLBACK');
      localConnection._client.release();
      localConnection._client = null;
      localConnection._transactionActive = false;
    },

    end: async () => {
      if (localConnection._client) {
        localConnection._client.release();
        localConnection._client = null;
      }
      await localPool.end();
    }
  };
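
  // Minimal usage sketch for the wrapper's transaction protocol (assuming a
  // `work` callback that issues its queries through the same wrapper):
  //
  //   await localConnection.beginTransaction();
  //   try {
  //     await work(localConnection);
  //     await localConnection.commit();
  //   } catch (err) {
  //     await localConnection.rollback();
  //     throw err;
  //   }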
|
||||
|
||||
return { prodConnection, localConnection, tunnel };
|
||||
}
|
||||
|
||||
// Helper function to close connections
|
||||
async function closeConnections(connections) {
|
||||
const { ssh, prodConnection, localConnection } = connections;
|
||||
|
||||
try {
|
||||
if (prodConnection) await prodConnection.end();
|
||||
if (localConnection) await localConnection.end();
|
||||
|
||||
// Wait a bit for any pending data to be written before closing SSH
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
|
||||
if (ssh) {
|
||||
ssh.on('close', () => {
|
||||
console.log('SSH connection closed cleanly');
|
||||
});
|
||||
ssh.end();
|
||||
}
|
||||
} catch (err) {
|
||||
console.error('Error during cleanup:', err);
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
setupConnections,
|
||||
closeConnections
|
||||
};
|
||||
@@ -1,444 +0,0 @@
|
||||
-- Description: Performs the first population OR full recalculation of the product_metrics table based on
|
||||
-- historically backfilled daily_product_snapshots and current product/PO data.
|
||||
-- Calculates all metrics considering the full available history up to 'yesterday'.
|
||||
-- Run ONCE after backfill_historical_snapshots_final.sql completes successfully.
|
||||
-- Dependencies: Core import tables (products, purchase_orders, receivings), daily_product_snapshots (historically populated),
|
||||
-- configuration tables (settings_*), product_metrics table must exist.
|
||||
-- Frequency: Run ONCE.
|
||||
DO $$
|
||||
DECLARE
|
||||
_module_name VARCHAR := 'product_metrics_population'; -- Generic name
|
||||
_start_time TIMESTAMPTZ := clock_timestamp();
|
||||
-- Calculate metrics AS OF the end of the last fully completed day
|
||||
_calculation_date DATE := CURRENT_DATE - INTERVAL '1 day';
|
||||
BEGIN
|
||||
RAISE NOTICE 'Running % module. Calculating AS OF: %. Start Time: %', _module_name, _calculation_date, _start_time;
|
||||
|
||||
-- Optional: Consider TRUNCATE if you want a completely fresh start,
|
||||
-- otherwise ON CONFLICT will update existing rows if this is rerun.
|
||||
-- TRUNCATE TABLE public.product_metrics;
|
||||
RAISE NOTICE 'Populating product_metrics table. This may take some time...';
|
||||
|
||||
-- CTEs to gather necessary information AS OF _calculation_date
|
||||
WITH CurrentInfo AS (
|
||||
-- Fetches current product details, including costs/prices used for forecasting & fallbacks
|
||||
SELECT
|
||||
p.pid, p.sku, p.title, p.brand, p.vendor, COALESCE(p.image_175, p.image) as image_url,
|
||||
p.visible as is_visible, p.replenishable,
|
||||
COALESCE(p.price, 0.00) as current_price, COALESCE(p.regular_price, 0.00) as current_regular_price,
|
||||
COALESCE(p.cost_price, 0.00) as current_cost_price,
|
||||
COALESCE(p.landing_cost_price, p.cost_price, 0.00) as current_effective_cost, -- Use landing if available, else cost
|
||||
p.stock_quantity as current_stock, -- Use actual current stock for forecast base
|
||||
p.created_at, p.first_received, p.date_last_sold,
|
||||
p.moq,
|
||||
p.uom,
|
||||
p.total_sold as historical_total_sold -- Add historical total_sold from products table
|
||||
FROM public.products p
|
||||
),
|
||||
OnOrderInfo AS (
|
||||
-- Calculates current on-order quantities and costs
|
||||
SELECT
|
||||
pid,
|
||||
SUM(ordered) AS on_order_qty,
|
||||
SUM(ordered * po_cost_price) AS on_order_cost,
|
||||
MIN(expected_date) AS earliest_expected_date
|
||||
FROM public.purchase_orders
|
||||
-- Use the most common statuses representing active, unfulfilled POs
|
||||
WHERE status IN ('created', 'ordered', 'preordered', 'electronically_sent', 'electronically_ready_send', 'receiving_started')
|
||||
AND status NOT IN ('canceled', 'done')
|
||||
GROUP BY pid
|
||||
),
|
||||
HistoricalDates AS (
|
||||
-- Determines key historical dates from orders and receivings
|
||||
SELECT
|
||||
p.pid,
|
||||
MIN(o.date)::date AS date_first_sold,
|
||||
MAX(o.date)::date AS max_order_date, -- Used as fallback for date_last_sold
|
||||
MIN(r.received_date)::date AS date_first_received_calc,
|
||||
MAX(r.received_date)::date AS date_last_received_calc
|
||||
FROM public.products p
|
||||
LEFT JOIN public.orders o ON p.pid = o.pid AND o.quantity > 0 AND o.status NOT IN ('canceled', 'returned')
|
||||
LEFT JOIN public.receivings r ON p.pid = r.pid
|
||||
GROUP BY p.pid
|
||||
),
|
||||
SnapshotAggregates AS (
|
||||
-- Aggregates metrics from historical snapshots up to the _calculation_date
|
||||
SELECT
|
||||
pid,
|
||||
-- Rolling periods relative to _calculation_date
|
||||
SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '6 days' AND _calculation_date THEN units_sold ELSE 0 END) AS sales_7d,
|
||||
SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '6 days' AND _calculation_date THEN net_revenue ELSE 0 END) AS revenue_7d,
|
||||
SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '13 days' AND _calculation_date THEN units_sold ELSE 0 END) AS sales_14d,
|
||||
SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '13 days' AND _calculation_date THEN net_revenue ELSE 0 END) AS revenue_14d,
|
||||
SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN units_sold ELSE 0 END) AS sales_30d,
|
||||
SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN net_revenue ELSE 0 END) AS revenue_30d,
|
||||
SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN cogs ELSE 0 END) AS cogs_30d,
|
||||
SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN profit ELSE 0 END) AS profit_30d,
|
||||
SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN units_returned ELSE 0 END) AS returns_units_30d,
|
||||
SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN returns_revenue ELSE 0 END) AS returns_revenue_30d,
|
||||
SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN discounts ELSE 0 END) AS discounts_30d,
|
||||
SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN gross_revenue ELSE 0 END) AS gross_revenue_30d,
|
||||
SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN gross_regular_revenue ELSE 0 END) AS gross_regular_revenue_30d,
SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date AND stockout_flag THEN 1 ELSE 0 END) AS stockout_days_30d,
SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '364 days' AND _calculation_date THEN units_sold ELSE 0 END) AS sales_365d,
SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '364 days' AND _calculation_date THEN net_revenue ELSE 0 END) AS revenue_365d,
SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN units_received ELSE 0 END) AS received_qty_30d,
SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN cost_received ELSE 0 END) AS received_cost_30d,

-- Averages over the last 30 days ending _calculation_date
AVG(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN eod_stock_quantity END) AS avg_stock_units_30d,
AVG(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN eod_stock_cost END) AS avg_stock_cost_30d,
AVG(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN eod_stock_retail END) AS avg_stock_retail_30d,
AVG(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN eod_stock_gross END) AS avg_stock_gross_30d,

-- Lifetime (Using historical total from products table)
(SELECT total_sold FROM public.products WHERE public.products.pid = daily_product_snapshots.pid) AS lifetime_sales,
COALESCE(
-- Option 1: Use 30-day average price if available
CASE WHEN SUM(CASE WHEN snapshot_date >= _calculation_date - INTERVAL '29 days' AND snapshot_date <= _calculation_date THEN units_sold ELSE 0 END) > 0 THEN
(SELECT total_sold FROM public.products WHERE public.products.pid = daily_product_snapshots.pid) * (
SUM(CASE WHEN snapshot_date >= _calculation_date - INTERVAL '29 days' AND snapshot_date <= _calculation_date THEN net_revenue ELSE 0 END) /
NULLIF(SUM(CASE WHEN snapshot_date >= _calculation_date - INTERVAL '29 days' AND snapshot_date <= _calculation_date THEN units_sold ELSE 0 END), 0)
)
ELSE NULL END,
-- Option 2: Try 365-day average price if available
CASE WHEN SUM(CASE WHEN snapshot_date >= _calculation_date - INTERVAL '364 days' AND snapshot_date <= _calculation_date THEN units_sold ELSE 0 END) > 0 THEN
(SELECT total_sold FROM public.products WHERE public.products.pid = daily_product_snapshots.pid) * (
SUM(CASE WHEN snapshot_date >= _calculation_date - INTERVAL '364 days' AND snapshot_date <= _calculation_date THEN net_revenue ELSE 0 END) /
NULLIF(SUM(CASE WHEN snapshot_date >= _calculation_date - INTERVAL '364 days' AND snapshot_date <= _calculation_date THEN units_sold ELSE 0 END), 0)
)
ELSE NULL END,
-- Option 3: Use current price from products table
(SELECT total_sold * price FROM public.products WHERE public.products.pid = daily_product_snapshots.pid),
-- Option 4: Use regular price if current price might be zero
(SELECT total_sold * regular_price FROM public.products WHERE public.products.pid = daily_product_snapshots.pid),
-- Final fallback: Use accumulated revenue (less accurate for old products)
SUM(net_revenue)
) AS lifetime_revenue,

-- Yesterday (Sales for the specific _calculation_date)
SUM(CASE WHEN snapshot_date = _calculation_date THEN units_sold ELSE 0 END) as yesterday_sales

FROM public.daily_product_snapshots
WHERE snapshot_date <= _calculation_date -- Ensure we only use data up to the calculation point
GROUP BY pid
),
FirstPeriodMetrics AS (
-- Calculates sales/revenue for first X days after first sale date
-- Uses HistoricalDates CTE to get the first sale date
SELECT
pid, date_first_sold,
SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '6 days' THEN units_sold ELSE 0 END) AS first_7_days_sales,
SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '6 days' THEN net_revenue ELSE 0 END) AS first_7_days_revenue,
SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '29 days' THEN units_sold ELSE 0 END) AS first_30_days_sales,
SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '29 days' THEN net_revenue ELSE 0 END) AS first_30_days_revenue,
SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '59 days' THEN units_sold ELSE 0 END) AS first_60_days_sales,
SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '59 days' THEN net_revenue ELSE 0 END) AS first_60_days_revenue,
SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '89 days' THEN units_sold ELSE 0 END) AS first_90_days_sales,
SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '89 days' THEN net_revenue ELSE 0 END) AS first_90_days_revenue
FROM public.daily_product_snapshots ds
JOIN HistoricalDates hd USING(pid)
WHERE date_first_sold IS NOT NULL
AND snapshot_date >= date_first_sold -- Only consider snapshots after first sale
AND snapshot_date <= _calculation_date -- Only up to the overall calculation date
GROUP BY pid, date_first_sold
),
Settings AS (
-- Fetches effective configuration settings (Product > Vendor > Global)
SELECT
p.pid,
COALESCE(sp.lead_time_days, sv.default_lead_time_days, (SELECT setting_value FROM settings_global WHERE setting_key = 'default_lead_time_days')::int, 14) AS effective_lead_time,
COALESCE(sp.days_of_stock, sv.default_days_of_stock, (SELECT setting_value FROM settings_global WHERE setting_key = 'default_days_of_stock')::int, 30) AS effective_days_of_stock,
COALESCE(sp.safety_stock, (SELECT setting_value::int FROM settings_global WHERE setting_key = 'default_safety_stock_units'), 0) AS effective_safety_stock,
COALESCE(sp.exclude_from_forecast, FALSE) AS exclude_forecast
FROM public.products p
LEFT JOIN public.settings_product sp ON p.pid = sp.pid
LEFT JOIN public.settings_vendor sv ON p.vendor = sv.vendor
),
AvgLeadTime AS (
-- Calculate Average Lead Time by joining purchase_orders with receivings
SELECT
po.pid,
AVG(GREATEST(1,
CASE
WHEN r.received_date IS NOT NULL AND po.date IS NOT NULL
THEN (r.received_date::date - po.date::date)
ELSE 1
END
))::int AS avg_lead_time_days_calc
FROM public.purchase_orders po
JOIN public.receivings r ON r.pid = po.pid
WHERE po.status = 'done' -- Completed POs
AND r.received_date IS NOT NULL
AND po.date IS NOT NULL
AND r.received_date >= po.date
GROUP BY po.pid
),
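-- Example: a PO dated 2024-03-01 received on 2024-03-08 contributes 7 days, while a
-- same-day receipt is clamped to 1 by GREATEST(1, ...), so the average never drops below a day.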
RankedForABC AS (
-- Ranks products based on the configured ABC metric (using historical data)
SELECT
p.pid,
CASE COALESCE((SELECT setting_value FROM settings_global WHERE setting_key = 'abc_calculation_basis'), 'revenue_30d')
WHEN 'sales_30d' THEN COALESCE(sa.sales_30d, 0)
WHEN 'lifetime_revenue' THEN COALESCE(sa.lifetime_revenue, 0)::numeric
ELSE COALESCE(sa.revenue_30d, 0) -- Default to revenue_30d
END AS metric_value
FROM public.products p -- Use products as the base
JOIN SnapshotAggregates sa ON p.pid = sa.pid
WHERE p.replenishable = TRUE -- Only rank replenishable products
AND (CASE COALESCE((SELECT setting_value FROM settings_global WHERE setting_key = 'abc_calculation_basis'), 'revenue_30d')
WHEN 'sales_30d' THEN COALESCE(sa.sales_30d, 0)
WHEN 'lifetime_revenue' THEN COALESCE(sa.lifetime_revenue, 0)::numeric
ELSE COALESCE(sa.revenue_30d, 0)
END) > 0 -- Only include products with non-zero contribution
),
CumulativeABC AS (
-- Calculates cumulative metric values for ABC ranking
SELECT
pid, metric_value,
SUM(metric_value) OVER (ORDER BY metric_value DESC NULLS LAST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as cumulative_metric,
SUM(metric_value) OVER () as total_metric
FROM RankedForABC
),
FinalABC AS (
-- Assigns A, B, or C class based on thresholds
SELECT
pid,
CASE
WHEN cumulative_metric / NULLIF(total_metric, 0) <= COALESCE((SELECT setting_value::numeric FROM settings_global WHERE setting_key = 'abc_revenue_threshold_a'), 0.8) THEN 'A'::char(1)
WHEN cumulative_metric / NULLIF(total_metric, 0) <= COALESCE((SELECT setting_value::numeric FROM settings_global WHERE setting_key = 'abc_revenue_threshold_b'), 0.95) THEN 'B'::char(1)
ELSE 'C'::char(1)
END AS abc_class_calc
FROM CumulativeABC
)
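-- Example: with the default thresholds (A <= 0.80, B <= 0.95), metric values of
-- 500, 300, 150 and 50 (total 1000) give cumulative shares of 0.50, 0.80, 0.95 and 1.00,
-- classifying the four products as A, A, B and C respectively.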
-- Final INSERT/UPDATE statement using all the prepared CTEs
INSERT INTO public.product_metrics (
pid, last_calculated, sku, title, brand, vendor, image_url, is_visible, is_replenishable,
current_price, current_regular_price, current_cost_price, current_landing_cost_price,
current_stock, current_stock_cost, current_stock_retail, current_stock_gross,
on_order_qty, on_order_cost, on_order_retail, earliest_expected_date,
date_created, date_first_received, date_last_received, date_first_sold, date_last_sold, age_days,
sales_7d, revenue_7d, sales_14d, revenue_14d, sales_30d, revenue_30d, cogs_30d, profit_30d,
returns_units_30d, returns_revenue_30d, discounts_30d, gross_revenue_30d, gross_regular_revenue_30d,
stockout_days_30d, sales_365d, revenue_365d,
avg_stock_units_30d, avg_stock_cost_30d, avg_stock_retail_30d, avg_stock_gross_30d,
received_qty_30d, received_cost_30d,
lifetime_sales, lifetime_revenue,
first_7_days_sales, first_7_days_revenue, first_30_days_sales, first_30_days_revenue,
first_60_days_sales, first_60_days_revenue, first_90_days_sales, first_90_days_revenue,
asp_30d, acp_30d, avg_ros_30d, avg_sales_per_day_30d,
margin_30d, markup_30d, gmroi_30d, stockturn_30d, return_rate_30d, discount_rate_30d,
stockout_rate_30d, markdown_30d, markdown_rate_30d, sell_through_30d,
avg_lead_time_days, abc_class,
sales_velocity_daily, config_lead_time, config_days_of_stock, config_safety_stock,
planning_period_days, lead_time_forecast_units, days_of_stock_forecast_units,
planning_period_forecast_units, lead_time_closing_stock, days_of_stock_closing_stock,
replenishment_needed_raw, replenishment_units, replenishment_cost, replenishment_retail, replenishment_profit,
to_order_units, forecast_lost_sales_units, forecast_lost_revenue,
stock_cover_in_days, po_cover_in_days, sells_out_in_days, replenish_date,
overstocked_units, overstocked_cost, overstocked_retail, is_old_stock,
yesterday_sales
)
SELECT
-- Select columns in order, joining all CTEs by pid
ci.pid, _start_time, ci.sku, ci.title, ci.brand, ci.vendor, ci.image_url, ci.is_visible, ci.replenishable,
ci.current_price, ci.current_regular_price, ci.current_cost_price, ci.current_effective_cost,
ci.current_stock, (ci.current_stock * COALESCE(ci.current_effective_cost, 0.00))::numeric(12,2), (ci.current_stock * COALESCE(ci.current_price, 0.00))::numeric(12,2), (ci.current_stock * COALESCE(ci.current_regular_price, 0.00))::numeric(12,2),
COALESCE(ooi.on_order_qty, 0), COALESCE(ooi.on_order_cost, 0.00)::numeric(12,2), (COALESCE(ooi.on_order_qty, 0) * COALESCE(ci.current_price, 0.00))::numeric(12,2), ooi.earliest_expected_date,

-- Cast timestamps to dates before doing any arithmetic, to avoid type errors
ci.created_at::date,
COALESCE(ci.first_received::date, hd.date_first_received_calc),
hd.date_last_received_calc,
hd.date_first_sold,
COALESCE(ci.date_last_sold, hd.max_order_date),
-- Work only with dates here, avoiding the timestamp + integer error
CASE
WHEN LEAST(ci.created_at::date, COALESCE(hd.date_first_sold, ci.created_at::date)) IS NOT NULL
THEN (_calculation_date::date - LEAST(ci.created_at::date, COALESCE(hd.date_first_sold, ci.created_at::date)))::int
ELSE NULL
END,

COALESCE(sa.sales_7d, 0), COALESCE(sa.revenue_7d, 0), COALESCE(sa.sales_14d, 0), COALESCE(sa.revenue_14d, 0), COALESCE(sa.sales_30d, 0), COALESCE(sa.revenue_30d, 0), COALESCE(sa.cogs_30d, 0), COALESCE(sa.profit_30d, 0),
COALESCE(sa.returns_units_30d, 0), COALESCE(sa.returns_revenue_30d, 0), COALESCE(sa.discounts_30d, 0), COALESCE(sa.gross_revenue_30d, 0), COALESCE(sa.gross_regular_revenue_30d, 0),
COALESCE(sa.stockout_days_30d, 0), COALESCE(sa.sales_365d, 0), COALESCE(sa.revenue_365d, 0),
sa.avg_stock_units_30d, sa.avg_stock_cost_30d, sa.avg_stock_retail_30d, sa.avg_stock_gross_30d, -- Averages can be NULL if no data
COALESCE(sa.received_qty_30d, 0), COALESCE(sa.received_cost_30d, 0),
COALESCE(sa.lifetime_sales, 0), COALESCE(sa.lifetime_revenue, 0),
fpm.first_7_days_sales, fpm.first_7_days_revenue, fpm.first_30_days_sales, fpm.first_30_days_revenue,
fpm.first_60_days_sales, fpm.first_60_days_revenue, fpm.first_90_days_sales, fpm.first_90_days_revenue,

-- Calculated KPIs (using COALESCE on inputs where appropriate)
sa.revenue_30d / NULLIF(sa.sales_30d, 0) AS asp_30d,
sa.cogs_30d / NULLIF(sa.sales_30d, 0) AS acp_30d,
sa.profit_30d / NULLIF(sa.sales_30d, 0) AS avg_ros_30d,
COALESCE(sa.sales_30d, 0) / 30.0 AS avg_sales_per_day_30d,

-- Percentages are cast to numeric with appropriate precision
((sa.profit_30d / NULLIF(sa.revenue_30d, 0)) * 100)::numeric(8,2) AS margin_30d,
((sa.profit_30d / NULLIF(sa.cogs_30d, 0)) * 100)::numeric(8,2) AS markup_30d,
sa.profit_30d / NULLIF(sa.avg_stock_cost_30d, 0) AS gmroi_30d,
sa.sales_30d / NULLIF(sa.avg_stock_units_30d, 0) AS stockturn_30d,
-- Cast the unit counts to numeric so integer division cannot truncate the rate to 0
((sa.returns_units_30d::numeric / NULLIF(COALESCE(sa.sales_30d, 0) + COALESCE(sa.returns_units_30d, 0), 0)) * 100)::numeric(8,2) AS return_rate_30d,
((sa.discounts_30d / NULLIF(sa.gross_revenue_30d, 0)) * 100)::numeric(8,2) AS discount_rate_30d,
((COALESCE(sa.stockout_days_30d, 0) / 30.0) * 100)::numeric(8,2) AS stockout_rate_30d,
GREATEST(0, sa.gross_regular_revenue_30d - sa.gross_revenue_30d) AS markdown_30d, -- Ensure markdown isn't negative
((GREATEST(0, sa.gross_regular_revenue_30d - sa.gross_revenue_30d) / NULLIF(sa.gross_regular_revenue_30d, 0)) * 100)::numeric(8,2) AS markdown_rate_30d,
-- Sell-through rate: Sales / (stock at end of period + Sales), one definition proxying for Sales / beginning stock
((sa.sales_30d::numeric / NULLIF(
(SELECT eod_stock_quantity FROM daily_product_snapshots WHERE snapshot_date = _calculation_date AND pid = ci.pid LIMIT 1) + COALESCE(sa.sales_30d, 0)
, 0)) * 100)::numeric(8,2) AS sell_through_30d,
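-- Example: 30 units sold in the period with 70 units on hand at period end
-- gives 30 / (70 + 30) = 30% sell-through.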

-- Use calculated periodic metrics
alt.avg_lead_time_days_calc,
CASE
WHEN ci.replenishable = FALSE THEN NULL -- Non-replenishable don't get a class
ELSE COALESCE(fa.abc_class_calc, 'C') -- Default ranked replenishable but non-contributing to C
END,

-- Forecasting intermediate values (based on historical aggregates ending _calculation_date)
(COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) AS sales_velocity_daily, -- Ensure divisor > 0
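-- Example: 60 units sold with 10 stockout days gives 60 / (30 - 10) = 3 units/day,
-- so demand is not understated by days the product could not sell.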
s.effective_lead_time AS config_lead_time, s.effective_days_of_stock AS config_days_of_stock, s.effective_safety_stock AS config_safety_stock,
(s.effective_lead_time + s.effective_days_of_stock) AS planning_period_days,
-- Calculate raw forecast need components (using safe velocity)
(COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time AS lead_time_forecast_units,
(COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock AS days_of_stock_forecast_units,
-- Planning period forecast units (sum of lead time and DOS units)
CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
+ CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock) AS planning_period_forecast_units,
-- Closing stock calculations (using raw forecast components for accuracy before rounding)
(ci.current_stock + COALESCE(ooi.on_order_qty, 0) - ((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)) AS lead_time_closing_stock,
((ci.current_stock + COALESCE(ooi.on_order_qty, 0) - ((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)))
- ((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock) AS days_of_stock_closing_stock,
-- Raw replenishment needed
(CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time) -- Use rounded forecast units
+ CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
+ s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0) AS replenishment_needed_raw,

-- Final Forecasting Metrics
-- Replenishment Units (calculated need, before MOQ)
CEILING(GREATEST(0,
(CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
+ CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
+ s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
))::int AS replenishment_units,
-- Replenishment Cost/Retail/Profit (based on replenishment_units)
(CEILING(GREATEST(0,
(CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
+ CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
+ s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
))::int) * COALESCE(ci.current_effective_cost, 0.00)::numeric(12,2) AS replenishment_cost,
(CEILING(GREATEST(0,
(CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
+ CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
+ s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
))::int) * COALESCE(ci.current_price, 0.00)::numeric(12,2) AS replenishment_retail,
(CEILING(GREATEST(0,
(CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
+ CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
+ s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
))::int) * (COALESCE(ci.current_price, 0.00) - COALESCE(ci.current_effective_cost, 0.00))::numeric(12,2) AS replenishment_profit,

-- *** FIX: To Order Units (Apply MOQ rounding) ***
CASE
WHEN COALESCE(ci.moq, 0) <= 1 THEN -- Treat no/invalid MOQ or MOQ=1 as no rounding needed
CEILING(GREATEST(0,
(CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
+ CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
+ s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
))::int
ELSE -- Apply MOQ rounding: Round UP to nearest multiple of MOQ
(CEILING(GREATEST(0,
(CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
+ CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
+ s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
) / NULLIF(ci.moq::numeric, 0)) * COALESCE(ci.moq, 1))::int
END AS to_order_units,
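-- Example: a raw need of 13 units with an MOQ of 5 orders CEILING(13 / 5) * 5 = 15 units;
-- with MOQ <= 1 the raw need of 13 is used as-is.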

-- Forecast Lost Sales (Units occurring during lead time if current+on_order is insufficient)
CEILING(GREATEST(0,
((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time) -- Demand during lead time
- (ci.current_stock + COALESCE(ooi.on_order_qty, 0)) -- Supply available before order arrives
))::int AS forecast_lost_sales_units,
-- Forecast Lost Revenue
(CEILING(GREATEST(0,
((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
- (ci.current_stock + COALESCE(ooi.on_order_qty, 0))
))::int) * COALESCE(ci.current_price, 0.00)::numeric(12,2) AS forecast_lost_revenue,

-- Stock Cover etc (using safe velocity)
ci.current_stock / NULLIF((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)), 0) AS stock_cover_in_days,
COALESCE(ooi.on_order_qty, 0) / NULLIF((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)), 0) AS po_cover_in_days,
(ci.current_stock + COALESCE(ooi.on_order_qty, 0)) / NULLIF((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)), 0) AS sells_out_in_days,
-- Replenish Date (Project forward from 'today', which is _calculation_date + 1 day)
CASE
WHEN (COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) > 0 -- Check for positive velocity
THEN
_calculation_date + INTERVAL '1 day' -- Today
+ FLOOR(GREATEST(0, ci.current_stock - s.effective_safety_stock) -- Stock above safety
/ (COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) -- divided by velocity
)::integer * INTERVAL '1 day' -- Gives date safety stock is hit
- s.effective_lead_time * INTERVAL '1 day' -- Subtract lead time
ELSE NULL -- Cannot calculate if no sales velocity
END AS replenish_date,
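-- Example: 50 units on hand, safety stock 10, velocity 2/day: safety stock is reached in
-- FLOOR((50 - 10) / 2) = 20 days; with a 7-day lead time the reorder date lands 13 days out.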
-- Overstocked Units (Stock above safety + planning period demand)
GREATEST(0, ci.current_stock - s.effective_safety_stock -
(CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time) -- Demand during lead time
+ CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock)) -- Demand during DOS
)::int AS overstocked_units,
(GREATEST(0, ci.current_stock - s.effective_safety_stock -
(CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
+ CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
)::int) * COALESCE(ci.current_effective_cost, 0.00)::numeric(12,2) AS overstocked_cost,
(GREATEST(0, ci.current_stock - s.effective_safety_stock -
(CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
+ CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
)::int) * COALESCE(ci.current_price, 0.00)::numeric(12,2) AS overstocked_retail,
-- Old Stock Flag
(ci.created_at::date < (_calculation_date - INTERVAL '60 day')::date) AND
(COALESCE(ci.date_last_sold, hd.max_order_date) IS NULL OR COALESCE(ci.date_last_sold, hd.max_order_date) < (_calculation_date - INTERVAL '60 day')::date) AND
(hd.date_last_received_calc IS NULL OR hd.date_last_received_calc < (_calculation_date - INTERVAL '60 day')::date) AND
COALESCE(ooi.on_order_qty, 0) = 0 AS is_old_stock,
COALESCE(sa.yesterday_sales, 0) -- Sales for _calculation_date

FROM CurrentInfo ci
LEFT JOIN OnOrderInfo ooi ON ci.pid = ooi.pid
LEFT JOIN HistoricalDates hd ON ci.pid = hd.pid
LEFT JOIN SnapshotAggregates sa ON ci.pid = sa.pid
LEFT JOIN FirstPeriodMetrics fpm ON ci.pid = fpm.pid
LEFT JOIN Settings s ON ci.pid = s.pid
LEFT JOIN AvgLeadTime alt ON ci.pid = alt.pid -- Join calculated avg lead time
LEFT JOIN FinalABC fa ON ci.pid = fa.pid -- Join calculated ABC class
WHERE s.exclude_forecast IS FALSE OR s.exclude_forecast IS NULL

ON CONFLICT (pid) DO UPDATE SET
-- *** IMPORTANT: List ALL columns here, ensuring order matches INSERT list ***
-- Update ALL columns to ensure entire row is refreshed
last_calculated = EXCLUDED.last_calculated, sku = EXCLUDED.sku, title = EXCLUDED.title, brand = EXCLUDED.brand, vendor = EXCLUDED.vendor, image_url = EXCLUDED.image_url, is_visible = EXCLUDED.is_visible, is_replenishable = EXCLUDED.is_replenishable,
current_price = EXCLUDED.current_price, current_regular_price = EXCLUDED.current_regular_price, current_cost_price = EXCLUDED.current_cost_price, current_landing_cost_price = EXCLUDED.current_landing_cost_price,
current_stock = EXCLUDED.current_stock, current_stock_cost = EXCLUDED.current_stock_cost, current_stock_retail = EXCLUDED.current_stock_retail, current_stock_gross = EXCLUDED.current_stock_gross,
on_order_qty = EXCLUDED.on_order_qty, on_order_cost = EXCLUDED.on_order_cost, on_order_retail = EXCLUDED.on_order_retail, earliest_expected_date = EXCLUDED.earliest_expected_date,
date_created = EXCLUDED.date_created, date_first_received = EXCLUDED.date_first_received, date_last_received = EXCLUDED.date_last_received, date_first_sold = EXCLUDED.date_first_sold, date_last_sold = EXCLUDED.date_last_sold, age_days = EXCLUDED.age_days,
sales_7d = EXCLUDED.sales_7d, revenue_7d = EXCLUDED.revenue_7d, sales_14d = EXCLUDED.sales_14d, revenue_14d = EXCLUDED.revenue_14d, sales_30d = EXCLUDED.sales_30d, revenue_30d = EXCLUDED.revenue_30d, cogs_30d = EXCLUDED.cogs_30d, profit_30d = EXCLUDED.profit_30d,
returns_units_30d = EXCLUDED.returns_units_30d, returns_revenue_30d = EXCLUDED.returns_revenue_30d, discounts_30d = EXCLUDED.discounts_30d, gross_revenue_30d = EXCLUDED.gross_revenue_30d, gross_regular_revenue_30d = EXCLUDED.gross_regular_revenue_30d,
stockout_days_30d = EXCLUDED.stockout_days_30d, sales_365d = EXCLUDED.sales_365d, revenue_365d = EXCLUDED.revenue_365d,
avg_stock_units_30d = EXCLUDED.avg_stock_units_30d, avg_stock_cost_30d = EXCLUDED.avg_stock_cost_30d, avg_stock_retail_30d = EXCLUDED.avg_stock_retail_30d, avg_stock_gross_30d = EXCLUDED.avg_stock_gross_30d,
received_qty_30d = EXCLUDED.received_qty_30d, received_cost_30d = EXCLUDED.received_cost_30d,
lifetime_sales = EXCLUDED.lifetime_sales, lifetime_revenue = EXCLUDED.lifetime_revenue,
first_7_days_sales = EXCLUDED.first_7_days_sales, first_7_days_revenue = EXCLUDED.first_7_days_revenue, first_30_days_sales = EXCLUDED.first_30_days_sales, first_30_days_revenue = EXCLUDED.first_30_days_revenue,
first_60_days_sales = EXCLUDED.first_60_days_sales, first_60_days_revenue = EXCLUDED.first_60_days_revenue, first_90_days_sales = EXCLUDED.first_90_days_sales, first_90_days_revenue = EXCLUDED.first_90_days_revenue,
asp_30d = EXCLUDED.asp_30d, acp_30d = EXCLUDED.acp_30d, avg_ros_30d = EXCLUDED.avg_ros_30d, avg_sales_per_day_30d = EXCLUDED.avg_sales_per_day_30d,
margin_30d = EXCLUDED.margin_30d, markup_30d = EXCLUDED.markup_30d, gmroi_30d = EXCLUDED.gmroi_30d, stockturn_30d = EXCLUDED.stockturn_30d, return_rate_30d = EXCLUDED.return_rate_30d, discount_rate_30d = EXCLUDED.discount_rate_30d,
stockout_rate_30d = EXCLUDED.stockout_rate_30d, markdown_30d = EXCLUDED.markdown_30d, markdown_rate_30d = EXCLUDED.markdown_rate_30d, sell_through_30d = EXCLUDED.sell_through_30d,
avg_lead_time_days = EXCLUDED.avg_lead_time_days, abc_class = EXCLUDED.abc_class,
sales_velocity_daily = EXCLUDED.sales_velocity_daily, config_lead_time = EXCLUDED.config_lead_time, config_days_of_stock = EXCLUDED.config_days_of_stock, config_safety_stock = EXCLUDED.config_safety_stock,
planning_period_days = EXCLUDED.planning_period_days, lead_time_forecast_units = EXCLUDED.lead_time_forecast_units, days_of_stock_forecast_units = EXCLUDED.days_of_stock_forecast_units,
planning_period_forecast_units = EXCLUDED.planning_period_forecast_units, lead_time_closing_stock = EXCLUDED.lead_time_closing_stock, days_of_stock_closing_stock = EXCLUDED.days_of_stock_closing_stock,
replenishment_needed_raw = EXCLUDED.replenishment_needed_raw, replenishment_units = EXCLUDED.replenishment_units, replenishment_cost = EXCLUDED.replenishment_cost, replenishment_retail = EXCLUDED.replenishment_retail, replenishment_profit = EXCLUDED.replenishment_profit,
to_order_units = EXCLUDED.to_order_units, -- *** Update to use EXCLUDED ***
forecast_lost_sales_units = EXCLUDED.forecast_lost_sales_units, forecast_lost_revenue = EXCLUDED.forecast_lost_revenue,
stock_cover_in_days = EXCLUDED.stock_cover_in_days, po_cover_in_days = EXCLUDED.po_cover_in_days, sells_out_in_days = EXCLUDED.sells_out_in_days, replenish_date = EXCLUDED.replenish_date,
overstocked_units = EXCLUDED.overstocked_units, overstocked_cost = EXCLUDED.overstocked_cost, overstocked_retail = EXCLUDED.overstocked_retail, is_old_stock = EXCLUDED.is_old_stock,
yesterday_sales = EXCLUDED.yesterday_sales;
RAISE NOTICE 'Finished % module. Duration: %', _module_name, clock_timestamp() - _start_time;
END $$;
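A quick way to confirm the refresh landed is to query the table it just wrote. A minimal sketch, assuming the product_metrics table populated above; the 10-minute window is an arbitrary illustrative choice, not part of the script:

-- Spot-check the refresh: how many rows were just recalculated, plus headline totals.
SELECT COUNT(*) AS rows_total,
       COUNT(*) FILTER (WHERE last_calculated >= NOW() - INTERVAL '10 minutes') AS rows_refreshed,
       SUM(sales_30d) AS total_sales_30d,
       ROUND(SUM(revenue_30d), 2) AS total_revenue_30d
FROM public.product_metrics;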
@@ -1,152 +0,0 @@
-- Description: Rebuilds daily product snapshots from scratch using real orders data.
-- Fixes issues with duplicated/inflated metrics.
-- Dependencies: Core import tables (products, orders, receivings).
-- Frequency: One-time run to clear out problematic data.

DO $$
DECLARE
_module_name TEXT := 'rebuild_daily_snapshots';
_start_time TIMESTAMPTZ := clock_timestamp();
_date DATE;
_count INT;
_total_records INT := 0;
_begin_date DATE := (SELECT MIN(date)::date FROM orders WHERE date >= '2024-01-01'); -- Starting point for data rebuild
_end_date DATE := CURRENT_DATE;
BEGIN
RAISE NOTICE 'Beginning daily snapshots rebuild from % to %. Starting at %', _begin_date, _end_date, _start_time;

-- First truncate the existing snapshots to ensure a clean slate
TRUNCATE TABLE public.daily_product_snapshots;
RAISE NOTICE 'Cleared existing snapshot data';

-- Now rebuild the snapshots day by day
_date := _begin_date;

WHILE _date <= _end_date LOOP
RAISE NOTICE 'Processing date %...', _date;

-- Create snapshots for this date
WITH SalesData AS (
SELECT
p.pid,
p.sku,
-- Count orders to ensure we only include products with real activity
COUNT(o.id) as order_count,
-- Aggregate Sales (Quantity > 0, Status not Canceled/Returned)
COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN o.quantity ELSE 0 END), 0) AS units_sold,
COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN o.price * o.quantity ELSE 0 END), 0.00) AS gross_revenue_unadjusted,
COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN o.discount ELSE 0 END), 0.00) AS discounts,
COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN COALESCE(o.costeach, p.landing_cost_price, p.cost_price) * o.quantity ELSE 0 END), 0.00) AS cogs,
COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN p.regular_price * o.quantity ELSE 0 END), 0.00) AS gross_regular_revenue,

-- Aggregate Returns (Quantity < 0 or Status = Returned)
COALESCE(SUM(CASE WHEN o.quantity < 0 OR COALESCE(o.status, 'pending') = 'returned' THEN ABS(o.quantity) ELSE 0 END), 0) AS units_returned,
COALESCE(SUM(CASE WHEN o.quantity < 0 OR COALESCE(o.status, 'pending') = 'returned' THEN o.price * ABS(o.quantity) ELSE 0 END), 0.00) AS returns_revenue
FROM public.products p
LEFT JOIN public.orders o
ON p.pid = o.pid
AND o.date::date = _date
GROUP BY p.pid, p.sku
HAVING COUNT(o.id) > 0 -- Only include products with actual orders for this date
),
ReceivingData AS (
SELECT
r.pid,
-- Count receiving documents to ensure we only include products with real activity
COUNT(DISTINCT r.receiving_id) as receiving_count,
-- Calculate received quantity for this day
SUM(r.qty_each) AS units_received,
-- Calculate received cost for this day
SUM(r.qty_each * r.cost_each) AS cost_received
FROM public.receivings r
WHERE r.received_date::date = _date
GROUP BY r.pid
HAVING COUNT(DISTINCT r.receiving_id) > 0 OR SUM(r.qty_each) > 0
),
-- Get stock quantities for the day - note this is approximate since we're using current products data
StockData AS (
SELECT
p.pid,
p.stock_quantity,
COALESCE(p.landing_cost_price, p.cost_price, 0.00) as effective_cost_price,
COALESCE(p.price, 0.00) as current_price,
COALESCE(p.regular_price, 0.00) as current_regular_price
FROM public.products p
)
INSERT INTO public.daily_product_snapshots (
snapshot_date,
pid,
sku,
eod_stock_quantity,
eod_stock_cost,
eod_stock_retail,
eod_stock_gross,
stockout_flag,
units_sold,
units_returned,
gross_revenue,
discounts,
returns_revenue,
net_revenue,
cogs,
gross_regular_revenue,
profit,
units_received,
cost_received,
calculation_timestamp
)
SELECT
_date AS snapshot_date,
COALESCE(sd.pid, rd.pid) AS pid,
sd.sku,
-- Use current stock as approximation, since historical stock data may not be available
s.stock_quantity AS eod_stock_quantity,
s.stock_quantity * s.effective_cost_price AS eod_stock_cost,
s.stock_quantity * s.current_price AS eod_stock_retail,
s.stock_quantity * s.current_regular_price AS eod_stock_gross,
(s.stock_quantity <= 0) AS stockout_flag,
-- Sales metrics
COALESCE(sd.units_sold, 0),
COALESCE(sd.units_returned, 0),
COALESCE(sd.gross_revenue_unadjusted, 0.00),
COALESCE(sd.discounts, 0.00),
COALESCE(sd.returns_revenue, 0.00),
COALESCE(sd.gross_revenue_unadjusted, 0.00) - COALESCE(sd.discounts, 0.00) AS net_revenue,
COALESCE(sd.cogs, 0.00),
COALESCE(sd.gross_regular_revenue, 0.00),
(COALESCE(sd.gross_revenue_unadjusted, 0.00) - COALESCE(sd.discounts, 0.00)) - COALESCE(sd.cogs, 0.00) AS profit,
-- Receiving metrics
COALESCE(rd.units_received, 0),
COALESCE(rd.cost_received, 0.00),
_start_time
FROM SalesData sd
FULL OUTER JOIN ReceivingData rd ON sd.pid = rd.pid
LEFT JOIN StockData s ON COALESCE(sd.pid, rd.pid) = s.pid
WHERE (COALESCE(sd.order_count, 0) > 0 OR COALESCE(rd.receiving_count, 0) > 0);

-- Get record count for this day
GET DIAGNOSTICS _count = ROW_COUNT;
_total_records := _total_records + _count;

RAISE NOTICE 'Added % snapshot records for date %', _count, _date;

-- Move to next day
_date := _date + INTERVAL '1 day';
END LOOP;

RAISE NOTICE 'Rebuilding daily snapshots complete. Added % total records across % days.', _total_records, (_end_date - _begin_date)::integer + 1;

-- Update the status table for daily_snapshots
INSERT INTO public.calculate_status (module_name, last_calculation_timestamp)
VALUES ('daily_snapshots', _start_time)
ON CONFLICT (module_name) DO UPDATE SET last_calculation_timestamp = _start_time;

-- Now update product_metrics based on the rebuilt snapshots
RAISE NOTICE 'Triggering update of product_metrics table...';

-- Call the update_product_metrics procedure directly
-- Your system might use a different method to trigger this update
PERFORM pg_notify('recalculate_metrics', 'product_metrics');

RAISE NOTICE 'Rebuild complete. Duration: %', clock_timestamp() - _start_time;
END $$;
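One way to sanity-check the rebuilt table is to reconcile a single day's totals against the raw orders it was built from. A minimal sketch under the schemas above; the sample date is hypothetical:

-- Units recorded in snapshots vs. units in qualifying orders for one day; the two sums should match.
SELECT s.snapshot_units, o.order_units
FROM (SELECT COALESCE(SUM(units_sold), 0) AS snapshot_units
      FROM public.daily_product_snapshots
      WHERE snapshot_date = DATE '2024-06-01') s
CROSS JOIN (SELECT COALESCE(SUM(quantity), 0) AS order_units
            FROM public.orders
            WHERE date::date = DATE '2024-06-01'
              AND quantity > 0
              AND COALESCE(status, 'pending') NOT IN ('canceled', 'returned')) o;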
@@ -42,20 +42,6 @@ BEGIN
JOIN public.products p ON pm.pid = p.pid
GROUP BY brand_group
),
- PreviousPeriodBrandMetrics AS (
- -- Get previous period metrics for growth calculation
- SELECT
- COALESCE(p.brand, 'Unbranded') AS brand_group,
- SUM(CASE WHEN dps.snapshot_date >= CURRENT_DATE - INTERVAL '59 days'
- AND dps.snapshot_date < CURRENT_DATE - INTERVAL '29 days'
- THEN dps.units_sold ELSE 0 END) AS sales_prev_30d,
- SUM(CASE WHEN dps.snapshot_date >= CURRENT_DATE - INTERVAL '59 days'
- AND dps.snapshot_date < CURRENT_DATE - INTERVAL '29 days'
- THEN dps.net_revenue ELSE 0 END) AS revenue_prev_30d
- FROM public.daily_product_snapshots dps
- JOIN public.products p ON dps.pid = p.pid
- GROUP BY brand_group
- ),
AllBrands AS (
-- Ensure all brands from products table are included, mapping NULL/empty to 'Unbranded'
SELECT DISTINCT COALESCE(brand, 'Unbranded') as brand_group
@@ -67,8 +53,7 @@ BEGIN
current_stock_units, current_stock_cost, current_stock_retail,
sales_7d, revenue_7d, sales_30d, revenue_30d, profit_30d, cogs_30d,
sales_365d, revenue_365d, lifetime_sales, lifetime_revenue,
- avg_margin_30d,
- sales_growth_30d_vs_prev, revenue_growth_30d_vs_prev
+ avg_margin_30d
)
SELECT
b.brand_group,
@@ -93,13 +78,9 @@ BEGIN
-- This is mathematically equivalent to profit/revenue but more explicit
((COALESCE(ba.revenue_30d, 0) - COALESCE(ba.cogs_30d, 0)) / COALESCE(ba.revenue_30d, 1)) * 100.0
ELSE NULL -- No margin for low/no revenue brands
- END,
- -- Growth metrics
- std_numeric(safe_divide((ba.sales_30d - ppbm.sales_prev_30d) * 100.0, ppbm.sales_prev_30d), 2),
- std_numeric(safe_divide((ba.revenue_30d - ppbm.revenue_prev_30d) * 100.0, ppbm.revenue_prev_30d), 2)
+ END
FROM AllBrands b
LEFT JOIN BrandAggregates ba ON b.brand_group = ba.brand_group
- LEFT JOIN PreviousPeriodBrandMetrics ppbm ON b.brand_group = ppbm.brand_group

ON CONFLICT (brand_name) DO UPDATE SET
last_calculated = EXCLUDED.last_calculated,
@@ -114,9 +95,7 @@ BEGIN
profit_30d = EXCLUDED.profit_30d, cogs_30d = EXCLUDED.cogs_30d,
sales_365d = EXCLUDED.sales_365d, revenue_365d = EXCLUDED.revenue_365d,
lifetime_sales = EXCLUDED.lifetime_sales, lifetime_revenue = EXCLUDED.lifetime_revenue,
- avg_margin_30d = EXCLUDED.avg_margin_30d,
- sales_growth_30d_vs_prev = EXCLUDED.sales_growth_30d_vs_prev,
- revenue_growth_30d_vs_prev = EXCLUDED.revenue_growth_30d_vs_prev
+ avg_margin_30d = EXCLUDED.avg_margin_30d
WHERE -- Only update if at least one value has changed
brand_metrics.product_count IS DISTINCT FROM EXCLUDED.product_count OR
brand_metrics.active_product_count IS DISTINCT FROM EXCLUDED.active_product_count OR

@@ -1,5 +1,5 @@
- -- Description: Calculates and updates aggregated metrics per category with hierarchy rollups.
- -- Dependencies: product_metrics, products, categories, product_categories, category_hierarchy, calculate_status table.
+ -- Description: Calculates and updates aggregated metrics per category.
+ -- Dependencies: product_metrics, products, categories, product_categories, calculate_status table.
-- Frequency: Daily (after product_metrics update).

DO $$
@@ -9,21 +9,55 @@ DECLARE
_min_revenue NUMERIC := 50.00; -- Minimum revenue threshold for margin calculation
BEGIN
RAISE NOTICE 'Running % calculation...', _module_name;

- -- Refresh the category hierarchy materialized view first
- REFRESH MATERIALIZED VIEW CONCURRENTLY category_hierarchy;
-
- -- First calculate direct metrics (products directly in each category)
- WITH DirectCategoryMetrics AS (
+ WITH
+ -- Identify the hierarchy depth for each category
+ CategoryDepth AS (
+ WITH RECURSIVE CategoryTree AS (
+ -- Base case: Start with categories without parents (root categories)
+ SELECT cat_id, name, parent_id, 0 AS depth
+ FROM public.categories
+ WHERE parent_id IS NULL
+
+ UNION ALL
+
+ -- Recursive step: Add child categories with incremented depth
+ SELECT c.cat_id, c.name, c.parent_id, ct.depth + 1
+ FROM public.categories c
+ JOIN CategoryTree ct ON c.parent_id = ct.cat_id
+ )
+ SELECT cat_id, depth
+ FROM CategoryTree
+ ),
+ -- For each product, find the most specific (deepest) category it belongs to
+ ProductDeepestCategory AS (
+ SELECT
+ pc.pid,
+ pc.cat_id
+ FROM public.product_categories pc
+ JOIN CategoryDepth cd ON pc.cat_id = cd.cat_id
+ -- This is the key part: for each product, select only the category with maximum depth
+ WHERE (pc.pid, cd.depth) IN (
+ SELECT pc2.pid, MAX(cd2.depth)
+ FROM public.product_categories pc2
+ JOIN CategoryDepth cd2 ON pc2.cat_id = cd2.cat_id
+ GROUP BY pc2.pid
+ )
),
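-- Example (hypothetical category names): a product tagged both "Shoes" (depth 0) and
-- "Shoes > Running" (depth 1) is attributed only to "Shoes > Running" here, so the
-- rollups later will not double count it at the parent level.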
+ -- Calculate metrics only at the most specific category level for each product
+ -- These are the direct metrics (only products directly in this category)
+ DirectCategoryMetrics AS (
SELECT
- pc.cat_id,
+ pdc.cat_id,
-- Counts
COUNT(DISTINCT pm.pid) AS product_count,
COUNT(DISTINCT CASE WHEN pm.is_visible THEN pm.pid END) AS active_product_count,
COUNT(DISTINCT CASE WHEN pm.is_replenishable THEN pm.pid END) AS replenishable_product_count,
-- Current Stock
SUM(pm.current_stock) AS current_stock_units,
SUM(pm.current_stock_cost) AS current_stock_cost,
SUM(pm.current_stock_retail) AS current_stock_retail,
-- Sales metrics with proper filtering
-- Rolling Periods - Only include products with actual sales in each period
SUM(CASE WHEN pm.sales_7d > 0 THEN pm.sales_7d ELSE 0 END) AS sales_7d,
SUM(CASE WHEN pm.revenue_7d > 0 THEN pm.revenue_7d ELSE 0 END) AS revenue_7d,
SUM(CASE WHEN pm.sales_30d > 0 THEN pm.sales_30d ELSE 0 END) AS sales_30d,
@@ -33,141 +67,179 @@ BEGIN
SUM(CASE WHEN pm.sales_365d > 0 THEN pm.sales_365d ELSE 0 END) AS sales_365d,
SUM(CASE WHEN pm.revenue_365d > 0 THEN pm.revenue_365d ELSE 0 END) AS revenue_365d,
SUM(CASE WHEN pm.lifetime_sales > 0 THEN pm.lifetime_sales ELSE 0 END) AS lifetime_sales,
- SUM(CASE WHEN pm.lifetime_revenue > 0 THEN pm.lifetime_revenue ELSE 0 END) AS lifetime_revenue
- FROM public.product_categories pc
- JOIN public.product_metrics pm ON pc.pid = pm.pid
- GROUP BY pc.cat_id
+ SUM(CASE WHEN pm.lifetime_revenue > 0 THEN pm.lifetime_revenue ELSE 0 END) AS lifetime_revenue,
+ -- Data for KPIs - Only average stock for products with stock
+ SUM(CASE WHEN pm.avg_stock_units_30d > 0 THEN pm.avg_stock_units_30d ELSE 0 END) AS total_avg_stock_units_30d
+ FROM public.product_metrics pm
+ JOIN ProductDeepestCategory pdc ON pm.pid = pdc.pid
+ GROUP BY pdc.cat_id
),
- -- Calculate rolled-up metrics (including all descendant categories)
- RolledUpMetrics AS (
+ -- Build a category lookup table for parent relationships
+ CategoryHierarchyPaths AS (
+ WITH RECURSIVE ParentPaths AS (
+ -- Base case: All categories with their immediate parents
+ SELECT
+ cat_id,
+ cat_id as leaf_id, -- Every category is its own leaf initially
+ ARRAY[cat_id] as path
+ FROM public.categories
+
+ UNION ALL
+
+ -- Recursive step: Walk up the parent chain
+ SELECT
+ c.parent_id as cat_id,
+ pp.leaf_id, -- Keep the original leaf_id
+ c.parent_id || pp.path as path
+ FROM ParentPaths pp
+ JOIN public.categories c ON pp.cat_id = c.cat_id
+ WHERE c.parent_id IS NOT NULL -- Stop at root categories
+ )
+ -- Select distinct paths to avoid duplication
+ SELECT DISTINCT cat_id, leaf_id
+ FROM ParentPaths
+ ),
+ -- Aggregate metrics from leaf categories to their ancestors without duplication
+ -- These are the rolled-up metrics (including all child categories)
+ RollupMetrics AS (
SELECT
- ch.cat_id,
- -- Sum metrics from this category and all its descendants
- SUM(dcm.product_count) AS product_count,
- SUM(dcm.active_product_count) AS active_product_count,
- SUM(dcm.replenishable_product_count) AS replenishable_product_count,
- SUM(dcm.current_stock_units) AS current_stock_units,
- SUM(dcm.current_stock_cost) AS current_stock_cost,
- SUM(dcm.current_stock_retail) AS current_stock_retail,
- SUM(dcm.sales_7d) AS sales_7d,
- SUM(dcm.revenue_7d) AS revenue_7d,
- SUM(dcm.sales_30d) AS sales_30d,
- SUM(dcm.revenue_30d) AS revenue_30d,
- SUM(dcm.cogs_30d) AS cogs_30d,
- SUM(dcm.profit_30d) AS profit_30d,
- SUM(dcm.sales_365d) AS sales_365d,
- SUM(dcm.revenue_365d) AS revenue_365d,
- SUM(dcm.lifetime_sales) AS lifetime_sales,
- SUM(dcm.lifetime_revenue) AS lifetime_revenue
- FROM category_hierarchy ch
- LEFT JOIN DirectCategoryMetrics dcm ON
- dcm.cat_id = ch.cat_id OR
- dcm.cat_id = ANY(SELECT cat_id FROM category_hierarchy WHERE ch.cat_id = ANY(ancestor_ids))
- GROUP BY ch.cat_id
+ chp.cat_id,
+ -- For each parent category, count distinct products to avoid duplication
+ COUNT(DISTINCT dcm.cat_id) AS child_categories_count,
+ SUM(dcm.product_count) AS rollup_product_count,
+ SUM(dcm.active_product_count) AS rollup_active_product_count,
+ SUM(dcm.replenishable_product_count) AS rollup_replenishable_product_count,
+ SUM(dcm.current_stock_units) AS rollup_current_stock_units,
+ SUM(dcm.current_stock_cost) AS rollup_current_stock_cost,
+ SUM(dcm.current_stock_retail) AS rollup_current_stock_retail,
+ SUM(dcm.sales_7d) AS rollup_sales_7d,
+ SUM(dcm.revenue_7d) AS rollup_revenue_7d,
+ SUM(dcm.sales_30d) AS rollup_sales_30d,
+ SUM(dcm.revenue_30d) AS rollup_revenue_30d,
+ SUM(dcm.cogs_30d) AS rollup_cogs_30d,
+ SUM(dcm.profit_30d) AS rollup_profit_30d,
+ SUM(dcm.sales_365d) AS rollup_sales_365d,
+ SUM(dcm.revenue_365d) AS rollup_revenue_365d,
+ SUM(dcm.lifetime_sales) AS rollup_lifetime_sales,
+ SUM(dcm.lifetime_revenue) AS rollup_lifetime_revenue,
+ SUM(dcm.total_avg_stock_units_30d) AS rollup_total_avg_stock_units_30d
+ FROM CategoryHierarchyPaths chp
+ JOIN DirectCategoryMetrics dcm ON chp.leaf_id = dcm.cat_id
+ GROUP BY chp.cat_id
),
- PreviousPeriodCategoryMetrics AS (
- -- Get previous period metrics for growth calculation
+ -- Combine direct and rollup metrics
+ CombinedMetrics AS (
SELECT
- pc.cat_id,
- SUM(CASE WHEN dps.snapshot_date >= CURRENT_DATE - INTERVAL '59 days'
- AND dps.snapshot_date < CURRENT_DATE - INTERVAL '29 days'
- THEN dps.units_sold ELSE 0 END) AS sales_prev_30d,
- SUM(CASE WHEN dps.snapshot_date >= CURRENT_DATE - INTERVAL '59 days'
- AND dps.snapshot_date < CURRENT_DATE - INTERVAL '29 days'
- THEN dps.net_revenue ELSE 0 END) AS revenue_prev_30d
- FROM public.daily_product_snapshots dps
- JOIN public.product_categories pc ON dps.pid = pc.pid
- GROUP BY pc.cat_id
- ),
- RolledUpPreviousPeriod AS (
- -- Calculate rolled-up previous period metrics
- SELECT
- ch.cat_id,
- SUM(ppcm.sales_prev_30d) AS sales_prev_30d,
- SUM(ppcm.revenue_prev_30d) AS revenue_prev_30d
- FROM category_hierarchy ch
- LEFT JOIN PreviousPeriodCategoryMetrics ppcm ON
- ppcm.cat_id = ch.cat_id OR
- ppcm.cat_id = ANY(SELECT cat_id FROM category_hierarchy WHERE ch.cat_id = ANY(ancestor_ids))
- GROUP BY ch.cat_id
- ),
- AllCategories AS (
- -- Ensure all categories are included
- SELECT
c.cat_id,
c.name,
c.type,
- c.parent_id
+ c.parent_id,
+ -- Direct metrics (just this category)
+ COALESCE(dcm.product_count, 0) AS direct_product_count,
+ COALESCE(dcm.active_product_count, 0) AS direct_active_product_count,
+ COALESCE(dcm.replenishable_product_count, 0) AS direct_replenishable_product_count,
+ COALESCE(dcm.current_stock_units, 0) AS direct_current_stock_units,
+ COALESCE(dcm.current_stock_cost, 0) AS direct_current_stock_cost,
+ COALESCE(dcm.current_stock_retail, 0) AS direct_current_stock_retail,
+ COALESCE(dcm.sales_7d, 0) AS direct_sales_7d,
+ COALESCE(dcm.revenue_7d, 0) AS direct_revenue_7d,
+ COALESCE(dcm.sales_30d, 0) AS direct_sales_30d,
+ COALESCE(dcm.revenue_30d, 0) AS direct_revenue_30d,
+ COALESCE(dcm.cogs_30d, 0) AS direct_cogs_30d,
+ COALESCE(dcm.profit_30d, 0) AS direct_profit_30d,
+ COALESCE(dcm.sales_365d, 0) AS direct_sales_365d,
+ COALESCE(dcm.revenue_365d, 0) AS direct_revenue_365d,
+ COALESCE(dcm.lifetime_sales, 0) AS direct_lifetime_sales,
+ COALESCE(dcm.lifetime_revenue, 0) AS direct_lifetime_revenue,
+ COALESCE(dcm.total_avg_stock_units_30d, 0) AS direct_avg_stock_units_30d,
+
+ -- Rolled up metrics (this category + all children)
+ COALESCE(rm.rollup_product_count, 0) AS product_count,
+ COALESCE(rm.rollup_active_product_count, 0) AS active_product_count,
+ COALESCE(rm.rollup_replenishable_product_count, 0) AS replenishable_product_count,
+ COALESCE(rm.rollup_current_stock_units, 0) AS current_stock_units,
+ COALESCE(rm.rollup_current_stock_cost, 0) AS current_stock_cost,
+ COALESCE(rm.rollup_current_stock_retail, 0) AS current_stock_retail,
+ COALESCE(rm.rollup_sales_7d, 0) AS sales_7d,
+ COALESCE(rm.rollup_revenue_7d, 0) AS revenue_7d,
+ COALESCE(rm.rollup_sales_30d, 0) AS sales_30d,
+ COALESCE(rm.rollup_revenue_30d, 0) AS revenue_30d,
+ COALESCE(rm.rollup_cogs_30d, 0) AS cogs_30d,
+ COALESCE(rm.rollup_profit_30d, 0) AS profit_30d,
+ COALESCE(rm.rollup_sales_365d, 0) AS sales_365d,
+ COALESCE(rm.rollup_revenue_365d, 0) AS revenue_365d,
+ COALESCE(rm.rollup_lifetime_sales, 0) AS lifetime_sales,
+ COALESCE(rm.rollup_lifetime_revenue, 0) AS lifetime_revenue,
+ COALESCE(rm.rollup_total_avg_stock_units_30d, 0) AS total_avg_stock_units_30d
FROM public.categories c
- WHERE c.status = 'active'
+ LEFT JOIN DirectCategoryMetrics dcm ON c.cat_id = dcm.cat_id
+ LEFT JOIN RollupMetrics rm ON c.cat_id = rm.cat_id
)
INSERT INTO public.category_metrics (
category_id, category_name, category_type, parent_id, last_calculated,
- -- Rolled-up metrics
+ -- Store all direct and rolled up metrics
product_count, active_product_count, replenishable_product_count,
current_stock_units, current_stock_cost, current_stock_retail,
sales_7d, revenue_7d, sales_30d, revenue_30d, profit_30d, cogs_30d,
sales_365d, revenue_365d, lifetime_sales, lifetime_revenue,
- -- Direct metrics
+ -- Also store direct metrics with direct_ prefix
direct_product_count, direct_active_product_count, direct_replenishable_product_count,
direct_current_stock_units, direct_stock_cost, direct_stock_retail,
direct_sales_7d, direct_revenue_7d, direct_sales_30d, direct_revenue_30d,
direct_profit_30d, direct_cogs_30d, direct_sales_365d, direct_revenue_365d,
direct_lifetime_sales, direct_lifetime_revenue,
-- KPIs
- avg_margin_30d,
- sales_growth_30d_vs_prev, revenue_growth_30d_vs_prev
+ avg_margin_30d, stock_turn_30d
)
SELECT
- ac.cat_id,
- ac.name,
- ac.type,
- ac.parent_id,
+ cm.cat_id,
+ cm.name,
+ cm.type,
+ cm.parent_id,
_start_time,
- -- Rolled-up metrics (includes descendants)
- COALESCE(rum.product_count, 0),
- COALESCE(rum.active_product_count, 0),
- COALESCE(rum.replenishable_product_count, 0),
- COALESCE(rum.current_stock_units, 0),
- COALESCE(rum.current_stock_cost, 0.00),
- COALESCE(rum.current_stock_retail, 0.00),
- COALESCE(rum.sales_7d, 0), COALESCE(rum.revenue_7d, 0.00),
- COALESCE(rum.sales_30d, 0), COALESCE(rum.revenue_30d, 0.00),
- COALESCE(rum.profit_30d, 0.00), COALESCE(rum.cogs_30d, 0.00),
- COALESCE(rum.sales_365d, 0), COALESCE(rum.revenue_365d, 0.00),
- COALESCE(rum.lifetime_sales, 0), COALESCE(rum.lifetime_revenue, 0.00),
- -- Direct metrics (only this category)
- COALESCE(dcm.product_count, 0),
- COALESCE(dcm.active_product_count, 0),
- COALESCE(dcm.replenishable_product_count, 0),
- COALESCE(dcm.current_stock_units, 0),
- COALESCE(dcm.current_stock_cost, 0.00),
- COALESCE(dcm.current_stock_retail, 0.00),
- COALESCE(dcm.sales_7d, 0), COALESCE(dcm.revenue_7d, 0.00),
- COALESCE(dcm.sales_30d, 0), COALESCE(dcm.revenue_30d, 0.00),
- COALESCE(dcm.profit_30d, 0.00), COALESCE(dcm.cogs_30d, 0.00),
- COALESCE(dcm.sales_365d, 0), COALESCE(dcm.revenue_365d, 0.00),
- COALESCE(dcm.lifetime_sales, 0), COALESCE(dcm.lifetime_revenue, 0.00),
+ -- Rolled-up metrics (total including children)
+ cm.product_count,
+ cm.active_product_count,
+ cm.replenishable_product_count,
+ cm.current_stock_units,
+ cm.current_stock_cost,
+ cm.current_stock_retail,
+ cm.sales_7d, cm.revenue_7d,
+ cm.sales_30d, cm.revenue_30d, cm.profit_30d, cm.cogs_30d,
+ cm.sales_365d, cm.revenue_365d,
+ cm.lifetime_sales, cm.lifetime_revenue,
+ -- Direct metrics (just this category)
+ cm.direct_product_count,
+ cm.direct_active_product_count,
+ cm.direct_replenishable_product_count,
+ cm.direct_current_stock_units,
+ cm.direct_current_stock_cost,
+ cm.direct_current_stock_retail,
+ cm.direct_sales_7d, cm.direct_revenue_7d,
+ cm.direct_sales_30d, cm.direct_revenue_30d, cm.direct_profit_30d, cm.direct_cogs_30d,
+ cm.direct_sales_365d, cm.direct_revenue_365d,
+ cm.direct_lifetime_sales, cm.direct_lifetime_revenue,
-- KPIs - Calculate margin only for categories with significant revenue
CASE
- WHEN COALESCE(rum.revenue_30d, 0) >= _min_revenue THEN
- ((COALESCE(rum.revenue_30d, 0) - COALESCE(rum.cogs_30d, 0)) / COALESCE(rum.revenue_30d, 1)) * 100.0
- ELSE NULL
+ WHEN cm.revenue_30d >= _min_revenue THEN
+ ((cm.revenue_30d - cm.cogs_30d) / cm.revenue_30d) * 100.0
+ ELSE NULL -- No margin for low/no revenue categories
END,
- -- Growth metrics for rolled-up values
- std_numeric(safe_divide((rum.sales_30d - rupp.sales_prev_30d) * 100.0, rupp.sales_prev_30d), 2),
- std_numeric(safe_divide((rum.revenue_30d - rupp.revenue_prev_30d) * 100.0, rupp.revenue_prev_30d), 2)
- FROM AllCategories ac
- LEFT JOIN DirectCategoryMetrics dcm ON ac.cat_id = dcm.cat_id
- LEFT JOIN RolledUpMetrics rum ON ac.cat_id = rum.cat_id
- LEFT JOIN RolledUpPreviousPeriod rupp ON ac.cat_id = rupp.cat_id
+ -- Stock Turn calculation
+ CASE
+ WHEN cm.total_avg_stock_units_30d > 0 THEN
+ cm.sales_30d / cm.total_avg_stock_units_30d
+ ELSE NULL -- No stock turn if no average stock
+ END
+ FROM CombinedMetrics cm

ON CONFLICT (category_id) DO UPDATE SET
- last_calculated = EXCLUDED.last_calculated,
category_name = EXCLUDED.category_name,
category_type = EXCLUDED.category_type,
parent_id = EXCLUDED.parent_id,
- -- Rolled-up metrics
+ last_calculated = EXCLUDED.last_calculated,
+
+ -- ROLLED-UP METRICS (includes this category + all descendants)
product_count = EXCLUDED.product_count,
active_product_count = EXCLUDED.active_product_count,
replenishable_product_count = EXCLUDED.replenishable_product_count,
@@ -179,7 +251,8 @@ BEGIN
profit_30d = EXCLUDED.profit_30d, cogs_30d = EXCLUDED.cogs_30d,
sales_365d = EXCLUDED.sales_365d, revenue_365d = EXCLUDED.revenue_365d,
lifetime_sales = EXCLUDED.lifetime_sales, lifetime_revenue = EXCLUDED.lifetime_revenue,
- -- Direct metrics
+
+ -- DIRECT METRICS (only products directly in this category)
direct_product_count = EXCLUDED.direct_product_count,
direct_active_product_count = EXCLUDED.direct_active_product_count,
direct_replenishable_product_count = EXCLUDED.direct_replenishable_product_count,
@@ -191,9 +264,10 @@ BEGIN
direct_profit_30d = EXCLUDED.direct_profit_30d, direct_cogs_30d = EXCLUDED.direct_cogs_30d,
direct_sales_365d = EXCLUDED.direct_sales_365d, direct_revenue_365d = EXCLUDED.direct_revenue_365d,
direct_lifetime_sales = EXCLUDED.direct_lifetime_sales, direct_lifetime_revenue = EXCLUDED.direct_lifetime_revenue,
+
+ -- Calculated KPIs
avg_margin_30d = EXCLUDED.avg_margin_30d,
- sales_growth_30d_vs_prev = EXCLUDED.sales_growth_30d_vs_prev,
- revenue_growth_30d_vs_prev = EXCLUDED.revenue_growth_30d_vs_prev
+ stock_turn_30d = EXCLUDED.stock_turn_30d
WHERE -- Only update if at least one value has changed
category_metrics.product_count IS DISTINCT FROM EXCLUDED.product_count OR
category_metrics.active_product_count IS DISTINCT FROM EXCLUDED.active_product_count OR
@@ -217,23 +291,19 @@ WITH update_stats AS (
SELECT
COUNT(*) as total_categories,
COUNT(*) FILTER (WHERE last_calculated >= NOW() - INTERVAL '5 minutes') as rows_processed,
COUNT(*) FILTER (WHERE category_type = 10) as sections,
- COUNT(*) FILTER (WHERE category_type = 11) as categories,
- COUNT(*) FILTER (WHERE category_type = 12) as subcategories,
- SUM(product_count) as total_products_rolled,
- SUM(direct_product_count) as total_products_direct,
- SUM(sales_30d) as total_sales_30d,
- SUM(revenue_30d) as total_revenue_30d
+ COUNT(*) FILTER (WHERE category_type = 11) as main_categories, -- 11 = category
+ COUNT(*) FILTER (WHERE category_type = 12) as subcategories, -- 12 = subcategory
+ SUM(product_count) as total_products,
+ SUM(active_product_count) as total_active_products,
+ SUM(current_stock_units) as total_stock_units
FROM public.category_metrics
)
SELECT
rows_processed,
total_categories,
sections,
- categories,
+ main_categories,
subcategories,
- total_products_rolled::int,
- total_products_direct::int,
- total_sales_30d::int,
- ROUND(total_revenue_30d, 2) as total_revenue_30d
+ total_products::int,
+ total_active_products::int,
+ total_stock_units::int
FROM update_stats;
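
-- The growth expressions above use std_numeric() and safe_divide(), which are
-- referenced throughout these scripts but not defined in this commit. A minimal
-- sketch of the assumed semantics (NULL-safe division, fixed-scale rounding);
-- the shipped implementations may differ:
CREATE OR REPLACE FUNCTION safe_divide(_numerator NUMERIC, _denominator NUMERIC)
RETURNS NUMERIC AS $$
    -- Returns NULL instead of raising division_by_zero.
    SELECT CASE
        WHEN _denominator IS NULL OR _denominator = 0 THEN NULL
        ELSE _numerator / _denominator
    END;
$$ LANGUAGE sql IMMUTABLE;

CREATE OR REPLACE FUNCTION std_numeric(_value NUMERIC, _scale INT)
RETURNS NUMERIC AS $$
    -- Normalizes a value to a fixed scale so stored KPIs round consistently.
    SELECT ROUND(_value, _scale);
$$ LANGUAGE sql IMMUTABLE;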
@@ -1,185 +0,0 @@
-- Description: Calculates and updates aggregated metrics per vendor.
-- Dependencies: product_metrics, products, purchase_orders, calculate_status table.
-- Frequency: Daily (after product_metrics update).

DO $$
DECLARE
_module_name VARCHAR := 'vendor_metrics';
_start_time TIMESTAMPTZ := clock_timestamp();
BEGIN
RAISE NOTICE 'Running % calculation...', _module_name;

WITH VendorProductAggregates AS (
-- Aggregate metrics from product_metrics table per vendor
SELECT
p.vendor,
COUNT(DISTINCT pm.pid) AS product_count,
COUNT(DISTINCT CASE WHEN pm.is_visible THEN pm.pid END) AS active_product_count,
COUNT(DISTINCT CASE WHEN pm.is_replenishable THEN pm.pid END) AS replenishable_product_count,
SUM(pm.current_stock) AS current_stock_units,
SUM(pm.current_stock_cost) AS current_stock_cost,
SUM(pm.current_stock_retail) AS current_stock_retail,
SUM(pm.on_order_qty) AS on_order_units,
SUM(pm.on_order_cost) AS on_order_cost,
-- Only include products with valid sales data in each time period
COUNT(DISTINCT CASE WHEN pm.sales_7d > 0 THEN pm.pid END) AS products_with_sales_7d,
SUM(CASE WHEN pm.sales_7d > 0 THEN pm.sales_7d ELSE 0 END) AS sales_7d,
SUM(CASE WHEN pm.revenue_7d > 0 THEN pm.revenue_7d ELSE 0 END) AS revenue_7d,

COUNT(DISTINCT CASE WHEN pm.sales_30d > 0 THEN pm.pid END) AS products_with_sales_30d,
SUM(CASE WHEN pm.sales_30d > 0 THEN pm.sales_30d ELSE 0 END) AS sales_30d,
SUM(CASE WHEN pm.revenue_30d > 0 THEN pm.revenue_30d ELSE 0 END) AS revenue_30d,
SUM(CASE WHEN pm.cogs_30d > 0 THEN pm.cogs_30d ELSE 0 END) AS cogs_30d,
SUM(CASE WHEN pm.profit_30d != 0 THEN pm.profit_30d ELSE 0 END) AS profit_30d,

COUNT(DISTINCT CASE WHEN pm.sales_365d > 0 THEN pm.pid END) AS products_with_sales_365d,
SUM(CASE WHEN pm.sales_365d > 0 THEN pm.sales_365d ELSE 0 END) AS sales_365d,
SUM(CASE WHEN pm.revenue_365d > 0 THEN pm.revenue_365d ELSE 0 END) AS revenue_365d,

COUNT(DISTINCT CASE WHEN pm.lifetime_sales > 0 THEN pm.pid END) AS products_with_lifetime_sales,
SUM(CASE WHEN pm.lifetime_sales > 0 THEN pm.lifetime_sales ELSE 0 END) AS lifetime_sales,
SUM(CASE WHEN pm.lifetime_revenue > 0 THEN pm.lifetime_revenue ELSE 0 END) AS lifetime_revenue
FROM public.product_metrics pm
JOIN public.products p ON pm.pid = p.pid
WHERE p.vendor IS NOT NULL AND p.vendor <> ''
GROUP BY p.vendor
),
PreviousPeriodVendorMetrics AS (
-- Get previous period metrics for growth calculation
SELECT
p.vendor,
SUM(CASE WHEN dps.snapshot_date >= CURRENT_DATE - INTERVAL '59 days'
AND dps.snapshot_date < CURRENT_DATE - INTERVAL '29 days'
THEN dps.units_sold ELSE 0 END) AS sales_prev_30d,
SUM(CASE WHEN dps.snapshot_date >= CURRENT_DATE - INTERVAL '59 days'
AND dps.snapshot_date < CURRENT_DATE - INTERVAL '29 days'
THEN dps.net_revenue ELSE 0 END) AS revenue_prev_30d
FROM public.daily_product_snapshots dps
JOIN public.products p ON dps.pid = p.pid
WHERE p.vendor IS NOT NULL AND p.vendor <> ''
GROUP BY p.vendor
),
VendorPOAggregates AS (
-- Aggregate PO-related stats, including lead time measured from PO date to receiving date
SELECT
po.vendor,
COUNT(DISTINCT po.po_id) AS po_count_365d,
-- Calculate lead time by averaging the days between PO date and receiving date
AVG(GREATEST(1, CASE
WHEN r.received_date IS NOT NULL AND po.date IS NOT NULL
THEN (r.received_date::date - po.date::date)
ELSE NULL
END))::int AS avg_lead_time_days_hist -- Avg lead time from HISTORICAL received POs
FROM public.purchase_orders po
-- Join to receivings table to find when items were received
LEFT JOIN public.receivings r ON r.pid = po.pid
WHERE po.vendor IS NOT NULL AND po.vendor <> ''
AND po.date >= CURRENT_DATE - INTERVAL '1 year' -- Look at POs created in the last year
AND po.status = 'done' -- Only calculate lead time on completed POs
AND r.received_date IS NOT NULL
AND po.date IS NOT NULL
AND r.received_date >= po.date
GROUP BY po.vendor
),
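-- Worked example of the lead-time average above (illustrative numbers only):
-- a PO dated 2024-01-02 whose receiving lands on 2024-01-09 contributes 7 days;
-- one received the same day contributes GREATEST(1, 0) = 1 day, so same-day
-- receipts never pull the average to zero. AVG over the matched rows is then
-- cast to int, e.g. AVG of 7, 1 and 10 days gives 6.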
AllVendors AS (
-- Ensure all vendors from products table are included
SELECT DISTINCT vendor FROM public.products WHERE vendor IS NOT NULL AND vendor <> ''
)
INSERT INTO public.vendor_metrics (
vendor_name, last_calculated,
product_count, active_product_count, replenishable_product_count,
current_stock_units, current_stock_cost, current_stock_retail,
on_order_units, on_order_cost,
po_count_365d, avg_lead_time_days,
sales_7d, revenue_7d, sales_30d, revenue_30d, profit_30d, cogs_30d,
sales_365d, revenue_365d, lifetime_sales, lifetime_revenue,
avg_margin_30d,
sales_growth_30d_vs_prev, revenue_growth_30d_vs_prev
)
SELECT
v.vendor,
_start_time,
-- Base Aggregates
COALESCE(vpa.product_count, 0),
COALESCE(vpa.active_product_count, 0),
COALESCE(vpa.replenishable_product_count, 0),
COALESCE(vpa.current_stock_units, 0),
COALESCE(vpa.current_stock_cost, 0.00),
COALESCE(vpa.current_stock_retail, 0.00),
COALESCE(vpa.on_order_units, 0),
COALESCE(vpa.on_order_cost, 0.00),
-- PO Aggregates
COALESCE(vpoa.po_count_365d, 0),
vpoa.avg_lead_time_days_hist, -- Can be NULL if no received POs
-- Sales Aggregates
COALESCE(vpa.sales_7d, 0), COALESCE(vpa.revenue_7d, 0.00),
COALESCE(vpa.sales_30d, 0), COALESCE(vpa.revenue_30d, 0.00),
COALESCE(vpa.profit_30d, 0.00), COALESCE(vpa.cogs_30d, 0.00),
COALESCE(vpa.sales_365d, 0), COALESCE(vpa.revenue_365d, 0.00),
COALESCE(vpa.lifetime_sales, 0), COALESCE(vpa.lifetime_revenue, 0.00),
-- KPIs
(vpa.profit_30d / NULLIF(vpa.revenue_30d, 0)) * 100.0,
-- Growth metrics
std_numeric(safe_divide((vpa.sales_30d - ppvm.sales_prev_30d) * 100.0, ppvm.sales_prev_30d), 2),
std_numeric(safe_divide((vpa.revenue_30d - ppvm.revenue_prev_30d) * 100.0, ppvm.revenue_prev_30d), 2)
FROM AllVendors v
LEFT JOIN VendorProductAggregates vpa ON v.vendor = vpa.vendor
LEFT JOIN VendorPOAggregates vpoa ON v.vendor = vpoa.vendor
LEFT JOIN PreviousPeriodVendorMetrics ppvm ON v.vendor = ppvm.vendor

ON CONFLICT (vendor_name) DO UPDATE SET
last_calculated = EXCLUDED.last_calculated,
product_count = EXCLUDED.product_count,
active_product_count = EXCLUDED.active_product_count,
replenishable_product_count = EXCLUDED.replenishable_product_count,
current_stock_units = EXCLUDED.current_stock_units,
current_stock_cost = EXCLUDED.current_stock_cost,
current_stock_retail = EXCLUDED.current_stock_retail,
on_order_units = EXCLUDED.on_order_units,
on_order_cost = EXCLUDED.on_order_cost,
po_count_365d = EXCLUDED.po_count_365d,
avg_lead_time_days = EXCLUDED.avg_lead_time_days,
sales_7d = EXCLUDED.sales_7d, revenue_7d = EXCLUDED.revenue_7d,
sales_30d = EXCLUDED.sales_30d, revenue_30d = EXCLUDED.revenue_30d,
profit_30d = EXCLUDED.profit_30d, cogs_30d = EXCLUDED.cogs_30d,
sales_365d = EXCLUDED.sales_365d, revenue_365d = EXCLUDED.revenue_365d,
lifetime_sales = EXCLUDED.lifetime_sales, lifetime_revenue = EXCLUDED.lifetime_revenue,
avg_margin_30d = EXCLUDED.avg_margin_30d,
sales_growth_30d_vs_prev = EXCLUDED.sales_growth_30d_vs_prev,
revenue_growth_30d_vs_prev = EXCLUDED.revenue_growth_30d_vs_prev
WHERE -- Only update if at least one value has changed
vendor_metrics.product_count IS DISTINCT FROM EXCLUDED.product_count OR
vendor_metrics.active_product_count IS DISTINCT FROM EXCLUDED.active_product_count OR
vendor_metrics.current_stock_units IS DISTINCT FROM EXCLUDED.current_stock_units OR
vendor_metrics.on_order_units IS DISTINCT FROM EXCLUDED.on_order_units OR
vendor_metrics.sales_30d IS DISTINCT FROM EXCLUDED.sales_30d OR
vendor_metrics.revenue_30d IS DISTINCT FROM EXCLUDED.revenue_30d OR
vendor_metrics.lifetime_sales IS DISTINCT FROM EXCLUDED.lifetime_sales;

-- Update calculate_status
INSERT INTO public.calculate_status (module_name, last_calculation_timestamp)
VALUES (_module_name, _start_time)
ON CONFLICT (module_name) DO UPDATE SET last_calculation_timestamp = _start_time;

RAISE NOTICE 'Finished % calculation. Duration: %', _module_name, clock_timestamp() - _start_time;
END $$;

-- Return metrics about the update operation for tracking
WITH update_stats AS (
SELECT
COUNT(*) as total_vendors,
COUNT(*) FILTER (WHERE last_calculated >= NOW() - INTERVAL '5 minutes') as rows_processed,
SUM(product_count) as total_products,
SUM(active_product_count) as total_active_products,
SUM(po_count_365d) as total_pos_365d,
AVG(avg_lead_time_days) as overall_avg_lead_time
FROM public.vendor_metrics
)
SELECT
rows_processed,
total_vendors,
total_products::int,
total_active_products::int,
total_pos_365d::int,
ROUND(overall_avg_lead_time, 1) as overall_avg_lead_time
FROM update_stats;
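
-- Illustrative consumer of this table (not part of the scheduled job): rank
-- vendors whose 30-day revenue is shrinking fastest, assuming the columns
-- written by the UPSERT above.
SELECT vendor_name, revenue_30d, revenue_growth_30d_vs_prev, avg_lead_time_days
FROM public.vendor_metrics
WHERE revenue_growth_30d_vs_prev < 0
ORDER BY revenue_growth_30d_vs_prev ASC
LIMIT 20;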
@@ -1,222 +0,0 @@
-- Description: Calculates and updates daily aggregated product data for recent days.
-- Deletes then re-inserts each target date (DELETE + INSERT), so reruns are idempotent.
-- Dependencies: Core import tables (products, orders, purchase_orders), calculate_status table.
-- Frequency: Hourly (Run ~5-10 minutes after hourly data import completes).

DO $$
DECLARE
_module_name TEXT := 'daily_snapshots';
_start_time TIMESTAMPTZ := clock_timestamp(); -- Time execution started
_last_calc_time TIMESTAMPTZ;
_target_date DATE; -- Will be set in the loop
_total_records INT := 0;
_has_orders BOOLEAN := FALSE;
_process_days INT := 5; -- Number of days to check/process (today plus previous 4 days)
_day_counter INT;
_missing_days INT[] := ARRAY[]::INT[]; -- Array to store days with missing or incomplete data
BEGIN
-- Get the timestamp before the last successful run of this module
SELECT last_calculation_timestamp INTO _last_calc_time
FROM public.calculate_status
WHERE module_name = _module_name;

RAISE NOTICE 'Running % script. Start Time: %', _module_name, _start_time;

-- First, check which days need processing by comparing orders data with snapshot data
FOR _day_counter IN 0..(_process_days-1) LOOP
_target_date := CURRENT_DATE - (_day_counter * INTERVAL '1 day');

-- Check if this date needs updating by comparing orders to snapshot data.
-- A date is incomplete if it has orders but too few snapshots, or if its snapshots show zero sales while orders exist.
SELECT
CASE WHEN (
-- We have orders for this date but not enough snapshots, or snapshots with wrong total
(EXISTS (SELECT 1 FROM public.orders WHERE date::date = _target_date) AND
(
-- No snapshots exist for this date
NOT EXISTS (SELECT 1 FROM public.daily_product_snapshots WHERE snapshot_date = _target_date) OR
-- Or snapshots show zero sales but orders exist
(SELECT COALESCE(SUM(units_sold), 0) FROM public.daily_product_snapshots WHERE snapshot_date = _target_date) = 0 OR
-- Or the count of snapshot records is significantly less than distinct products in orders
(SELECT COUNT(*) FROM public.daily_product_snapshots WHERE snapshot_date = _target_date) <
(SELECT COUNT(DISTINCT pid) FROM public.orders WHERE date::date = _target_date) * 0.8
)
)
) THEN TRUE ELSE FALSE END
INTO _has_orders;

IF _has_orders THEN
-- This day needs processing - add to our array
_missing_days := _missing_days || _day_counter;
RAISE NOTICE 'Day % needs updating (incomplete or missing data)', _target_date;
END IF;
END LOOP;
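
-- Example of the completeness heuristic above (illustrative numbers): if 100
-- distinct products had orders on a date but fewer than 80 snapshot rows
-- (100 * 0.8) exist for it, the day is flagged for a rebuild; so is any day
-- whose snapshots sum to zero units sold while orders exist.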

-- If no days need updating, exit early
IF array_length(_missing_days, 1) IS NULL THEN
RAISE NOTICE 'No days need updating - all snapshot data appears complete';

-- Still update the calculate_status to record this run
UPDATE public.calculate_status
SET last_calculation_timestamp = _start_time
WHERE module_name = _module_name;

RETURN;
END IF;

RAISE NOTICE 'Need to update % days with missing or incomplete data', array_length(_missing_days, 1);

-- Process only the days that need updating
FOREACH _day_counter IN ARRAY _missing_days LOOP
_target_date := CURRENT_DATE - (_day_counter * INTERVAL '1 day');
RAISE NOTICE 'Processing date: %', _target_date;

-- IMPORTANT: First delete any existing data for this date to prevent duplication
DELETE FROM public.daily_product_snapshots
WHERE snapshot_date = _target_date;

-- Proceed with calculating daily metrics only for products with actual activity
WITH SalesData AS (
SELECT
p.pid,
p.sku,
-- Track number of orders to ensure we have real data
COUNT(o.id) as order_count,
-- Aggregate Sales (Quantity > 0, Status not Canceled/Returned)
COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN o.quantity ELSE 0 END), 0) AS units_sold,
COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN o.price * o.quantity ELSE 0 END), 0.00) AS gross_revenue_unadjusted, -- Before discount
COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN o.discount ELSE 0 END), 0.00) AS discounts,
COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN
COALESCE(
o.costeach, -- First use order-specific cost if available
get_weighted_avg_cost(p.pid, o.date::date), -- Then use weighted average cost
p.landing_cost_price, -- Fallback to landing cost
p.cost_price -- Final fallback to current cost
) * o.quantity
ELSE 0 END), 0.00) AS cogs,
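-- COGS fallback order, illustrated: an order line with costeach = 4.10 books
-- 4.10 per unit; with costeach NULL it books the weighted average cost as of
-- the order date; only if that is also NULL does it fall back to the
-- product's landing_cost_price, and finally to cost_price.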
COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN p.regular_price * o.quantity ELSE 0 END), 0.00) AS gross_regular_revenue, -- Use current regular price for simplicity here

-- Aggregate Returns (Quantity < 0 or Status = Returned)
COALESCE(SUM(CASE WHEN o.quantity < 0 OR COALESCE(o.status, 'pending') = 'returned' THEN ABS(o.quantity) ELSE 0 END), 0) AS units_returned,
COALESCE(SUM(CASE WHEN o.quantity < 0 OR COALESCE(o.status, 'pending') = 'returned' THEN o.price * ABS(o.quantity) ELSE 0 END), 0.00) AS returns_revenue
FROM public.products p
JOIN public.orders o -- INNER JOIN: only products with orders on this date are processed
ON p.pid = o.pid
AND o.date::date = _target_date -- Cast to date to ensure compatibility regardless of original type
GROUP BY p.pid, p.sku
-- No HAVING clause here - we always want to include all orders
),
ReceivingData AS (
SELECT
r.pid,
-- Track number of receiving docs to ensure we have real data
COUNT(DISTINCT r.receiving_id) as receiving_doc_count,
-- Sum the quantities received on this date
SUM(r.qty_each) AS units_received,
-- Calculate the cost received (qty * cost)
SUM(r.qty_each * r.cost_each) AS cost_received
FROM public.receivings r
WHERE r.received_date::date = _target_date
-- Optional: Filter out canceled receivings if needed
-- AND r.status <> 'canceled'
GROUP BY r.pid
-- Only include products with actual receiving activity
HAVING COUNT(DISTINCT r.receiving_id) > 0 OR SUM(r.qty_each) > 0
),
CurrentStock AS (
-- Select current stock values directly from products table
SELECT
pid,
stock_quantity,
COALESCE(landing_cost_price, cost_price, 0.00) as effective_cost_price,
COALESCE(price, 0.00) as current_price,
COALESCE(regular_price, 0.00) as current_regular_price
FROM public.products
),
ProductsWithActivity AS (
-- Quick pre-filter to only process products with activity
SELECT DISTINCT pid
FROM (
SELECT pid FROM SalesData
UNION
SELECT pid FROM ReceivingData
) a
)
-- Now insert records, but ONLY for products with actual activity
INSERT INTO public.daily_product_snapshots (
snapshot_date,
pid,
sku,
eod_stock_quantity,
eod_stock_cost,
eod_stock_retail,
eod_stock_gross,
stockout_flag,
units_sold,
units_returned,
gross_revenue,
discounts,
returns_revenue,
net_revenue,
cogs,
gross_regular_revenue,
profit,
units_received,
cost_received,
calculation_timestamp
)
SELECT
_target_date AS snapshot_date,
COALESCE(sd.pid, rd.pid) AS pid, -- Use sales or receiving PID
COALESCE(sd.sku, p.sku) AS sku, -- Get SKU from sales data or products table
-- Inventory Metrics (Using CurrentStock)
cs.stock_quantity AS eod_stock_quantity,
cs.stock_quantity * cs.effective_cost_price AS eod_stock_cost,
cs.stock_quantity * cs.current_price AS eod_stock_retail,
cs.stock_quantity * cs.current_regular_price AS eod_stock_gross,
(cs.stock_quantity <= 0) AS stockout_flag,
-- Sales Metrics (From SalesData)
COALESCE(sd.units_sold, 0),
COALESCE(sd.units_returned, 0),
COALESCE(sd.gross_revenue_unadjusted, 0.00),
COALESCE(sd.discounts, 0.00),
COALESCE(sd.returns_revenue, 0.00),
COALESCE(sd.gross_revenue_unadjusted, 0.00) - COALESCE(sd.discounts, 0.00) AS net_revenue,
COALESCE(sd.cogs, 0.00),
COALESCE(sd.gross_regular_revenue, 0.00),
(COALESCE(sd.gross_revenue_unadjusted, 0.00) - COALESCE(sd.discounts, 0.00)) - COALESCE(sd.cogs, 0.00) AS profit, -- Basic profit: Net Revenue - COGS
-- Receiving Metrics (From ReceivingData)
COALESCE(rd.units_received, 0),
COALESCE(rd.cost_received, 0.00),
_start_time -- Timestamp of this calculation run
FROM SalesData sd
FULL OUTER JOIN ReceivingData rd ON sd.pid = rd.pid
JOIN ProductsWithActivity pwa ON COALESCE(sd.pid, rd.pid) = pwa.pid
LEFT JOIN public.products p ON COALESCE(sd.pid, rd.pid) = p.pid
LEFT JOIN CurrentStock cs ON COALESCE(sd.pid, rd.pid) = cs.pid
WHERE p.pid IS NOT NULL; -- Ensure we only insert for existing products

-- Get the total number of records inserted for this date
GET DIAGNOSTICS _total_records = ROW_COUNT;
RAISE NOTICE 'Created % daily snapshot records for % with sales/receiving activity', _total_records, _target_date;
END LOOP;

-- Update the status table with the timestamp from the START of this run
UPDATE public.calculate_status
SET last_calculation_timestamp = _start_time
WHERE module_name = _module_name;

RAISE NOTICE 'Finished % processing for multiple dates. Duration: %', _module_name, clock_timestamp() - _start_time;

END $$;

-- Return the total records processed for tracking
SELECT
COUNT(*) as rows_processed,
COUNT(DISTINCT snapshot_date) as days_processed,
MIN(snapshot_date) as earliest_date,
MAX(snapshot_date) as latest_date,
SUM(units_sold) as total_units_sold,
SUM(units_received) as total_units_received
FROM public.daily_product_snapshots
WHERE calculation_timestamp >= (NOW() - INTERVAL '5 minutes'); -- Recent updates only
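
-- get_weighted_avg_cost(pid, as_of_date) is called above but defined outside
-- this commit. A minimal sketch of the assumed behavior (receipt-weighted
-- average unit cost up to the given date); the real signature and types may
-- differ:
CREATE OR REPLACE FUNCTION get_weighted_avg_cost(_pid INT, _as_of DATE)
RETURNS NUMERIC AS $$
    -- Weight each receipt's unit cost by the quantity received.
    SELECT SUM(r.qty_each * r.cost_each) / NULLIF(SUM(r.qty_each), 0)
    FROM public.receivings r
    WHERE r.pid = _pid
      AND r.received_date::date <= _as_of;
$$ LANGUAGE sql STABLE;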
@@ -1,139 +0,0 @@
-- Description: Calculates metrics that don't need hourly updates, like ABC class
-- and average lead time.
-- Dependencies: product_metrics, purchase_orders, settings_global, calculate_status.
-- Frequency: Daily or Weekly (e.g., run via cron job overnight).

DO $$
DECLARE
_module_name TEXT := 'periodic_metrics';
_start_time TIMESTAMPTZ := clock_timestamp();
_last_calc_time TIMESTAMPTZ;
_abc_basis VARCHAR;
_abc_period INT;
_threshold_a NUMERIC;
_threshold_b NUMERIC;
BEGIN
-- Get the timestamp before the last successful run of this module
SELECT last_calculation_timestamp INTO _last_calc_time
FROM public.calculate_status
WHERE module_name = _module_name;

RAISE NOTICE 'Running % module. Start Time: %', _module_name, _start_time;

-- 1. Calculate Average Lead Time
RAISE NOTICE 'Calculating Average Lead Time...';
WITH LeadTimes AS (
SELECT
po.pid,
-- Calculate lead time by looking at when items ordered on POs were received
AVG(GREATEST(1, (r.received_date::date - po.date::date))) AS avg_days -- Use GREATEST(1,...) to avoid 0 or negative days
FROM public.purchase_orders po
-- Join to receivings table to find actual receipts
JOIN public.receivings r ON r.pid = po.pid
WHERE po.status = 'done' -- Only include completed POs
AND r.received_date >= po.date -- Ensure received date is not before order date
-- Optional: add check to make sure receiving is related to PO if you have source_po_id
-- AND (r.source_po_id = po.po_id OR r.source_po_id IS NULL)
GROUP BY po.pid
)
UPDATE public.product_metrics pm
SET avg_lead_time_days = lt.avg_days::int
FROM LeadTimes lt
WHERE pm.pid = lt.pid
AND pm.avg_lead_time_days IS DISTINCT FROM lt.avg_days::int; -- Only update if changed
RAISE NOTICE 'Finished Average Lead Time calculation.';


-- 2. Calculate ABC Classification
RAISE NOTICE 'Calculating ABC Classification...';
-- Get ABC settings
SELECT setting_value INTO _abc_basis FROM public.settings_global WHERE setting_key = 'abc_calculation_basis' LIMIT 1;
SELECT setting_value::numeric INTO _threshold_a FROM public.settings_global WHERE setting_key = 'abc_revenue_threshold_a' LIMIT 1;
SELECT setting_value::numeric INTO _threshold_b FROM public.settings_global WHERE setting_key = 'abc_revenue_threshold_b' LIMIT 1;
_abc_basis := COALESCE(_abc_basis, 'revenue_30d'); -- Default basis
_threshold_a := COALESCE(_threshold_a, 0.80);
_threshold_b := COALESCE(_threshold_b, 0.95);
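-- Worked example of the classification below (default thresholds): rank
-- products by the chosen metric and accumulate a running share of the total.
-- A product whose cumulative share reaches 0.72 lands in class A (<= 0.80),
-- one at 0.91 lands in class B (<= 0.95), and one at 0.97 falls through to C.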

RAISE NOTICE 'Using ABC Basis: %, Threshold A: %, Threshold B: %', _abc_basis, _threshold_a, _threshold_b;

WITH RankedProducts AS (
SELECT
pid,
-- Dynamically select the metric based on setting
CASE _abc_basis
WHEN 'sales_30d' THEN COALESCE(sales_30d, 0)
WHEN 'lifetime_revenue' THEN COALESCE(lifetime_revenue, 0)::numeric -- Cast needed if different type
ELSE COALESCE(revenue_30d, 0) -- Default to revenue_30d
END AS metric_value
FROM public.product_metrics
WHERE is_replenishable = TRUE -- Typically only classify replenishable items
),
Cumulative AS (
SELECT
pid,
metric_value,
SUM(metric_value) OVER (ORDER BY metric_value DESC NULLS LAST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as cumulative_metric,
SUM(metric_value) OVER () as total_metric
FROM RankedProducts
WHERE metric_value > 0 -- Exclude items with no contribution
)
UPDATE public.product_metrics pm
SET abc_class =
CASE
WHEN c.cumulative_metric / NULLIF(c.total_metric, 0) <= _threshold_a THEN 'A'
WHEN c.cumulative_metric / NULLIF(c.total_metric, 0) <= _threshold_b THEN 'B'
ELSE 'C'
END
FROM Cumulative c
WHERE pm.pid = c.pid
AND pm.abc_class IS DISTINCT FROM ( -- Only update if changed
CASE
WHEN c.cumulative_metric / NULLIF(c.total_metric, 0) <= _threshold_a THEN 'A'
WHEN c.cumulative_metric / NULLIF(c.total_metric, 0) <= _threshold_b THEN 'B'
ELSE 'C'
END);

-- Set non-contributing or non-replenishable to 'C' or NULL if preferred
UPDATE public.product_metrics
SET abc_class = 'C' -- Or NULL
WHERE abc_class IS NULL AND is_replenishable = TRUE; -- Catch those with 0 metric value

UPDATE public.product_metrics
SET abc_class = NULL -- Or 'N/A'?
WHERE is_replenishable = FALSE AND abc_class IS NOT NULL; -- Unclassify non-replenishable items


RAISE NOTICE 'Finished ABC Classification calculation.';

-- Add other periodic calculations here if needed (e.g., recalculating first/last dates)

-- Update the status table with the timestamp from the START of this run
UPDATE public.calculate_status
SET last_calculation_timestamp = _start_time
WHERE module_name = _module_name;

RAISE NOTICE 'Finished % module. Duration: %', _module_name, clock_timestamp() - _start_time;

END $$;

-- Return metrics about the update operation for tracking
WITH update_stats AS (
SELECT
COUNT(*) as total_products,
COUNT(*) FILTER (WHERE last_calculated >= NOW() - INTERVAL '5 minutes') as rows_processed,
COUNT(*) FILTER (WHERE abc_class = 'A') as abc_a_count,
COUNT(*) FILTER (WHERE abc_class = 'B') as abc_b_count,
COUNT(*) FILTER (WHERE abc_class = 'C') as abc_c_count,
COUNT(*) FILTER (WHERE avg_lead_time_days IS NOT NULL) as products_with_lead_time,
AVG(avg_lead_time_days) as overall_avg_lead_time
FROM public.product_metrics
)
SELECT
rows_processed,
total_products,
abc_a_count,
abc_b_count,
abc_c_count,
products_with_lead_time,
ROUND(overall_avg_lead_time, 1) as overall_avg_lead_time
FROM update_stats;
@@ -1,609 +0,0 @@
-- Description: Calculates and updates the main product_metrics table based on current data
-- and aggregated daily snapshots. Uses UPSERT for idempotency.
-- Dependencies: Core import tables, daily_product_snapshots, configuration tables, calculate_status.
-- Frequency: Hourly (Run AFTER update_daily_snapshots.sql completes).

DO $$
DECLARE
_module_name TEXT := 'product_metrics';
_start_time TIMESTAMPTZ := clock_timestamp();
_last_calc_time TIMESTAMPTZ;
_current_date DATE := CURRENT_DATE;
BEGIN
-- Get the timestamp before the last successful run of this module
SELECT last_calculation_timestamp INTO _last_calc_time
FROM public.calculate_status
WHERE module_name = _module_name;

RAISE NOTICE 'Running % module. Start Time: %', _module_name, _start_time;

-- Use CTEs to gather all necessary information
WITH CurrentInfo AS (
SELECT
p.pid,
p.sku,
p.title,
p.brand,
p.vendor,
COALESCE(p.image_175, p.image) as image_url,
p.visible as is_visible,
p.replenishable as is_replenishable,
-- Add new product fields
p.barcode,
p.harmonized_tariff_code,
p.vendor_reference,
p.notions_reference,
p.line,
p.subline,
p.artist,
p.moq,
p.rating,
p.reviews,
p.weight,
p.length,
p.width,
p.height,
p.country_of_origin,
p.location,
p.baskets,
p.notifies,
p.preorder_count,
p.notions_inv_count,
COALESCE(p.price, 0.00) as current_price,
COALESCE(p.regular_price, 0.00) as current_regular_price,
COALESCE(p.cost_price, 0.00) as current_cost_price,
COALESCE(p.landing_cost_price, p.cost_price, 0.00) as current_effective_cost, -- Use landing if available, else cost
p.stock_quantity as current_stock,
p.created_at,
p.first_received,
p.date_last_sold,
p.total_sold as historical_total_sold, -- Add historical total_sold from products table
p.uom -- Assuming UOM logic is handled elsewhere or simple (e.g., 1=each)
FROM public.products p
),
OnOrderInfo AS (
SELECT
pid,
SUM(ordered) AS on_order_qty,
SUM(ordered * po_cost_price) AS on_order_cost,
MIN(expected_date) AS earliest_expected_date
FROM public.purchase_orders
WHERE status IN ('created', 'ordered', 'preordered', 'electronically_sent', 'electronically_ready_send', 'receiving_started')
GROUP BY pid
),
HistoricalDates AS (
-- Note: Calculating these MIN/MAX values hourly can be slow on large tables.
-- Consider calculating periodically or storing on products if import can populate them.
SELECT
p.pid,
MIN(o.date)::date AS date_first_sold,
MAX(o.date)::date AS max_order_date, -- Use MAX for potential recalc of date_last_sold

-- For first received, use the new receivings table
MIN(r.received_date)::date AS date_first_received_calc,

-- For last received, use the new receivings table
MAX(r.received_date)::date AS date_last_received_calc
FROM public.products p
LEFT JOIN public.orders o ON p.pid = o.pid AND o.quantity > 0 AND o.status NOT IN ('canceled', 'returned')
LEFT JOIN public.receivings r ON p.pid = r.pid
GROUP BY p.pid
),
SnapshotAggregates AS (
SELECT
pid,
-- Get the counts of all available data
COUNT(DISTINCT snapshot_date) AS available_days,

-- Rolling periods with no time constraint - just sum everything we have
SUM(units_sold) AS total_units_sold,
SUM(net_revenue) AS total_net_revenue,

-- Specific time windows using date range boundaries precisely
-- Use _current_date - INTERVAL '6 days' to include 7 days (today + 6 previous days)
-- This ensures we count exactly the right number of days in each period
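-- For example, with _current_date = 2024-03-30 the 7-day window covers
-- 2024-03-24 through 2024-03-30 inclusive: exactly 7 snapshot dates. The
-- same convention gives the 14-day (-13), 30-day (-29) and 365-day (-364) windows.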
SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '6 days' AND snapshot_date <= _current_date THEN units_sold ELSE 0 END) AS sales_7d,
SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '6 days' AND snapshot_date <= _current_date THEN net_revenue ELSE 0 END) AS revenue_7d,

SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '13 days' AND snapshot_date <= _current_date THEN units_sold ELSE 0 END) AS sales_14d,
SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '13 days' AND snapshot_date <= _current_date THEN net_revenue ELSE 0 END) AS revenue_14d,

SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' AND snapshot_date <= _current_date THEN units_sold ELSE 0 END) AS sales_30d,
SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' AND snapshot_date <= _current_date THEN net_revenue ELSE 0 END) AS revenue_30d,
SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' AND snapshot_date <= _current_date THEN cogs ELSE 0 END) AS cogs_30d,
SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' AND snapshot_date <= _current_date THEN profit ELSE 0 END) AS profit_30d,
SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' AND snapshot_date <= _current_date THEN units_returned ELSE 0 END) AS returns_units_30d,
SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' AND snapshot_date <= _current_date THEN returns_revenue ELSE 0 END) AS returns_revenue_30d,
SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' AND snapshot_date <= _current_date THEN discounts ELSE 0 END) AS discounts_30d,
SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' AND snapshot_date <= _current_date THEN gross_revenue ELSE 0 END) AS gross_revenue_30d,
SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' AND snapshot_date <= _current_date THEN gross_regular_revenue ELSE 0 END) AS gross_regular_revenue_30d,
SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' AND snapshot_date <= _current_date AND stockout_flag THEN 1 ELSE 0 END) AS stockout_days_30d,

SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '364 days' AND snapshot_date <= _current_date THEN units_sold ELSE 0 END) AS sales_365d,
SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '364 days' AND snapshot_date <= _current_date THEN net_revenue ELSE 0 END) AS revenue_365d,

SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' AND snapshot_date <= _current_date THEN units_received ELSE 0 END) AS received_qty_30d,
SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' AND snapshot_date <= _current_date THEN cost_received ELSE 0 END) AS received_cost_30d,

-- Averages for stock levels - only include dates within the specified period
AVG(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' AND snapshot_date <= _current_date THEN eod_stock_quantity END) AS avg_stock_units_30d,
AVG(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' AND snapshot_date <= _current_date THEN eod_stock_cost END) AS avg_stock_cost_30d,
AVG(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' AND snapshot_date <= _current_date THEN eod_stock_retail END) AS avg_stock_retail_30d,
AVG(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' AND snapshot_date <= _current_date THEN eod_stock_gross END) AS avg_stock_gross_30d,

-- Lifetime - should match total values above
SUM(units_sold) AS lifetime_sales,
SUM(net_revenue) AS lifetime_revenue,

-- Yesterday
SUM(CASE WHEN snapshot_date = _current_date - INTERVAL '1 day' THEN units_sold ELSE 0 END) as yesterday_sales

FROM public.daily_product_snapshots
GROUP BY pid
),
FirstPeriodMetrics AS (
SELECT
pid,
date_first_sold,
SUM(CASE WHEN snapshot_date >= date_first_sold AND snapshot_date <= date_first_sold + INTERVAL '6 days' THEN units_sold ELSE 0 END) AS first_7_days_sales,
SUM(CASE WHEN snapshot_date >= date_first_sold AND snapshot_date <= date_first_sold + INTERVAL '6 days' THEN net_revenue ELSE 0 END) AS first_7_days_revenue,
SUM(CASE WHEN snapshot_date >= date_first_sold AND snapshot_date <= date_first_sold + INTERVAL '29 days' THEN units_sold ELSE 0 END) AS first_30_days_sales,
SUM(CASE WHEN snapshot_date >= date_first_sold AND snapshot_date <= date_first_sold + INTERVAL '29 days' THEN net_revenue ELSE 0 END) AS first_30_days_revenue,
SUM(CASE WHEN snapshot_date >= date_first_sold AND snapshot_date <= date_first_sold + INTERVAL '59 days' THEN units_sold ELSE 0 END) AS first_60_days_sales,
SUM(CASE WHEN snapshot_date >= date_first_sold AND snapshot_date <= date_first_sold + INTERVAL '59 days' THEN net_revenue ELSE 0 END) AS first_60_days_revenue,
SUM(CASE WHEN snapshot_date >= date_first_sold AND snapshot_date <= date_first_sold + INTERVAL '89 days' THEN units_sold ELSE 0 END) AS first_90_days_sales,
SUM(CASE WHEN snapshot_date >= date_first_sold AND snapshot_date <= date_first_sold + INTERVAL '89 days' THEN net_revenue ELSE 0 END) AS first_90_days_revenue
FROM public.daily_product_snapshots ds
JOIN HistoricalDates hd USING(pid)
WHERE date_first_sold IS NOT NULL
AND snapshot_date >= date_first_sold
AND snapshot_date <= date_first_sold + INTERVAL '90 days' -- Limit scan range
GROUP BY pid, date_first_sold
),
Settings AS (
SELECT
p.pid,
COALESCE(sp.lead_time_days, sv.default_lead_time_days, (SELECT setting_value FROM settings_global WHERE setting_key = 'default_lead_time_days')::int, 14) AS effective_lead_time,
COALESCE(sp.days_of_stock, sv.default_days_of_stock, (SELECT setting_value FROM settings_global WHERE setting_key = 'default_days_of_stock')::int, 30) AS effective_days_of_stock,
COALESCE(sp.safety_stock, 0) AS effective_safety_stock, -- Assuming safety stock is units, not days from global for now
COALESCE(sp.exclude_from_forecast, FALSE) AS exclude_forecast
FROM public.products p
LEFT JOIN public.settings_product sp ON p.pid = sp.pid
LEFT JOIN public.settings_vendor sv ON p.vendor = sv.vendor
),
LifetimeRevenue AS (
-- Calculate actual revenue from orders table
SELECT
o.pid,
SUM(o.price * o.quantity - COALESCE(o.discount, 0)) AS lifetime_revenue_from_orders,
SUM(o.quantity) AS lifetime_units_from_orders
FROM public.orders o
WHERE o.status NOT IN ('canceled', 'returned')
AND o.quantity > 0
GROUP BY o.pid
),
PreviousPeriodMetrics AS (
-- Calculate metrics for previous 30-day period for growth comparison
SELECT
pid,
SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '59 days'
AND snapshot_date < _current_date - INTERVAL '29 days'
THEN units_sold ELSE 0 END) AS sales_prev_30d,
SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '59 days'
AND snapshot_date < _current_date - INTERVAL '29 days'
THEN net_revenue ELSE 0 END) AS revenue_prev_30d,
-- Year-over-year comparison
SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '395 days'
AND snapshot_date < _current_date - INTERVAL '365 days'
THEN units_sold ELSE 0 END) AS sales_30d_last_year,
SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '395 days'
AND snapshot_date < _current_date - INTERVAL '365 days'
THEN net_revenue ELSE 0 END) AS revenue_30d_last_year
FROM public.daily_product_snapshots
GROUP BY pid
),
DemandVariability AS (
-- Calculate variance and standard deviation of daily sales
SELECT
pid,
COUNT(*) AS days_with_data,
AVG(units_sold) AS avg_daily_sales,
VARIANCE(units_sold) AS sales_variance,
STDDEV(units_sold) AS sales_std_dev,
-- Coefficient of variation
CASE
WHEN AVG(units_sold) > 0 THEN STDDEV(units_sold) / AVG(units_sold)
ELSE NULL
END AS sales_cv
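-- Illustration: a product averaging 10 units/day with a standard deviation of
-- 5 has CV = 0.5 (fairly steady demand); one averaging 2/day with a deviation
-- of 6 has CV = 3.0 (lumpy demand). classify_demand_pattern(), used further
-- down, is assumed to bucket products on this ratio plus average volume.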
FROM public.daily_product_snapshots
WHERE snapshot_date >= _current_date - INTERVAL '29 days'
AND snapshot_date <= _current_date
GROUP BY pid
),
ServiceLevels AS (
-- Calculate service level and fill rate metrics
SELECT
pid,
COUNT(*) FILTER (WHERE stockout_flag = true) AS stockout_incidents_30d,
COUNT(*) FILTER (WHERE stockout_flag = true AND units_sold > 0) AS lost_sales_incidents_30d,
-- Service level: percentage of days without stockouts
(1.0 - (COUNT(*) FILTER (WHERE stockout_flag = true)::NUMERIC / NULLIF(COUNT(*), 0))) * 100 AS service_level_30d,
-- Fill rate: units sold / (units sold + potential lost sales)
CASE
WHEN SUM(units_sold) > 0 THEN
(SUM(units_sold)::NUMERIC /
(SUM(units_sold) + SUM(CASE WHEN stockout_flag THEN units_sold * 0.2 ELSE 0 END))) * 100
ELSE NULL
END AS fill_rate_30d
FROM public.daily_product_snapshots
WHERE snapshot_date >= _current_date - INTERVAL '29 days'
AND snapshot_date <= _current_date
GROUP BY pid
),
SeasonalityAnalysis AS (
-- Simple seasonality detection
SELECT
p.pid,
sp.seasonal_pattern,
sp.seasonality_index,
sp.peak_season
FROM products p
CROSS JOIN LATERAL detect_seasonal_pattern(p.pid) sp
)
-- Final UPSERT into product_metrics
INSERT INTO public.product_metrics (
pid, last_calculated, sku, title, brand, vendor, image_url, is_visible, is_replenishable,
barcode, harmonized_tariff_code, vendor_reference, notions_reference, line, subline, artist,
moq, rating, reviews, weight, length, width, height, country_of_origin, location,
baskets, notifies, preorder_count, notions_inv_count,
current_price, current_regular_price, current_cost_price, current_landing_cost_price,
current_stock, current_stock_cost, current_stock_retail, current_stock_gross,
on_order_qty, on_order_cost, on_order_retail, earliest_expected_date,
date_created, date_first_received, date_last_received, date_first_sold, date_last_sold, age_days,
sales_7d, revenue_7d, sales_14d, revenue_14d, sales_30d, revenue_30d, cogs_30d, profit_30d,
returns_units_30d, returns_revenue_30d, discounts_30d, gross_revenue_30d, gross_regular_revenue_30d,
stockout_days_30d, sales_365d, revenue_365d,
avg_stock_units_30d, avg_stock_cost_30d, avg_stock_retail_30d, avg_stock_gross_30d,
received_qty_30d, received_cost_30d,
lifetime_sales, lifetime_revenue, lifetime_revenue_quality,
first_7_days_sales, first_7_days_revenue, first_30_days_sales, first_30_days_revenue,
first_60_days_sales, first_60_days_revenue, first_90_days_sales, first_90_days_revenue,
asp_30d, acp_30d, avg_ros_30d, avg_sales_per_day_30d, avg_sales_per_month_30d,
margin_30d, markup_30d, gmroi_30d, stockturn_30d, return_rate_30d, discount_rate_30d,
stockout_rate_30d, markdown_30d, markdown_rate_30d, sell_through_30d,
-- avg_lead_time_days, -- Calculated periodically
-- abc_class, -- Calculated periodically
sales_velocity_daily, config_lead_time, config_days_of_stock, config_safety_stock,
planning_period_days, lead_time_forecast_units, days_of_stock_forecast_units,
planning_period_forecast_units, lead_time_closing_stock, days_of_stock_closing_stock,
replenishment_needed_raw, replenishment_units, replenishment_cost, replenishment_retail, replenishment_profit,
to_order_units, forecast_lost_sales_units, forecast_lost_revenue,
stock_cover_in_days, po_cover_in_days, sells_out_in_days, replenish_date,
overstocked_units, overstocked_cost, overstocked_retail, is_old_stock,
yesterday_sales,
status, -- Add status field for calculated status
-- New fields
sales_growth_30d_vs_prev, revenue_growth_30d_vs_prev,
sales_growth_yoy, revenue_growth_yoy,
sales_variance_30d, sales_std_dev_30d, sales_cv_30d, demand_pattern,
fill_rate_30d, stockout_incidents_30d, service_level_30d, lost_sales_incidents_30d,
seasonality_index, seasonal_pattern, peak_season
)
SELECT
ci.pid, _start_time, ci.sku, ci.title, ci.brand, ci.vendor, ci.image_url, ci.is_visible, ci.is_replenishable,
ci.barcode, ci.harmonized_tariff_code, ci.vendor_reference, ci.notions_reference, ci.line, ci.subline, ci.artist,
ci.moq, ci.rating, ci.reviews, ci.weight, ci.length, ci.width, ci.height, ci.country_of_origin, ci.location,
ci.baskets, ci.notifies, ci.preorder_count, ci.notions_inv_count,
ci.current_price, ci.current_regular_price, ci.current_cost_price, ci.current_effective_cost,
ci.current_stock, ci.current_stock * ci.current_effective_cost, ci.current_stock * ci.current_price, ci.current_stock * ci.current_regular_price,
COALESCE(ooi.on_order_qty, 0), COALESCE(ooi.on_order_cost, 0.00), COALESCE(ooi.on_order_qty, 0) * ci.current_price, ooi.earliest_expected_date,
ci.created_at::date, COALESCE(ci.first_received::date, hd.date_first_received_calc), hd.date_last_received_calc, hd.date_first_sold, COALESCE(ci.date_last_sold, hd.max_order_date),
CASE
WHEN ci.created_at IS NULL AND hd.date_first_sold IS NULL THEN 0
WHEN ci.created_at IS NULL THEN (_current_date - hd.date_first_sold)::integer
WHEN hd.date_first_sold IS NULL THEN (_current_date - ci.created_at::date)::integer
ELSE (_current_date - LEAST(ci.created_at::date, hd.date_first_sold))::integer
END AS age_days,
sa.sales_7d, sa.revenue_7d, sa.sales_14d, sa.revenue_14d, sa.sales_30d, sa.revenue_30d, sa.cogs_30d, sa.profit_30d,
sa.returns_units_30d, sa.returns_revenue_30d, sa.discounts_30d, sa.gross_revenue_30d, sa.gross_regular_revenue_30d,
sa.stockout_days_30d, sa.sales_365d, sa.revenue_365d,
sa.avg_stock_units_30d, sa.avg_stock_cost_30d, sa.avg_stock_retail_30d, sa.avg_stock_gross_30d,
sa.received_qty_30d, sa.received_cost_30d,
-- Use total_sold from products table as the source of truth for lifetime sales
-- This includes all historical data from the production database
ci.historical_total_sold AS lifetime_sales,
-- Calculate lifetime revenue using actual historical prices where available
CASE
WHEN lr.lifetime_revenue_from_orders IS NOT NULL THEN
-- We have some order history - use it plus estimate for remaining
lr.lifetime_revenue_from_orders +
(GREATEST(0, ci.historical_total_sold - COALESCE(lr.lifetime_units_from_orders, 0)) *
COALESCE(
-- Use oldest known unit price from snapshots as proxy
(SELECT net_revenue / NULLIF(units_sold, 0)
FROM daily_product_snapshots
WHERE pid = ci.pid AND units_sold > 0
ORDER BY snapshot_date ASC
LIMIT 1),
ci.current_price
))
ELSE
-- No order history - estimate using current price
ci.historical_total_sold * ci.current_price
END AS lifetime_revenue,
CASE
WHEN lr.lifetime_units_from_orders >= ci.historical_total_sold * 0.9 THEN 'exact'
WHEN lr.lifetime_units_from_orders >= ci.historical_total_sold * 0.5 THEN 'partial'
ELSE 'estimated'
END AS lifetime_revenue_quality,
fpm.first_7_days_sales, fpm.first_7_days_revenue, fpm.first_30_days_sales, fpm.first_30_days_revenue,
fpm.first_60_days_sales, fpm.first_60_days_revenue, fpm.first_90_days_sales, fpm.first_90_days_revenue,
sa.revenue_30d / NULLIF(sa.sales_30d, 0) AS asp_30d,
sa.cogs_30d / NULLIF(sa.sales_30d, 0) AS acp_30d,
sa.profit_30d / NULLIF(sa.sales_30d, 0) AS avg_ros_30d,
sa.sales_30d / 30.0 AS avg_sales_per_day_30d,
sa.sales_30d AS avg_sales_per_month_30d, -- Using 30d sales as proxy for month
(sa.profit_30d / NULLIF(sa.revenue_30d, 0)) * 100 AS margin_30d,
(sa.profit_30d / NULLIF(sa.cogs_30d, 0)) * 100 AS markup_30d,
sa.profit_30d / NULLIF(sa.avg_stock_cost_30d, 0) AS gmroi_30d,
sa.sales_30d / NULLIF(sa.avg_stock_units_30d, 0) AS stockturn_30d,
(sa.returns_units_30d / NULLIF(sa.sales_30d + sa.returns_units_30d, 0)) * 100 AS return_rate_30d,
(sa.discounts_30d / NULLIF(sa.gross_revenue_30d, 0)) * 100 AS discount_rate_30d,
(sa.stockout_days_30d / 30.0) * 100 AS stockout_rate_30d,
sa.gross_regular_revenue_30d - sa.gross_revenue_30d AS markdown_30d,
((sa.gross_regular_revenue_30d - sa.gross_revenue_30d) / NULLIF(sa.gross_regular_revenue_30d, 0)) * 100 AS markdown_rate_30d,
-- Fix sell-through rate: Industry standard is Units Sold / (Beginning Inventory + Units Received)
-- Approximating beginning inventory as current stock + units sold - units received
(sa.sales_30d / NULLIF(
ci.current_stock + sa.sales_30d + sa.returns_units_30d - sa.received_qty_30d,
0
)) * 100 AS sell_through_30d,
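-- Worked example of the sell-through above: current stock 40, 30-day sales
-- 55, returns 5, received 20 -> beginning inventory ~= 40 + 55 + 5 - 20 = 80,
-- so sell-through = 55 / 80 * 100 = 68.75%.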

-- Forecasting intermediate values
-- Use the calculate_sales_velocity function instead of repetitive calculation
calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) AS sales_velocity_daily,
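-- calculate_sales_velocity(sales_30d, stockout_days_30d) is defined outside
-- this commit; it is assumed to return average daily sales adjusted for days
-- the product was out of stock, roughly sales_30d / (30 - stockout_days_30d),
-- so demand during stockouts does not deflate the forecast.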
s.effective_lead_time AS config_lead_time,
s.effective_days_of_stock AS config_days_of_stock,
s.effective_safety_stock AS config_safety_stock,
(s.effective_lead_time + s.effective_days_of_stock) AS planning_period_days,

calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_lead_time AS lead_time_forecast_units,

calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_days_of_stock AS days_of_stock_forecast_units,

calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * (s.effective_lead_time + s.effective_days_of_stock) AS planning_period_forecast_units,

(ci.current_stock + COALESCE(ooi.on_order_qty, 0) - (calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_lead_time)) AS lead_time_closing_stock,

((ci.current_stock + COALESCE(ooi.on_order_qty, 0) - (calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_lead_time))) - (calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_days_of_stock) AS days_of_stock_closing_stock,

((calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_lead_time) + (calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_days_of_stock)) + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0) AS replenishment_needed_raw,

-- Final Forecasting / Replenishment Metrics
CEILING(GREATEST(0, (((calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_lead_time) + (calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_days_of_stock)) + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0))))::int AS replenishment_units,
(CEILING(GREATEST(0, (((calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_lead_time) + (calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_days_of_stock)) + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0))))::int) * ci.current_effective_cost AS replenishment_cost,
(CEILING(GREATEST(0, (((calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_lead_time) + (calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_days_of_stock)) + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0))))::int) * ci.current_price AS replenishment_retail,
(CEILING(GREATEST(0, (((calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_lead_time) + (calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_days_of_stock)) + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0))))::int) * (ci.current_price - ci.current_effective_cost) AS replenishment_profit,

-- To Order (Apply MOQ/UOM logic here if needed, otherwise equals replenishment)
CEILING(GREATEST(0, (((calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_lead_time) + (calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_days_of_stock)) + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0))))::int AS to_order_units,

GREATEST(0, - (ci.current_stock + COALESCE(ooi.on_order_qty, 0) - (calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_lead_time))) AS forecast_lost_sales_units,
GREATEST(0, - (ci.current_stock + COALESCE(ooi.on_order_qty, 0) - (calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_lead_time))) * ci.current_price AS forecast_lost_revenue,

ci.current_stock / NULLIF(calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int), 0) AS stock_cover_in_days,
COALESCE(ooi.on_order_qty, 0) / NULLIF(calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int), 0) AS po_cover_in_days,
(ci.current_stock + COALESCE(ooi.on_order_qty, 0)) / NULLIF(calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int), 0) AS sells_out_in_days,

-- Replenish Date: Date when stock is projected to hit safety stock, minus lead time
CASE
WHEN calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) > 0
THEN _current_date + FLOOR(GREATEST(0, ci.current_stock - s.effective_safety_stock) / calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int))::int - s.effective_lead_time
ELSE NULL
END AS replenish_date,
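-- Worked example: 120 units on hand, safety stock 20, velocity 5/day, lead
-- time 10 days -> stock reaches the safety level in FLOOR(100 / 5) = 20 days,
-- so the suggested replenish date is today + 20 - 10 = 10 days out.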

GREATEST(0, ci.current_stock - s.effective_safety_stock - ((calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_lead_time) + (calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_days_of_stock)))::int AS overstocked_units,
(GREATEST(0, ci.current_stock - s.effective_safety_stock - ((calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_lead_time) + (calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_days_of_stock)))) * ci.current_effective_cost AS overstocked_cost,
(GREATEST(0, ci.current_stock - s.effective_safety_stock - ((calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_lead_time) + (calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_days_of_stock)))) * ci.current_price AS overstocked_retail,

-- Old Stock Flag
(ci.created_at::date < _current_date - INTERVAL '60 day') AND
(COALESCE(ci.date_last_sold, hd.max_order_date) IS NULL OR COALESCE(ci.date_last_sold, hd.max_order_date) < _current_date - INTERVAL '60 day') AND
(hd.date_last_received_calc IS NULL OR hd.date_last_received_calc < _current_date - INTERVAL '60 day') AND
COALESCE(ooi.on_order_qty, 0) = 0
AS is_old_stock,

sa.yesterday_sales,

-- Calculate status using direct CASE statements (inline logic)
CASE
-- Non-replenishable items default to Healthy
WHEN NOT ci.is_replenishable THEN 'Healthy'

-- Calculate lead time and thresholds
ELSE
CASE
-- Check for overstock first
WHEN GREATEST(0, ci.current_stock - s.effective_safety_stock - ((calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_lead_time) + (calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int) * s.effective_days_of_stock))) > 0 THEN 'Overstock'

-- Check for Critical stock
WHEN ci.current_stock <= 0 OR
(ci.current_stock / NULLIF(calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int), 0)) <= 0 THEN 'Critical'

WHEN (ci.current_stock / NULLIF(calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int), 0)) < (COALESCE(s.effective_lead_time, 30) * 0.5) THEN 'Critical'

-- Check for reorder soon
WHEN ((ci.current_stock + COALESCE(ooi.on_order_qty, 0)) / NULLIF(calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int), 0)) < (COALESCE(s.effective_lead_time, 30) + 7) THEN
CASE
WHEN (ci.current_stock / NULLIF(calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int), 0)) < (COALESCE(s.effective_lead_time, 30) * 0.5) THEN 'Critical'
ELSE 'Reorder Soon'
END

-- Check for 'At Risk' - old stock
WHEN (ci.created_at::date < _current_date - INTERVAL '60 day') AND
(COALESCE(ci.date_last_sold, hd.max_order_date) IS NULL OR COALESCE(ci.date_last_sold, hd.max_order_date) < _current_date - INTERVAL '60 day') AND
(hd.date_last_received_calc IS NULL OR hd.date_last_received_calc < _current_date - INTERVAL '60 day') AND
COALESCE(ooi.on_order_qty, 0) = 0 THEN 'At Risk'

-- Check for 'At Risk' - hasn't sold in a long time
WHEN COALESCE(ci.date_last_sold, hd.max_order_date) IS NOT NULL
AND COALESCE(ci.date_last_sold, hd.max_order_date) < (_current_date - INTERVAL '90 days')
AND (CASE
WHEN ci.created_at IS NULL AND hd.date_first_sold IS NULL THEN 0
WHEN ci.created_at IS NULL THEN (_current_date - hd.date_first_sold)::integer
WHEN hd.date_first_sold IS NULL THEN (_current_date - ci.created_at::date)::integer
ELSE (_current_date - LEAST(ci.created_at::date, hd.date_first_sold))::integer
END) > 180 THEN 'At Risk'

-- Very high stock cover is at risk too
WHEN (ci.current_stock / NULLIF(calculate_sales_velocity(sa.sales_30d::int, sa.stockout_days_30d::int), 0)) > 365 THEN 'At Risk'

-- New products (less than 30 days old)
WHEN (CASE
WHEN ci.created_at IS NULL AND hd.date_first_sold IS NULL THEN 0
WHEN ci.created_at IS NULL THEN (_current_date - hd.date_first_sold)::integer
WHEN hd.date_first_sold IS NULL THEN (_current_date - ci.created_at::date)::integer
ELSE (_current_date - LEAST(ci.created_at::date, hd.date_first_sold))::integer
END) <= 30 THEN 'New'

-- If none of the above, assume Healthy
ELSE 'Healthy'
END
END AS status,
|
||||
|
||||
-- Growth Metrics (P3) - using safe_divide and std_numeric for consistency
|
||||
std_numeric(safe_divide((sa.sales_30d - ppm.sales_prev_30d) * 100.0, ppm.sales_prev_30d), 2) AS sales_growth_30d_vs_prev,
|
||||
std_numeric(safe_divide((sa.revenue_30d - ppm.revenue_prev_30d) * 100.0, ppm.revenue_prev_30d), 2) AS revenue_growth_30d_vs_prev,
|
||||
std_numeric(safe_divide((sa.sales_30d - ppm.sales_30d_last_year) * 100.0, ppm.sales_30d_last_year), 2) AS sales_growth_yoy,
|
||||
std_numeric(safe_divide((sa.revenue_30d - ppm.revenue_30d_last_year) * 100.0, ppm.revenue_30d_last_year), 2) AS revenue_growth_yoy,
|
||||
|
||||
-- Demand Variability (P3)
|
||||
std_numeric(dv.sales_variance, 2) AS sales_variance_30d,
|
||||
std_numeric(dv.sales_std_dev, 2) AS sales_std_dev_30d,
|
||||
std_numeric(dv.sales_cv, 2) AS sales_cv_30d,
|
||||
classify_demand_pattern(dv.avg_daily_sales, dv.sales_cv) AS demand_pattern,
|
||||
|
||||
-- Service Levels (P5)
|
||||
std_numeric(COALESCE(sl.fill_rate_30d, 100), 2) AS fill_rate_30d,
|
||||
COALESCE(sl.stockout_incidents_30d, 0)::int AS stockout_incidents_30d,
|
||||
std_numeric(COALESCE(sl.service_level_30d, 100), 2) AS service_level_30d,
|
||||
COALESCE(sl.lost_sales_incidents_30d, 0)::int AS lost_sales_incidents_30d,
|
||||
|
||||
-- Seasonality (P5)
|
||||
std_numeric(season.seasonality_index, 2) AS seasonality_index,
|
||||
COALESCE(season.seasonal_pattern, 'none') AS seasonal_pattern,
|
||||
season.peak_season
|
||||
|
||||
FROM CurrentInfo ci
|
||||
LEFT JOIN OnOrderInfo ooi ON ci.pid = ooi.pid
|
||||
LEFT JOIN HistoricalDates hd ON ci.pid = hd.pid
|
||||
LEFT JOIN SnapshotAggregates sa ON ci.pid = sa.pid
|
||||
LEFT JOIN FirstPeriodMetrics fpm ON ci.pid = fpm.pid
|
||||
LEFT JOIN Settings s ON ci.pid = s.pid
|
||||
LEFT JOIN LifetimeRevenue lr ON ci.pid = lr.pid
|
||||
LEFT JOIN PreviousPeriodMetrics ppm ON ci.pid = ppm.pid
|
||||
LEFT JOIN DemandVariability dv ON ci.pid = dv.pid
|
||||
LEFT JOIN ServiceLevels sl ON ci.pid = sl.pid
|
||||
LEFT JOIN SeasonalityAnalysis season ON ci.pid = season.pid
|
||||
WHERE s.exclude_forecast IS FALSE OR s.exclude_forecast IS NULL -- Exclude products explicitly marked
|
||||
|
||||
ON CONFLICT (pid) DO UPDATE SET
|
||||
last_calculated = EXCLUDED.last_calculated,
|
||||
sku = EXCLUDED.sku, title = EXCLUDED.title, brand = EXCLUDED.brand, vendor = EXCLUDED.vendor, image_url = EXCLUDED.image_url, is_visible = EXCLUDED.is_visible, is_replenishable = EXCLUDED.is_replenishable,
|
||||
barcode = EXCLUDED.barcode, harmonized_tariff_code = EXCLUDED.harmonized_tariff_code, vendor_reference = EXCLUDED.vendor_reference, notions_reference = EXCLUDED.notions_reference, line = EXCLUDED.line, subline = EXCLUDED.subline, artist = EXCLUDED.artist,
|
||||
moq = EXCLUDED.moq, rating = EXCLUDED.rating, reviews = EXCLUDED.reviews, weight = EXCLUDED.weight, length = EXCLUDED.length, width = EXCLUDED.width, height = EXCLUDED.height, country_of_origin = EXCLUDED.country_of_origin, location = EXCLUDED.location,
|
||||
baskets = EXCLUDED.baskets, notifies = EXCLUDED.notifies, preorder_count = EXCLUDED.preorder_count, notions_inv_count = EXCLUDED.notions_inv_count,
|
||||
current_price = EXCLUDED.current_price, current_regular_price = EXCLUDED.current_regular_price, current_cost_price = EXCLUDED.current_cost_price, current_landing_cost_price = EXCLUDED.current_landing_cost_price,
|
||||
current_stock = EXCLUDED.current_stock, current_stock_cost = EXCLUDED.current_stock_cost, current_stock_retail = EXCLUDED.current_stock_retail, current_stock_gross = EXCLUDED.current_stock_gross,
|
||||
on_order_qty = EXCLUDED.on_order_qty, on_order_cost = EXCLUDED.on_order_cost, on_order_retail = EXCLUDED.on_order_retail, earliest_expected_date = EXCLUDED.earliest_expected_date,
|
||||
date_created = EXCLUDED.date_created, date_first_received = EXCLUDED.date_first_received, date_last_received = EXCLUDED.date_last_received, date_first_sold = EXCLUDED.date_first_sold, date_last_sold = EXCLUDED.date_last_sold, age_days = EXCLUDED.age_days,
|
||||
sales_7d = EXCLUDED.sales_7d, revenue_7d = EXCLUDED.revenue_7d, sales_14d = EXCLUDED.sales_14d, revenue_14d = EXCLUDED.revenue_14d, sales_30d = EXCLUDED.sales_30d, revenue_30d = EXCLUDED.revenue_30d, cogs_30d = EXCLUDED.cogs_30d, profit_30d = EXCLUDED.profit_30d,
|
||||
returns_units_30d = EXCLUDED.returns_units_30d, returns_revenue_30d = EXCLUDED.returns_revenue_30d, discounts_30d = EXCLUDED.discounts_30d, gross_revenue_30d = EXCLUDED.gross_revenue_30d, gross_regular_revenue_30d = EXCLUDED.gross_regular_revenue_30d,
|
||||
stockout_days_30d = EXCLUDED.stockout_days_30d, sales_365d = EXCLUDED.sales_365d, revenue_365d = EXCLUDED.revenue_365d,
|
||||
avg_stock_units_30d = EXCLUDED.avg_stock_units_30d, avg_stock_cost_30d = EXCLUDED.avg_stock_cost_30d, avg_stock_retail_30d = EXCLUDED.avg_stock_retail_30d, avg_stock_gross_30d = EXCLUDED.avg_stock_gross_30d,
|
||||
received_qty_30d = EXCLUDED.received_qty_30d, received_cost_30d = EXCLUDED.received_cost_30d,
|
||||
lifetime_sales = EXCLUDED.lifetime_sales, lifetime_revenue = EXCLUDED.lifetime_revenue, lifetime_revenue_quality = EXCLUDED.lifetime_revenue_quality,
|
||||
first_7_days_sales = EXCLUDED.first_7_days_sales, first_7_days_revenue = EXCLUDED.first_7_days_revenue, first_30_days_sales = EXCLUDED.first_30_days_sales, first_30_days_revenue = EXCLUDED.first_30_days_revenue,
|
||||
first_60_days_sales = EXCLUDED.first_60_days_sales, first_60_days_revenue = EXCLUDED.first_60_days_revenue, first_90_days_sales = EXCLUDED.first_90_days_sales, first_90_days_revenue = EXCLUDED.first_90_days_revenue,
|
||||
asp_30d = EXCLUDED.asp_30d, acp_30d = EXCLUDED.acp_30d, avg_ros_30d = EXCLUDED.avg_ros_30d, avg_sales_per_day_30d = EXCLUDED.avg_sales_per_day_30d, avg_sales_per_month_30d = EXCLUDED.avg_sales_per_month_30d,
|
||||
margin_30d = EXCLUDED.margin_30d, markup_30d = EXCLUDED.markup_30d, gmroi_30d = EXCLUDED.gmroi_30d, stockturn_30d = EXCLUDED.stockturn_30d, return_rate_30d = EXCLUDED.return_rate_30d, discount_rate_30d = EXCLUDED.discount_rate_30d,
|
||||
stockout_rate_30d = EXCLUDED.stockout_rate_30d, markdown_30d = EXCLUDED.markdown_30d, markdown_rate_30d = EXCLUDED.markdown_rate_30d, sell_through_30d = EXCLUDED.sell_through_30d,
|
||||
-- avg_lead_time_days = EXCLUDED.avg_lead_time_days, -- Updated Periodically
|
||||
-- abc_class = EXCLUDED.abc_class, -- Updated Periodically
|
||||
sales_velocity_daily = EXCLUDED.sales_velocity_daily, config_lead_time = EXCLUDED.config_lead_time, config_days_of_stock = EXCLUDED.config_days_of_stock, config_safety_stock = EXCLUDED.config_safety_stock,
|
||||
planning_period_days = EXCLUDED.planning_period_days, lead_time_forecast_units = EXCLUDED.lead_time_forecast_units, days_of_stock_forecast_units = EXCLUDED.days_of_stock_forecast_units,
|
||||
planning_period_forecast_units = EXCLUDED.planning_period_forecast_units, lead_time_closing_stock = EXCLUDED.lead_time_closing_stock, days_of_stock_closing_stock = EXCLUDED.days_of_stock_closing_stock,
|
||||
replenishment_needed_raw = EXCLUDED.replenishment_needed_raw, replenishment_units = EXCLUDED.replenishment_units, replenishment_cost = EXCLUDED.replenishment_cost, replenishment_retail = EXCLUDED.replenishment_retail, replenishment_profit = EXCLUDED.replenishment_profit,
|
||||
to_order_units = EXCLUDED.to_order_units, forecast_lost_sales_units = EXCLUDED.forecast_lost_sales_units, forecast_lost_revenue = EXCLUDED.forecast_lost_revenue,
|
||||
stock_cover_in_days = EXCLUDED.stock_cover_in_days, po_cover_in_days = EXCLUDED.po_cover_in_days, sells_out_in_days = EXCLUDED.sells_out_in_days, replenish_date = EXCLUDED.replenish_date,
|
||||
overstocked_units = EXCLUDED.overstocked_units, overstocked_cost = EXCLUDED.overstocked_cost, overstocked_retail = EXCLUDED.overstocked_retail, is_old_stock = EXCLUDED.is_old_stock,
|
||||
yesterday_sales = EXCLUDED.yesterday_sales,
|
||||
status = EXCLUDED.status,
|
||||
sales_growth_30d_vs_prev = EXCLUDED.sales_growth_30d_vs_prev,
|
||||
revenue_growth_30d_vs_prev = EXCLUDED.revenue_growth_30d_vs_prev,
|
||||
sales_growth_yoy = EXCLUDED.sales_growth_yoy,
|
||||
revenue_growth_yoy = EXCLUDED.revenue_growth_yoy,
|
||||
sales_variance_30d = EXCLUDED.sales_variance_30d,
|
||||
sales_std_dev_30d = EXCLUDED.sales_std_dev_30d,
|
||||
sales_cv_30d = EXCLUDED.sales_cv_30d,
|
||||
demand_pattern = EXCLUDED.demand_pattern,
|
||||
fill_rate_30d = EXCLUDED.fill_rate_30d,
|
||||
stockout_incidents_30d = EXCLUDED.stockout_incidents_30d,
|
||||
service_level_30d = EXCLUDED.service_level_30d,
|
||||
lost_sales_incidents_30d = EXCLUDED.lost_sales_incidents_30d,
|
||||
seasonality_index = EXCLUDED.seasonality_index,
|
||||
seasonal_pattern = EXCLUDED.seasonal_pattern,
|
||||
peak_season = EXCLUDED.peak_season
|
||||
WHERE -- Only update if at least one key metric has changed
|
||||
product_metrics.current_stock IS DISTINCT FROM EXCLUDED.current_stock OR
|
||||
product_metrics.current_price IS DISTINCT FROM EXCLUDED.current_price OR
|
||||
product_metrics.current_cost_price IS DISTINCT FROM EXCLUDED.current_cost_price OR
|
||||
product_metrics.on_order_qty IS DISTINCT FROM EXCLUDED.on_order_qty OR
|
||||
product_metrics.sales_7d IS DISTINCT FROM EXCLUDED.sales_7d OR
|
||||
product_metrics.sales_30d IS DISTINCT FROM EXCLUDED.sales_30d OR
|
||||
product_metrics.revenue_30d IS DISTINCT FROM EXCLUDED.revenue_30d OR
|
||||
product_metrics.status IS DISTINCT FROM EXCLUDED.status OR
|
||||
product_metrics.replenishment_units IS DISTINCT FROM EXCLUDED.replenishment_units OR
|
||||
product_metrics.stock_cover_in_days IS DISTINCT FROM EXCLUDED.stock_cover_in_days OR
|
||||
product_metrics.yesterday_sales IS DISTINCT FROM EXCLUDED.yesterday_sales OR
|
||||
-- Check a few other important fields that might change
|
||||
product_metrics.date_last_sold IS DISTINCT FROM EXCLUDED.date_last_sold OR
|
||||
product_metrics.earliest_expected_date IS DISTINCT FROM EXCLUDED.earliest_expected_date OR
|
||||
product_metrics.lifetime_sales IS DISTINCT FROM EXCLUDED.lifetime_sales OR
|
||||
product_metrics.lifetime_revenue_quality IS DISTINCT FROM EXCLUDED.lifetime_revenue_quality
|
||||
;
|
||||
|
||||
-- Update the status table with the timestamp from the START of this run
|
||||
UPDATE public.calculate_status
|
||||
SET last_calculation_timestamp = _start_time
|
||||
WHERE module_name = _module_name;
|
||||
|
||||
RAISE NOTICE 'Finished % module. Duration: %', _module_name, clock_timestamp() - _start_time;
|
||||
|
||||
END $$;
|
||||
|
||||
-- Return metrics about the update operation
|
||||
WITH update_stats AS (
|
||||
SELECT
|
||||
COUNT(*) as total_products,
|
||||
COUNT(*) FILTER (WHERE last_calculated >= NOW() - INTERVAL '5 minutes') as rows_processed,
|
||||
COUNT(*) FILTER (WHERE status = 'Critical') as critical_count,
|
||||
COUNT(*) FILTER (WHERE status = 'Reorder Soon') as reorder_soon_count,
|
||||
COUNT(*) FILTER (WHERE status = 'Healthy') as healthy_count,
|
||||
COUNT(*) FILTER (WHERE status = 'Overstock') as overstock_count,
|
||||
COUNT(*) FILTER (WHERE status = 'At Risk') as at_risk_count,
|
||||
COUNT(*) FILTER (WHERE status = 'New') as new_count
|
||||
FROM public.product_metrics
|
||||
)
|
||||
SELECT
|
||||
rows_processed,
|
||||
total_products,
|
||||
critical_count,
|
||||
reorder_soon_count,
|
||||
healthy_count,
|
||||
overstock_count,
|
||||
at_risk_count,
|
||||
new_count,
|
||||
ROUND((rows_processed::numeric / NULLIF(total_products, 0)) * 100, 2) as update_percentage
|
||||
FROM update_stats;
|
||||
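For reference, a minimal sketch of how a Node runner could execute the SQL above and surface the summary row it returns. The script file name, the connection setup, and the shape of the multi-statement result are assumptions for illustration, not taken from this commit.

// Minimal sketch, assuming the SQL above is saved as update-product-metrics.sql
// (hypothetical name) next to this runner, and that DATABASE_URL is set.
const fs = require('fs');
const path = require('path');
const { Pool } = require('pg');

async function runProductMetrics() {
  const pool = new Pool({ connectionString: process.env.DATABASE_URL });
  const sql = fs.readFileSync(path.join(__dirname, 'update-product-metrics.sql'), 'utf8');
  try {
    // The DO $$ block runs first; the trailing WITH ... SELECT returns one summary row.
    const result = await pool.query(sql);
    // node-postgres may return an array of results for multi-statement text,
    // so take the last result set either way.
    const last = Array.isArray(result) ? result[result.length - 1] : result;
    const stats = last.rows[0];
    console.log(`Updated ${stats.rows_processed} of ${stats.total_products} products (${stats.update_percentage}%)`);
  } finally {
    await pool.end();
  }
}

runProductMetrics().catch(err => {
  console.error(err);
  process.exit(1);
});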
@@ -1,39 +0,0 @@
const { Pool } = require('pg');
const path = require('path');
require('dotenv').config({ path: path.resolve(__dirname, '../../..', '.env') });

// Database configuration
const dbConfig = {
  host: process.env.DB_HOST,
  user: process.env.DB_USER,
  password: process.env.DB_PASSWORD,
  database: process.env.DB_NAME,
  port: process.env.DB_PORT || 5432,
  ssl: process.env.DB_SSL === 'true',
  // Add performance optimizations
  max: 10, // connection pool max size
  idleTimeoutMillis: 30000,
  connectionTimeoutMillis: 60000
};

// Create a single pool instance to be reused
const pool = new Pool(dbConfig);

// Add event handlers for pool
pool.on('error', (err, client) => {
  console.error('Unexpected error on idle client', err);
});

async function getConnection() {
  return await pool.connect();
}

async function closePool() {
  await pool.end();
}

module.exports = {
  dbConfig,
  getConnection,
  closePool
};
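A minimal usage sketch for this pool module; the relative require path is an assumption.

// Minimal sketch (assumed module path): check out a client, query, release it.
const { getConnection, closePool } = require('./db'); // assumed path

async function main() {
  const client = await getConnection();
  try {
    const { rows } = await client.query('SELECT COUNT(*) AS n FROM public.product_metrics');
    console.log(`product_metrics rows: ${rows[0].n}`);
  } finally {
    client.release(); // always return the client to the pool
  }
  await closePool();
}

main().catch(err => {
  console.error(err);
  process.exit(1);
});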
@@ -1,183 +0,0 @@
const fs = require('fs');
const path = require('path');

// Helper function to format elapsed time
function formatElapsedTime(startTime) {
  let elapsed;

  // If startTime is a timestamp (number representing milliseconds since epoch)
  if (typeof startTime === 'number') {
    // Check if it's a timestamp (will be a large number like 1700000000000)
    if (startTime > 1000000000) { // timestamps are in milliseconds since 1970
      elapsed = Date.now() - startTime;
    } else {
      // Assume it's already elapsed milliseconds
      elapsed = startTime;
    }
  } else if (startTime instanceof Date) {
    elapsed = Date.now() - startTime.getTime();
  } else {
    // Default to 0 if invalid input
    elapsed = 0;
  }

  const seconds = Math.floor(elapsed / 1000);
  const minutes = Math.floor(seconds / 60);
  const hours = Math.floor(minutes / 60);

  if (hours > 0) {
    return `${hours}h ${minutes % 60}m ${seconds % 60}s`;
  } else if (minutes > 0) {
    return `${minutes}m ${seconds % 60}s`;
  } else {
    return `${seconds}s`;
  }
}

// Helper function to estimate remaining time
function estimateRemaining(startTime, current, total) {
  // Handle edge cases
  if (!current || current === 0 || !total || total === 0 || current >= total) {
    return null;
  }

  // Calculate elapsed time in milliseconds
  const elapsed = Date.now() - startTime;
  if (elapsed <= 0) return null;

  // Calculate rate (items per millisecond)
  const rate = current / elapsed;
  if (rate <= 0) return null;

  // Calculate remaining time in milliseconds
  const remaining = (total - current) / rate;

  // Convert to readable format
  const seconds = Math.floor(remaining / 1000);
  const minutes = Math.floor(seconds / 60);
  const hours = Math.floor(minutes / 60);

  if (hours > 0) {
    return `${hours}h ${minutes % 60}m`;
  } else if (minutes > 0) {
    return `${minutes}m ${seconds % 60}s`;
  } else {
    return `${seconds}s`;
  }
}

// Helper function to calculate rate
function calculateRate(startTime, current) {
  const elapsed = (Date.now() - startTime) / 1000; // Convert to seconds
  return elapsed > 0 ? Math.round(current / elapsed) : 0;
}

// Set up logging
const LOG_DIR = path.join(__dirname, '../../../logs');
const ERROR_LOG = path.join(LOG_DIR, 'import-errors.log');
const IMPORT_LOG = path.join(LOG_DIR, 'import.log');
const STATUS_FILE = path.join(LOG_DIR, 'metrics-status.json');

// Ensure log directory exists
if (!fs.existsSync(LOG_DIR)) {
  fs.mkdirSync(LOG_DIR, { recursive: true });
}

// Helper function to log errors
function logError(error, context = '') {
  const timestamp = new Date().toISOString();
  const errorMessage = `[${timestamp}] ${context}\nError: ${error.message}\nStack: ${error.stack}\n\n`;

  // Log to error file
  fs.appendFileSync(ERROR_LOG, errorMessage);

  // Also log to console
  console.error(`\n${context}\nError: ${error.message}`);
}

// Helper function to log import progress
function logImport(message) {
  const timestamp = new Date().toISOString();
  const logMessage = `[${timestamp}] ${message}\n`;
  fs.appendFileSync(IMPORT_LOG, logMessage);
}

// Helper function to output progress
function outputProgress(data) {
  // Save progress to file for resumption
  saveProgress(data);
  // Format as SSE event
  const event = {
    progress: data
  };
  // Always send to stdout for frontend
  process.stdout.write(JSON.stringify(event) + '\n');

  // Log significant events to disk
  const isSignificant =
    // Operation starts
    (data.operation && !data.current) ||
    // Operation completions and errors
    data.status === 'complete' ||
    data.status === 'error' ||
    // Major phase changes
    data.operation?.includes('Starting ABC classification') ||
    data.operation?.includes('Starting time-based aggregates') ||
    data.operation?.includes('Starting vendor metrics');

  if (isSignificant) {
    logImport(`${data.operation || 'Operation'}${data.message ? ': ' + data.message : ''}${data.error ? ' Error: ' + data.error : ''}${data.status ? ' Status: ' + data.status : ''}`);
  }
}

function saveProgress(progress) {
  try {
    fs.writeFileSync(STATUS_FILE, JSON.stringify({
      ...progress,
      timestamp: Date.now()
    }));
  } catch (err) {
    console.error('Failed to save progress:', err);
  }
}

function clearProgress() {
  try {
    if (fs.existsSync(STATUS_FILE)) {
      fs.unlinkSync(STATUS_FILE);
    }
  } catch (err) {
    console.error('Failed to clear progress:', err);
  }
}

function getProgress() {
  try {
    if (fs.existsSync(STATUS_FILE)) {
      const progress = JSON.parse(fs.readFileSync(STATUS_FILE, 'utf8'));
      // Check if the progress is still valid (less than 1 hour old)
      if (progress.timestamp && Date.now() - progress.timestamp < 3600000) {
        return progress;
      } else {
        // Clear old progress
        clearProgress();
      }
    }
  } catch (err) {
    console.error('Failed to read progress:', err);
    clearProgress();
  }
  return null;
}

module.exports = {
  formatElapsedTime,
  estimateRemaining,
  calculateRate,
  logError,
  logImport,
  outputProgress,
  saveProgress,
  clearProgress,
  getProgress
};
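A minimal sketch of how these helpers compose in a long-running import loop; the module name and the per-item work are placeholders, not part of this file.

// Minimal sketch (assumed module name): report progress while processing a batch.
const { outputProgress, estimateRemaining, calculateRate, formatElapsedTime } = require('./progress'); // assumed path

async function processBatch(items) {
  const startTime = Date.now();
  for (let i = 0; i < items.length; i++) {
    // ... process items[i] here (placeholder) ...
    outputProgress({
      operation: 'Processing batch',
      current: i + 1,
      total: items.length,
      rate: calculateRate(startTime, i + 1),                        // items per second
      remaining: estimateRemaining(startTime, i + 1, items.length), // e.g. "3m 12s", or null at the end
      elapsed: formatElapsedTime(startTime)
    });
  }
  outputProgress({ status: 'complete', operation: 'Processing batch' });
}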
@@ -1,599 +0,0 @@
const { Client } = require('pg');
const path = require('path');
const dotenv = require('dotenv');
const fs = require('fs');

dotenv.config({ path: path.join(__dirname, '../.env') });

const dbConfig = {
  host: process.env.DB_HOST,
  user: process.env.DB_USER,
  password: process.env.DB_PASSWORD,
  database: process.env.DB_NAME,
  port: process.env.DB_PORT || 5432
};

// Tables to always protect from being dropped
const PROTECTED_TABLES = [
  'users',
  'permissions',
  'user_permissions',
  'calculate_history',
  'import_history',
  'ai_prompts',
  'ai_validation_performance',
  'templates',
  'reusable_images',
  'imported_daily_inventory',
  'imported_product_stat_history',
  'imported_product_current_prices'
];

// Helper function to output progress in JSON format
function outputProgress(data) {
  if (!data.status) {
    data = {
      status: 'running',
      ...data
    };
  }
  console.log(JSON.stringify(data));
}

// Core tables that must be created
const CORE_TABLES = [
  'products',
  'orders',
  'purchase_orders',
  'categories',
  'product_categories'
];

// Split SQL into individual statements
function splitSQLStatements(sql) {
  // First, normalize line endings
  sql = sql.replace(/\r\n/g, '\n');

  // Track statement boundaries
  let statements = [];
  let currentStatement = '';
  let inString = false;
  let stringChar = '';
  let inDollarQuote = false;
  let dollarQuoteTag = '';

  // Process character by character
  for (let i = 0; i < sql.length; i++) {
    const char = sql[i];
    const nextChar = sql[i + 1] || '';

    // Handle dollar quotes
    if (char === '$' && !inString) {
      // Look ahead to find the dollar quote tag
      let tag = '$';
      let j = i + 1;
      while (j < sql.length && sql[j] !== '$') {
        tag += sql[j];
        j++;
      }
      tag += '$';

      if (j < sql.length) { // Found closing $
        if (!inDollarQuote) {
          inDollarQuote = true;
          dollarQuoteTag = tag;
          currentStatement += tag;
          i = j;
          continue;
        } else if (sql.substring(i, j + 1) === dollarQuoteTag) {
          inDollarQuote = false;
          dollarQuoteTag = '';
          currentStatement += tag;
          i = j;
          continue;
        }
      }
    }

    // Handle string literals (only if not in dollar quote)
    if (!inDollarQuote && (char === "'" || char === '"') && sql[i - 1] !== '\\') {
      if (!inString) {
        inString = true;
        stringChar = char;
      } else if (char === stringChar) {
        inString = false;
      }
    }

    // Handle comments (only if not in string or dollar quote)
    if (!inString && !inDollarQuote) {
      if (char === '-' && nextChar === '-') {
        // Skip to end of line
        while (i < sql.length && sql[i] !== '\n') i++;
        continue;
      }

      if (char === '/' && nextChar === '*') {
        // Skip until closing */
        i += 2;
        while (i < sql.length && (sql[i] !== '*' || sql[i + 1] !== '/')) i++;
        i++; // Skip the closing /
        continue;
      }
    }

    // Handle statement boundaries (only if not in string or dollar quote)
    if (!inString && !inDollarQuote && char === ';') {
      if (currentStatement.trim()) {
        statements.push(currentStatement.trim());
      }
      currentStatement = '';
    } else {
      currentStatement += char;
    }
  }

  // Add the last statement if it exists
  if (currentStatement.trim()) {
    statements.push(currentStatement.trim());
  }

  return statements;
}
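To make the dollar-quote handling concrete, a small self-contained check, assuming splitSQLStatements above is in scope; the sample SQL is made up for illustration. Semicolons inside a $tag$-quoted function body must not split the statement.

// Minimal sketch: dollar-quoted bodies stay intact while top-level semicolons still split.
const sample = `
  CREATE TABLE demo (id INT); -- trailing comment is skipped
  CREATE FUNCTION bump() RETURNS trigger AS $fn$
  BEGIN
    NEW.updated_at := now(); -- this semicolon must NOT split the statement
    RETURN NEW;
  END;
  $fn$ LANGUAGE plpgsql;
`;

const parts = splitSQLStatements(sample);
console.log(parts.length); // 2: the CREATE TABLE and the whole CREATE FUNCTION

Without the dollar-quote state, the function body would be cut at the first internal semicolon and the schema load would fail mid-statement.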

async function resetDatabase() {
  outputProgress({
    operation: 'Starting database reset',
    message: 'Connecting to database...'
  });

  // Debug: Log current directory and file paths
  outputProgress({
    operation: 'Debug paths',
    message: {
      currentDir: process.cwd(),
      __dirname: __dirname,
      schemaPath: path.join(__dirname, '../db/schema.sql')
    }
  });

  const client = new Client(dbConfig);
  await client.connect();

  try {
    // Check PostgreSQL version and user
    outputProgress({
      operation: 'Checking database',
      message: 'Verifying PostgreSQL version and user privileges...'
    });

    const versionResult = await client.query('SELECT version()');
    const userResult = await client.query('SELECT current_user, current_database()');

    outputProgress({
      operation: 'Database info',
      message: {
        version: versionResult.rows[0].version,
        user: userResult.rows[0].current_user,
        database: userResult.rows[0].current_database
      }
    });

    // Get list of all tables in the current database
    outputProgress({
      operation: 'Getting table list',
      message: 'Retrieving all table names...'
    });

    const tablesResult = await client.query(`
      SELECT string_agg(tablename, ', ') as tables
      FROM pg_tables
      WHERE schemaname = 'public'
      AND tablename NOT IN (SELECT unnest($1::text[]));
    `, [PROTECTED_TABLES]);

    if (!tablesResult.rows[0].tables) {
      outputProgress({
        operation: 'No tables found',
        message: 'Database is already empty'
      });
    } else {
      outputProgress({
        operation: 'Dropping tables',
        message: 'Dropping all existing tables...'
      });

      // Disable triggers/foreign key checks
      await client.query('SET session_replication_role = \'replica\';');

      // Drop all tables except the protected ones
      const tables = tablesResult.rows[0].tables.split(', ');
      for (const table of tables) {
        if (!PROTECTED_TABLES.includes(table)) {
          await client.query(`DROP TABLE IF EXISTS "${table}" CASCADE`);
        }
      }

      // Only drop types if we're not preserving history tables
      const historyTablesExist = await client.query(`
        SELECT EXISTS (
          SELECT FROM pg_tables
          WHERE schemaname = 'public'
          AND tablename IN ('calculate_history', 'import_history')
        );
      `);

      if (!historyTablesExist.rows[0].exists) {
        await client.query('DROP TYPE IF EXISTS calculation_status CASCADE;');
        await client.query('DROP TYPE IF EXISTS module_name CASCADE;');
      }

      // Re-enable triggers/foreign key checks
      await client.query('SET session_replication_role = \'origin\';');
    }

    // Create enum types if they don't exist
    outputProgress({
      operation: 'Creating enum types',
      message: 'Setting up required enum types...'
    });

    // Check if types exist before creating
    const typesExist = await client.query(`
      SELECT EXISTS (
        SELECT 1 FROM pg_type
        WHERE typname = 'calculation_status'
      ) as calc_status_exists,
      EXISTS (
        SELECT 1 FROM pg_type
        WHERE typname = 'module_name'
      ) as module_name_exists;
    `);

    if (!typesExist.rows[0].calc_status_exists) {
      await client.query(`CREATE TYPE calculation_status AS ENUM ('running', 'completed', 'failed', 'cancelled')`);
    }

    if (!typesExist.rows[0].module_name_exists) {
      await client.query(`
        CREATE TYPE module_name AS ENUM (
          'product_metrics',
          'time_aggregates',
          'financial_metrics',
          'vendor_metrics',
          'category_metrics',
          'brand_metrics',
          'sales_forecasts',
          'abc_classification',
          'daily_snapshots',
          'periodic_metrics'
        )
      `);
    }

    // Read and execute main schema first (core tables)
    outputProgress({
      operation: 'Running database setup',
      message: 'Creating core tables...'
    });
    const schemaPath = path.join(__dirname, '../db/schema.sql');

    // Verify file exists
    if (!fs.existsSync(schemaPath)) {
      throw new Error(`Schema file not found at: ${schemaPath}`);
    }

    const schemaSQL = fs.readFileSync(schemaPath, 'utf8');

    outputProgress({
      operation: 'Schema file',
      message: {
        path: schemaPath,
        exists: fs.existsSync(schemaPath),
        size: fs.statSync(schemaPath).size,
        firstFewLines: schemaSQL.split('\n').slice(0, 5).join('\n')
      }
    });

    // Execute schema statements one at a time
    const statements = splitSQLStatements(schemaSQL);
    outputProgress({
      operation: 'SQL Execution',
      message: {
        totalStatements: statements.length,
        statements: statements.map((stmt, i) => ({
          number: i + 1,
          preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : '')
        }))
      }
    });

    // Start a transaction for better error handling
    await client.query('BEGIN');
    try {
      for (let i = 0; i < statements.length; i++) {
        const stmt = statements[i];
        try {
          const result = await client.query(stmt);

          // Verify if table was created (if this was a CREATE TABLE statement)
          if (stmt.trim().toLowerCase().startsWith('create table')) {
            const tableName = stmt.match(/create\s+table\s+(?:if\s+not\s+exists\s+)?["]?(\w+)["]?/i)?.[1];
            if (tableName) {
              const tableExists = await client.query(`
                SELECT COUNT(*) as count
                FROM information_schema.tables
                WHERE table_schema = 'public'
                AND table_name = $1
              `, [tableName]);

              outputProgress({
                operation: 'Table Creation Verification',
                message: {
                  table: tableName,
                  exists: tableExists.rows[0].count > 0
                }
              });
            }
          }

          outputProgress({
            operation: 'SQL Progress',
            message: {
              statement: i + 1,
              total: statements.length,
              preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : ''),
              rowCount: result.rowCount
            }
          });

          // Commit in chunks of 10 statements to avoid long-running transactions
          if (i > 0 && i % 10 === 0) {
            await client.query('COMMIT');
            await client.query('BEGIN');
          }
        } catch (sqlError) {
          await client.query('ROLLBACK');
          outputProgress({
            status: 'error',
            operation: 'SQL Error',
            error: sqlError.message,
            statement: stmt,
            statementNumber: i + 1
          });
          throw sqlError;
        }
      }
      // Commit the final transaction
      await client.query('COMMIT');
    } catch (error) {
      await client.query('ROLLBACK');
      throw error;
    }

    // Verify core tables were created
    const existingTables = (await client.query(`
      SELECT table_name
      FROM information_schema.tables
      WHERE table_schema = 'public'
    `)).rows.map(t => t.table_name);

    outputProgress({
      operation: 'Core tables verification',
      message: {
        found: existingTables,
        expected: CORE_TABLES
      }
    });

    const missingCoreTables = CORE_TABLES.filter(
      t => !existingTables.includes(t)
    );

    if (missingCoreTables.length > 0) {
      throw new Error(
        `Failed to create core tables: ${missingCoreTables.join(', ')}`
      );
    }

    outputProgress({
      operation: 'Core tables created',
      message: `Successfully created tables: ${CORE_TABLES.join(', ')}`
    });

    // Now read and execute config schema (since core tables exist)
    outputProgress({
      operation: 'Running config setup',
      message: 'Creating configuration tables...'
    });
    const configSchemaPath = path.join(__dirname, '../db/config-schema-new.sql');

    // Verify file exists
    if (!fs.existsSync(configSchemaPath)) {
      throw new Error(`Config schema file not found at: ${configSchemaPath}`);
    }

    const configSchemaSQL = fs.readFileSync(configSchemaPath, 'utf8');

    outputProgress({
      operation: 'Config Schema file',
      message: {
        path: configSchemaPath,
        exists: fs.existsSync(configSchemaPath),
        size: fs.statSync(configSchemaPath).size,
        firstFewLines: configSchemaSQL.split('\n').slice(0, 5).join('\n')
      }
    });

    // Execute config schema statements one at a time
    const configStatements = splitSQLStatements(configSchemaSQL);
    outputProgress({
      operation: 'Config SQL Execution',
      message: {
        totalStatements: configStatements.length,
        statements: configStatements.map((stmt, i) => ({
          number: i + 1,
          preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : '')
        }))
      }
    });

    // Start a transaction for better error handling
    await client.query('BEGIN');
    try {
      for (let i = 0; i < configStatements.length; i++) {
        const stmt = configStatements[i];
        try {
          const result = await client.query(stmt);

          outputProgress({
            operation: 'Config SQL Progress',
            message: {
              statement: i + 1,
              total: configStatements.length,
              preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : ''),
              rowCount: result.rowCount
            }
          });

          // Commit in chunks of 10 statements to avoid long-running transactions
          if (i > 0 && i % 10 === 0) {
            await client.query('COMMIT');
            await client.query('BEGIN');
          }
        } catch (sqlError) {
          await client.query('ROLLBACK');
          outputProgress({
            status: 'error',
            operation: 'Config SQL Error',
            error: sqlError.message,
            statement: stmt,
            statementNumber: i + 1
          });
          throw sqlError;
        }
      }
      // Commit the final transaction
      await client.query('COMMIT');
    } catch (error) {
      await client.query('ROLLBACK');
      throw error;
    }

    // Read and execute metrics schema (metrics tables)
    outputProgress({
      operation: 'Running metrics setup',
      message: 'Creating metrics tables...'
    });
    const metricsSchemaPath = path.join(__dirname, '../db/metrics-schema-new.sql');

    // Verify file exists
    if (!fs.existsSync(metricsSchemaPath)) {
      throw new Error(`Metrics schema file not found at: ${metricsSchemaPath}`);
    }

    const metricsSchemaSQL = fs.readFileSync(metricsSchemaPath, 'utf8');

    outputProgress({
      operation: 'Metrics Schema file',
      message: {
        path: metricsSchemaPath,
        exists: fs.existsSync(metricsSchemaPath),
        size: fs.statSync(metricsSchemaPath).size,
        firstFewLines: metricsSchemaSQL.split('\n').slice(0, 5).join('\n')
      }
    });

    // Execute metrics schema statements one at a time
    const metricsStatements = splitSQLStatements(metricsSchemaSQL);
    outputProgress({
      operation: 'Metrics SQL Execution',
      message: {
        totalStatements: metricsStatements.length,
        statements: metricsStatements.map((stmt, i) => ({
          number: i + 1,
          preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : '')
        }))
      }
    });

    // Start a transaction for better error handling
    await client.query('BEGIN');
    try {
      for (let i = 0; i < metricsStatements.length; i++) {
        const stmt = metricsStatements[i];
        try {
          const result = await client.query(stmt);

          outputProgress({
            operation: 'Metrics SQL Progress',
            message: {
              statement: i + 1,
              total: metricsStatements.length,
              preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : ''),
              rowCount: result.rowCount
            }
          });

          // Commit in chunks of 10 statements to avoid long-running transactions
          if (i > 0 && i % 10 === 0) {
            await client.query('COMMIT');
            await client.query('BEGIN');
          }
        } catch (sqlError) {
          await client.query('ROLLBACK');
          outputProgress({
            status: 'error',
            operation: 'Metrics SQL Error',
            error: sqlError.message,
            statement: stmt,
            statementNumber: i + 1
          });
          throw sqlError;
        }
      }
      // Commit the final transaction
      await client.query('COMMIT');
    } catch (error) {
      await client.query('ROLLBACK');
      throw error;
    }

    outputProgress({
      status: 'complete',
      operation: 'Database reset complete',
      message: 'Database has been reset and all tables recreated'
    });
  } catch (error) {
    outputProgress({
      status: 'error',
      operation: 'Failed to reset database',
      error: error.message,
      stack: error.stack
    });
    process.exit(1);
  } finally {
    // Make sure to re-enable foreign key checks if they were disabled
    try {
      await client.query('SET session_replication_role = \'origin\'');
    } catch (e) {
      console.error('Error re-enabling foreign key checks:', e.message);
    }

    // Close the database connection
    await client.end();
  }
}

// Export if required as a module
if (typeof module !== 'undefined' && module.exports) {
  module.exports = resetDatabase;
}

// Run if called directly
if (require.main === module) {
  resetDatabase().catch(error => {
    console.error('Error:', error);
    process.exit(1);
  });
}
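Since the drop step is destructive, a minimal sketch of invoking resetDatabase from another script behind an explicit confirmation flag; the module path and the CONFIRM_DB_RESET variable are made-up illustrations, not part of this script.

// Minimal sketch (assumed module path): guard a programmatic reset behind a flag.
const resetDatabase = require('./reset-database'); // assumed file name

if (process.env.CONFIRM_DB_RESET !== 'yes') {
  console.error('Refusing to reset: set CONFIRM_DB_RESET=yes to proceed.');
  process.exit(1);
}

resetDatabase()
  .then(() => console.log('Reset finished.'))
  .catch(err => {
    console.error('Reset failed:', err);
    process.exit(1);
  });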
@@ -1,384 +0,0 @@
const { Client } = require('pg');
const path = require('path');
const fs = require('fs');
require('dotenv').config({ path: path.resolve(__dirname, '../.env') });

const dbConfig = {
  host: process.env.DB_HOST,
  user: process.env.DB_USER,
  password: process.env.DB_PASSWORD,
  database: process.env.DB_NAME,
  port: process.env.DB_PORT || 5432
};

function outputProgress(data) {
  if (!data.status) {
    data = {
      status: 'running',
      ...data
    };
  }
  console.log(JSON.stringify(data));
}

// Tables to always protect from being dropped
const PROTECTED_TABLES = [
  'users',
  'permissions',
  'user_permissions',
  'calculate_history',
  'import_history',
  'ai_prompts',
  'ai_validation_performance',
  'templates',
  'reusable_images',
  'imported_daily_inventory',
  'imported_product_stat_history',
  'imported_product_current_prices'
];

// Split SQL into individual statements
function splitSQLStatements(sql) {
  sql = sql.replace(/\r\n/g, '\n');
  let statements = [];
  let currentStatement = '';
  let inString = false;
  let stringChar = '';

  for (let i = 0; i < sql.length; i++) {
    const char = sql[i];
    const nextChar = sql[i + 1] || '';

    if ((char === "'" || char === '"') && sql[i - 1] !== '\\') {
      if (!inString) {
        inString = true;
        stringChar = char;
      } else if (char === stringChar) {
        inString = false;
      }
    }

    if (!inString && char === '-' && nextChar === '-') {
      while (i < sql.length && sql[i] !== '\n') i++;
      continue;
    }

    if (!inString && char === '/' && nextChar === '*') {
      i += 2;
      while (i < sql.length && (sql[i] !== '*' || sql[i + 1] !== '/')) i++;
      i++;
      continue;
    }

    if (!inString && char === ';') {
      if (currentStatement.trim()) {
        statements.push(currentStatement.trim());
      }
      currentStatement = '';
    } else {
      currentStatement += char;
    }
  }

  if (currentStatement.trim()) {
    statements.push(currentStatement.trim());
  }

  return statements;
}

async function resetMetrics() {
  let client;
  try {
    outputProgress({
      operation: 'Starting metrics reset',
      message: 'Connecting to database...'
    });

    client = new Client(dbConfig);
    await client.connect();

    // Get metrics tables from the schema file by looking for CREATE TABLE statements
    const schemaPath = path.resolve(__dirname, '../db/metrics-schema-new.sql');
    if (!fs.existsSync(schemaPath)) {
      throw new Error(`Schema file not found at: ${schemaPath}`);
    }

    const schemaSQL = fs.readFileSync(schemaPath, 'utf8');
    const createTableRegex = /create\s+table\s+(?:if\s+not\s+exists\s+)?["]?(?:public\.)?(\w+)["]?/gi;
    let metricsTables = [];
    let match;

    while ((match = createTableRegex.exec(schemaSQL)) !== null) {
      if (match[1] && !PROTECTED_TABLES.includes(match[1])) {
        metricsTables.push(match[1]);
      }
    }

    if (metricsTables.length === 0) {
      throw new Error('No tables found in the schema file');
    }

    outputProgress({
      operation: 'Schema analysis',
      message: `Found ${metricsTables.length} metrics tables in schema: ${metricsTables.join(', ')}`
    });

    // Explicitly begin a transaction
    await client.query('BEGIN');

    // First verify current state
    const initialTables = await client.query(`
      SELECT tablename as name
      FROM pg_tables
      WHERE schemaname = 'public'
      AND tablename = ANY($1)
      AND tablename NOT IN (SELECT unnest($2::text[]))
    `, [metricsTables, PROTECTED_TABLES]);

    outputProgress({
      operation: 'Initial state',
      message: `Found ${initialTables.rows.length} existing metrics tables: ${initialTables.rows.map(t => t.name).join(', ')}`
    });

    // Disable foreign key checks at the start
    await client.query('SET session_replication_role = \'replica\'');

    // Drop all metrics tables in reverse order to handle dependencies
    outputProgress({
      operation: 'Dropping metrics tables',
      message: 'Removing existing metrics tables...'
    });

    // Reverse the array to handle dependencies properly
    for (const table of [...metricsTables].reverse()) {
      // Skip protected tables (redundant check)
      if (PROTECTED_TABLES.includes(table)) {
        outputProgress({
          operation: 'Protected table',
          message: `Skipping protected table: ${table}`
        });
        continue;
      }

      try {
        // Drop with CASCADE so dependent objects are removed as well
        await client.query(`DROP TABLE IF EXISTS "${table}" CASCADE`);

        // Verify the table was actually dropped
        const checkDrop = await client.query(`
          SELECT COUNT(*) as count
          FROM pg_tables
          WHERE schemaname = 'public'
          AND tablename = $1
        `, [table]);

        if (parseInt(checkDrop.rows[0].count) > 0) {
          throw new Error(`Failed to drop table ${table} - table still exists`);
        }

        outputProgress({
          operation: 'Table dropped',
          message: `Successfully dropped table: ${table}`
        });

        // Commit after each table drop to ensure locks are released
        await client.query('COMMIT');
        // Start a new transaction for the next table
        await client.query('BEGIN');
        // Re-disable foreign key constraints for the new transaction
        await client.query('SET session_replication_role = \'replica\'');
      } catch (err) {
        outputProgress({
          status: 'error',
          operation: 'Drop table error',
          message: `Error dropping table ${table}: ${err.message}`
        });
        await client.query('ROLLBACK');
        // Re-start transaction for next table
        await client.query('BEGIN');
        await client.query('SET session_replication_role = \'replica\'');
      }
    }

    // Verify all tables were dropped
    const afterDrop = await client.query(`
      SELECT tablename as name
      FROM pg_tables
      WHERE schemaname = 'public'
      AND tablename = ANY($1)
    `, [metricsTables]);

    if (afterDrop.rows.length > 0) {
      throw new Error(`Failed to drop all tables. Remaining tables: ${afterDrop.rows.map(t => t.name).join(', ')}`);
    }

    // Make sure we have a fresh transaction here
    await client.query('COMMIT');
    await client.query('BEGIN');
    await client.query('SET session_replication_role = \'replica\'');

    // Read metrics schema
    outputProgress({
      operation: 'Reading schema',
      message: 'Loading metrics schema file...'
    });

    const statements = splitSQLStatements(schemaSQL);

    outputProgress({
      operation: 'Schema loaded',
      message: `Found ${statements.length} SQL statements to execute`
    });

    // Execute schema statements
    for (let i = 0; i < statements.length; i++) {
      const stmt = statements[i];
      try {
        const result = await client.query(stmt);

        // If this is a CREATE TABLE statement, verify the table was created
        if (stmt.trim().toLowerCase().startsWith('create table')) {
          const tableName = stmt.match(/create\s+table\s+(?:if\s+not\s+exists\s+)?["]?(?:public\.)?(\w+)["]?/i)?.[1];
          if (tableName) {
            const checkCreate = await client.query(`
              SELECT tablename as name
              FROM pg_tables
              WHERE schemaname = 'public'
              AND tablename = $1
            `, [tableName]);

            if (checkCreate.rows.length === 0) {
              throw new Error(`Failed to create table ${tableName} - table does not exist after CREATE statement`);
            }

            outputProgress({
              operation: 'Table created',
              message: `Successfully created table: ${tableName}`
            });
          }
        }

        outputProgress({
          operation: 'SQL Progress',
          message: {
            statement: i + 1,
            total: statements.length,
            preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : ''),
            rowCount: result.rowCount
          }
        });

        // Commit every 10 statements to avoid long-running transactions
        if (i > 0 && i % 10 === 0) {
          await client.query('COMMIT');
          await client.query('BEGIN');
          await client.query('SET session_replication_role = \'replica\'');
        }
      } catch (sqlError) {
        outputProgress({
          status: 'error',
          operation: 'SQL Error',
          message: {
            error: sqlError.message,
            statement: stmt,
            statementNumber: i + 1
          }
        });
        await client.query('ROLLBACK');
        throw sqlError;
      }
    }

    // Final commit for any pending statements
    await client.query('COMMIT');

    // Start new transaction for final checks
    await client.query('BEGIN');

    // Re-enable foreign key checks after all tables are created
    await client.query('SET session_replication_role = \'origin\'');

    // Verify metrics tables were created
    outputProgress({
      operation: 'Verifying metrics tables',
      message: 'Checking all metrics tables were created...'
    });

    const metricsTablesResult = await client.query(`
      SELECT tablename as name
      FROM pg_tables
      WHERE schemaname = 'public'
      AND tablename = ANY($1)
    `, [metricsTables]);

    outputProgress({
      operation: 'Tables found',
      message: `Found ${metricsTablesResult.rows.length} tables: ${metricsTablesResult.rows.map(t => t.name).join(', ')}`
    });

    const existingMetricsTables = metricsTablesResult.rows.map(t => t.name);
    const missingMetricsTables = metricsTables.filter(t => !existingMetricsTables.includes(t));

    if (missingMetricsTables.length > 0) {
      // Do one final check of the actual tables
      const finalCheck = await client.query(`
        SELECT tablename as name
        FROM pg_tables
        WHERE schemaname = 'public'
      `);
      outputProgress({
        operation: 'Final table check',
        message: `All database tables: ${finalCheck.rows.map(t => t.name).join(', ')}`
      });
      await client.query('ROLLBACK');
      throw new Error(`Failed to create metrics tables: ${missingMetricsTables.join(', ')}`);
    }

    // Commit final transaction
    await client.query('COMMIT');

    outputProgress({
      status: 'complete',
      operation: 'Reset complete',
      message: 'All metrics tables have been reset successfully'
    });
  } catch (error) {
    outputProgress({
      status: 'error',
      operation: 'Reset failed',
      message: error.message,
      stack: error.stack
    });

    if (client) {
      try {
        await client.query('ROLLBACK');
      } catch (rollbackError) {
        console.error('Error during rollback:', rollbackError);
      }
      // Make sure to re-enable foreign key checks even if there's an error
      await client.query('SET session_replication_role = \'origin\'').catch(() => {});
    }
    throw error;
  } finally {
    if (client) {
      // One final attempt to ensure foreign key checks are enabled
      await client.query('SET session_replication_role = \'origin\'').catch(() => {});
      await client.end();
    }
  }
}

// Export if required as a module
if (typeof module !== 'undefined' && module.exports) {
  module.exports = resetMetrics;
}

// Run if called from command line
if (require.main === module) {
  resetMetrics().catch(error => {
    console.error('Error:', error);
    process.exit(1);
  });
}
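As a quick check of the CREATE TABLE name extraction above, the same regex applied to made-up schema text; the table names in the sample are illustrative only.

// Minimal sketch: the regex tolerates IF NOT EXISTS, a public. prefix, and quoted names.
const createTableRegex = /create\s+table\s+(?:if\s+not\s+exists\s+)?["]?(?:public\.)?(\w+)["]?/gi;

const sampleSchema = `
  CREATE TABLE IF NOT EXISTS public.product_metrics (pid BIGINT PRIMARY KEY);
  CREATE TABLE "brand_metrics" (brand TEXT PRIMARY KEY);
`;

let match;
const tables = [];
while ((match = createTableRegex.exec(sampleSchema)) !== null) {
  tables.push(match[1]);
}
console.log(tables); // ['product_metrics', 'brand_metrics']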