Add metrics historical backfill scripts, fix up all new metrics calc queries, and add a combined runner script

inventory-server/scripts/calculate-metrics-new.js (new file, 608 lines)
@@ -0,0 +1,608 @@
// run-all-updates.js
const path = require('path');
const fs = require('fs');
const { Pool } = require('pg'); // Assuming you use 'pg'

// --- Configuration ---
// Toggle these constants to enable/disable specific steps for testing
const RUN_DAILY_SNAPSHOTS = true;
const RUN_PRODUCT_METRICS = true;
const RUN_PERIODIC_METRICS = true;

// Maximum execution time for the entire sequence (e.g., 90 minutes)
const MAX_EXECUTION_TIME_TOTAL = 90 * 60 * 1000;
// Maximum execution time per individual SQL step (e.g., 30 minutes)
const MAX_EXECUTION_TIME_PER_STEP = 30 * 60 * 1000;
// Query cancellation timeout
const CANCEL_QUERY_AFTER_SECONDS = 5;
// --- End Configuration ---

// Change working directory to script directory
process.chdir(path.dirname(__filename));

// Log script path for debugging
console.log('Script running from:', __dirname);

// Try to load environment variables from multiple locations
const envPaths = [
  path.resolve(__dirname, '../..', '.env'), // Two levels up (inventory/.env)
  path.resolve(__dirname, '..', '.env'),    // One level up (inventory-server/.env)
  path.resolve(__dirname, '.env'),          // Same directory
  '/var/www/html/inventory/.env'            // Server absolute path
];

let envLoaded = false;
for (const envPath of envPaths) {
  if (fs.existsSync(envPath)) {
    console.log(`Loading environment from: ${envPath}`);
    require('dotenv').config({ path: envPath });
    envLoaded = true;
    break;
  }
}

if (!envLoaded) {
  console.warn('WARNING: Could not find .env file in any of the expected locations.');
  console.warn('Checked paths:', envPaths);
}
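// For reference, a minimal .env consumed by the checks below might look like
// this (variable names come from this script; values are illustrative only):
//
//   DB_HOST=localhost
//   DB_PORT=5432
//   DB_USER=inventory
//   DB_PASSWORD=secret
//   DB_NAME=inventory
//   DB_SSL=false
//   # Or, instead of the individual settings:
//   # DATABASE_URL=postgres://user:pass@host:5432/dbname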
// --- Database Setup ---
// Make sure we have the required DB credentials
if (!process.env.DB_HOST && !process.env.DATABASE_URL) {
  console.error('WARNING: Neither DB_HOST nor DATABASE_URL environment variables found');
}

// Only validate individual parameters if not using connection string
if (!process.env.DATABASE_URL) {
  if (!process.env.DB_USER) console.error('WARNING: DB_USER environment variable is missing');
  if (!process.env.DB_NAME) console.error('WARNING: DB_NAME environment variable is missing');

  // Password must be a string for PostgreSQL SCRAM authentication
  if (!process.env.DB_PASSWORD || typeof process.env.DB_PASSWORD !== 'string') {
    console.error('WARNING: DB_PASSWORD environment variable is missing or not a string');
  }
}

// Configure database connection to match individual scripts
let dbConfig;

// Check if a DATABASE_URL exists (common in production environments)
if (process.env.DATABASE_URL && typeof process.env.DATABASE_URL === 'string') {
  console.log('Using DATABASE_URL for connection');
  dbConfig = {
    connectionString: process.env.DATABASE_URL,
    ssl: process.env.DB_SSL === 'true' ? { rejectUnauthorized: false } : false,
    // Add performance optimizations
    max: 10, // connection pool max size
    idleTimeoutMillis: 30000,
    connectionTimeoutMillis: 60000,
    // Set timeouts for long-running queries
    statement_timeout: 1800000, // 30 minutes
    query_timeout: 1800000 // 30 minutes
  };
} else {
  // Use individual connection parameters
  dbConfig = {
    host: process.env.DB_HOST,
    user: process.env.DB_USER,
    password: process.env.DB_PASSWORD,
    database: process.env.DB_NAME,
    port: process.env.DB_PORT || 5432,
    ssl: process.env.DB_SSL === 'true',
    // Add performance optimizations
    max: 10, // connection pool max size
    idleTimeoutMillis: 30000,
    connectionTimeoutMillis: 60000,
    // Set timeouts for long-running queries
    statement_timeout: 1800000, // 30 minutes
    query_timeout: 1800000 // 30 minutes
  };
}

// Try to load from utils DB module as a last resort
try {
  if (!process.env.DB_HOST && !process.env.DATABASE_URL) {
    console.log('Attempting to load DB config from individual script modules...');
    const dbModule = require('./metrics-new/utils/db');
    if (dbModule && dbModule.dbConfig) {
      console.log('Found DB config in individual script module');
      dbConfig = {
        ...dbModule.dbConfig,
        // Add performance optimizations if not present
        max: dbModule.dbConfig.max || 10,
        idleTimeoutMillis: dbModule.dbConfig.idleTimeoutMillis || 30000,
        connectionTimeoutMillis: dbModule.dbConfig.connectionTimeoutMillis || 60000,
        statement_timeout: 1800000,
        query_timeout: 1800000
      };
    }
  }
} catch (err) {
  console.warn('Could not load DB config from individual script modules:', err.message);
}

// Debug log connection info (without password)
console.log('DB Connection Info:', {
  connectionString: dbConfig.connectionString ? 'PROVIDED' : undefined,
  host: dbConfig.host,
  user: dbConfig.user,
  database: dbConfig.database,
  port: dbConfig.port,
  ssl: dbConfig.ssl ? 'ENABLED' : 'DISABLED',
  password: (dbConfig.password || dbConfig.connectionString) ? '****' : 'MISSING' // Only show if credentials exist
});

const pool = new Pool(dbConfig);

const getConnection = () => {
  return pool.connect();
};

const closePool = () => {
  console.log("Closing database connection pool.");
  return pool.end();
};
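// Note: pool.end() resolves only after checked-out clients have been released
// back to the pool, so callers should release their connections before shutdown.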
// --- Progress Utilities ---
// Using functions directly instead of globals
const progressUtils = require('./metrics-new/utils/progress'); // Assuming utils/progress.js exports these
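// The progress module is assumed to expose the helpers used throughout this
// script: outputProgress(), formatElapsedTime(), estimateRemaining(),
// calculateRate(), clearProgress() and getProgress().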
// --- State & Cancellation ---
let isCancelled = false;
let currentStep = ''; // Track which step is running for cancellation message
let overallStartTime = null;
let mainTimeoutHandle = null;
let stepTimeoutHandle = null;

async function cancelCalculation(reason = 'cancelled by user') {
  if (isCancelled) return; // Prevent multiple cancellations
  isCancelled = true;
  console.log(`Calculation ${reason}. Attempting to cancel active step: ${currentStep}`);

  // Clear timeouts
  if (mainTimeoutHandle) clearTimeout(mainTimeoutHandle);
  if (stepTimeoutHandle) clearTimeout(stepTimeoutHandle);

  // Attempt to cancel the long-running query in Postgres
  let conn = null;
  try {
    console.log(`Attempting to cancel queries running longer than ${CANCEL_QUERY_AFTER_SECONDS} seconds...`);
    conn = await getConnection();
    const result = await conn.query(`
      SELECT pg_cancel_backend(pid)
      FROM pg_stat_activity
      WHERE query_start < now() - interval '${CANCEL_QUERY_AFTER_SECONDS} seconds'
        AND application_name = 'node-metrics-calculator' -- Match specific app name
        AND state = 'active' -- Only cancel active queries
        AND query NOT LIKE '%pg_cancel_backend%'
        AND pid <> pg_backend_pid(); -- Don't cancel self
    `);
    console.log(`Sent ${result.rowCount} cancellation signal(s).`);
    conn.release();
  } catch (err) {
    console.error('Error during database query cancellation:', err.message);
    if (conn) {
      try { conn.release(); } catch (e) { console.error("Error releasing cancellation connection", e); }
    }
    // Proceed with script termination attempt even if DB cancel fails
  } finally {
    // Update progress to show cancellation
    progressUtils.outputProgress({
      status: 'cancelled',
      operation: `Calculation ${reason} during step: ${currentStep}`,
      current: 0, // Reset progress indicators
      total: 100,
      elapsed: overallStartTime ? progressUtils.formatElapsedTime(overallStartTime) : 'N/A',
      remaining: null,
      rate: 0,
      percentage: '0', // Or keep last known percentage?
      timing: {
        start_time: overallStartTime ? new Date(overallStartTime).toISOString() : 'N/A',
        end_time: new Date().toISOString(),
        elapsed_seconds: overallStartTime ? Math.round((Date.now() - overallStartTime) / 1000) : 0
      }
    });
  }

  // Note: We don't force exit here anymore. We let the main function's error
  // handling catch the cancellation error thrown by executeSqlStep or the timeout.
  return {
    success: true, // Indicates cancellation was initiated
    message: `Calculation ${reason}`
  };
}
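// pg_cancel_backend() only interrupts the target backend's current query,
// much like statement_timeout would; it leaves the session connected. The
// harsher pg_terminate_backend() would close the connection entirely.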
// Handle SIGINT (Ctrl+C) and SIGTERM (kill) signals
process.on('SIGINT', () => {
  console.log('\nReceived SIGINT (Ctrl+C).');
  cancelCalculation('cancelled by user (SIGINT)');
  // Give cancellation a moment to propagate before force-exiting if needed
  setTimeout(() => process.exit(1), 2000);
});
process.on('SIGTERM', () => {
  console.log('Received SIGTERM.');
  cancelCalculation('cancelled by system (SIGTERM)');
  // Give cancellation a moment to propagate before force-exiting if needed
  setTimeout(() => process.exit(1), 2000);
});

// Add error handlers for uncaught exceptions/rejections
process.on('uncaughtException', (error) => {
  console.error('Uncaught Exception:', error);
  // Attempt graceful shutdown/logging if possible, then exit
  cancelCalculation('failed due to uncaught exception').finally(() => {
    closePool().finally(() => process.exit(1));
  });
});

process.on('unhandledRejection', (reason, promise) => {
  console.error('Unhandled Rejection at:', promise, 'reason:', reason);
  // Attempt graceful shutdown/logging if possible, then exit
  cancelCalculation('failed due to unhandled rejection').finally(() => {
    closePool().finally(() => process.exit(1));
  });
});

// --- Core Logic ---

/**
 * Executes a single SQL calculation step.
 * @param {object} config - Configuration for the step.
 * @param {string} config.name - User-friendly name of the step.
 * @param {string} config.sqlFile - Path to the SQL file.
 * @param {string} config.historyType - Type identifier for calculate_history.
 * @param {string} config.statusModule - Module name for calculate_status.
 * @param {object} progress - Progress utility functions.
 * @returns {Promise<{success: boolean, message: string, duration: number}>}
 */
async function executeSqlStep(config, progress) {
  if (isCancelled) throw new Error(`Calculation skipped step ${config.name} due to prior cancellation.`);

  currentStep = config.name; // Update global state
  console.log(`\n--- Starting Step: ${config.name} ---`);
  const stepStartTime = Date.now();
  let connection = null;
  let calculateHistoryId = null;

  // Set timeout for this specific step
  if (stepTimeoutHandle) clearTimeout(stepTimeoutHandle); // Clear previous step's timeout
  stepTimeoutHandle = setTimeout(() => {
    // Don't exit directly, throw an error to be caught by the main loop
    const timeoutError = new Error(`Step "${config.name}" timed out after ${MAX_EXECUTION_TIME_PER_STEP / 1000} seconds.`);
    cancelCalculation(`timed out during step: ${config.name}`); // Initiate cancellation process
    // The error will likely be thrown before cancelCalculation fully completes,
    // but cancelCalculation attempts to stop the query.
    // The main catch block will handle cleanup.
  }, MAX_EXECUTION_TIME_PER_STEP);

  try {
    // 1. Read SQL File
    const sqlFilePath = path.resolve(__dirname, config.sqlFile);
    if (!fs.existsSync(sqlFilePath)) {
      throw new Error(`SQL file not found: ${sqlFilePath}`);
    }
    const sqlQuery = fs.readFileSync(sqlFilePath, 'utf8');
    console.log(`Read SQL file: ${config.sqlFile}`);

    // Check for potential parameter references that might cause issues
    const parameterMatches = sqlQuery.match(/\$\d+(?!\:\:)/g);
    if (parameterMatches && parameterMatches.length > 0) {
      console.warn(`WARNING: Found ${parameterMatches.length} untyped parameters in SQL: ${parameterMatches.slice(0, 5).join(', ')}${parameterMatches.length > 5 ? '...' : ''}`);
      console.warn('These might cause "could not determine data type of parameter" errors.');
    }
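    // Example: the untyped-parameter check above flags placeholders such as
    // "$1" that are not immediately followed by a "::" cast, so "WHERE id = $1"
    // is reported while "WHERE id = $1::integer" is not.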
    // 2. Get Database Connection
    connection = await getConnection();
    console.log("Database connection acquired.");

    // 3. Clean up Previous Runs & Create History Record (within a transaction)
    await connection.query('BEGIN');

    // Ensure calculate_status table exists
    await connection.query(`
      CREATE TABLE IF NOT EXISTS calculate_status (
        module_name TEXT PRIMARY KEY,
        last_calculation_timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
      );
    `);

    // Ensure calculate_history table exists (basic structure)
    await connection.query(`
      CREATE TABLE IF NOT EXISTS calculate_history (
        id SERIAL PRIMARY KEY,
        start_time TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
        end_time TIMESTAMP WITH TIME ZONE,
        duration_seconds INTEGER,
        status TEXT, -- 'running', 'completed', 'failed', 'cancelled'
        error_message TEXT,
        additional_info JSONB
      );
    `);

    // Mark previous runs of this type as cancelled
    await connection.query(`
      UPDATE calculate_history
      SET
        status = 'cancelled',
        end_time = NOW(),
        duration_seconds = EXTRACT(EPOCH FROM (NOW() - start_time))::INTEGER,
        error_message = 'Previous calculation was not completed properly or was superseded.'
      WHERE status = 'running' AND additional_info->>'type' = $1::text;
    `, [config.historyType]);
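    // additional_info->>'type' uses the JSONB "->>" operator, which extracts
    // the value as text so it can be compared directly with the $1::text
    // parameter.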
    // Create history record for this run
    const historyResult = await connection.query(`
      INSERT INTO calculate_history (status, additional_info)
      VALUES ('running', jsonb_build_object('type', $1::text, 'sql_file', $2::text))
      RETURNING id;
    `, [config.historyType, config.sqlFile]);
    calculateHistoryId = historyResult.rows[0].id;

    await connection.query('COMMIT');
    console.log(`Created history record ID: ${calculateHistoryId}`);

    // 4. Initial Progress Update
    progress.outputProgress({
      status: 'running',
      operation: `Starting: ${config.name}`,
      current: 0, total: 100,
      elapsed: progress.formatElapsedTime(stepStartTime),
      remaining: 'Calculating...', rate: 0, percentage: '0',
      timing: { start_time: new Date(stepStartTime).toISOString() }
    });

    // 5. Execute the Main SQL Query
    progress.outputProgress({
      status: 'running',
      operation: `Executing SQL: ${config.name}`,
      current: 25, total: 100,
      elapsed: progress.formatElapsedTime(stepStartTime),
      remaining: 'Executing...', rate: 0, percentage: '25',
      timing: { start_time: new Date(stepStartTime).toISOString() }
    });
    console.log(`Executing SQL for ${config.name}...`);

    try {
      // Try executing exactly as individual scripts do
      console.log('Executing SQL with simple query method...');
      await connection.query(sqlQuery);
    } catch (sqlError) {
      if (sqlError.message.includes('could not determine data type of parameter')) {
        console.log('Simple query failed with parameter type error, trying alternative method...');
        try {
          // Execute with explicit text mode to avoid parameter confusion
          await connection.query({
            text: sqlQuery,
            rowMode: 'text'
          });
        } catch (altError) {
          console.error('Alternative execution method also failed:', altError.message);
          throw altError; // Re-throw the alternative error
        }
      } else {
        console.error('SQL Execution Error:', sqlError.message);
        if (sqlError.position) {
          // If the error has a position, try to show the relevant part of the SQL query
          const position = parseInt(sqlError.position, 10);
          const startPos = Math.max(0, position - 100);
          const endPos = Math.min(sqlQuery.length, position + 100);
          console.error('SQL Error Context:');
          console.error('...' + sqlQuery.substring(startPos, position) + ' [ERROR HERE] ' + sqlQuery.substring(position, endPos) + '...');
        }
        throw sqlError; // Re-throw to be caught by the main try/catch
      }
    }

    // Check for cancellation immediately after query finishes
    if (isCancelled) throw new Error(`Calculation cancelled during SQL execution for ${config.name}`);

    console.log(`SQL execution finished for ${config.name}.`);

    // 6. Update Status & History (within a transaction)
    await connection.query('BEGIN');

    await connection.query(`
      INSERT INTO calculate_status (module_name, last_calculation_timestamp)
      VALUES ($1::text, NOW())
      ON CONFLICT (module_name) DO UPDATE
        SET last_calculation_timestamp = EXCLUDED.last_calculation_timestamp;
    `, [config.statusModule]);
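    // The INSERT ... ON CONFLICT upsert above keeps exactly one row per
    // module_name, refreshing last_calculation_timestamp on repeat runs
    // instead of failing on the primary-key conflict.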
    const stepDuration = Math.round((Date.now() - stepStartTime) / 1000);
    await connection.query(`
      UPDATE calculate_history
      SET
        end_time = NOW(),
        duration_seconds = $1::integer,
        status = 'completed'
      WHERE id = $2::integer;
    `, [stepDuration, calculateHistoryId]);

    await connection.query('COMMIT');

    // 7. Final Progress Update for Step
    progress.outputProgress({
      status: 'complete',
      operation: `Completed: ${config.name}`,
      current: 100, total: 100,
      elapsed: progress.formatElapsedTime(stepStartTime),
      remaining: '0s', rate: 0, percentage: '100',
      timing: {
        start_time: new Date(stepStartTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: stepDuration
      }
    });
    console.log(`--- Finished Step: ${config.name} (Duration: ${progress.formatElapsedTime(stepStartTime)}) ---`);

    return {
      success: true,
      message: `${config.name} completed successfully`,
      duration: stepDuration
    };

  } catch (error) {
    clearTimeout(stepTimeoutHandle); // Clear timeout on error
    const errorEndTime = Date.now();
    const errorDuration = Math.round((errorEndTime - stepStartTime) / 1000);
    const finalStatus = isCancelled ? 'cancelled' : 'failed';
    const errorMessage = error.message || 'Unknown error';

    console.error(`--- ERROR in Step: ${config.name} ---`);
    console.error(error); // Log the full error
    console.error(`------------------------------------`);

    // Update history with error/cancellation status
    if (connection && calculateHistoryId) {
      try {
        // Use a separate transaction for error logging
        await connection.query('ROLLBACK'); // Rollback any partial transaction from try block
        await connection.query('BEGIN');
        await connection.query(`
          UPDATE calculate_history
          SET
            end_time = NOW(),
            duration_seconds = $1::integer,
            status = $2::text,
            error_message = $3::text
          WHERE id = $4::integer;
        `, [errorDuration, finalStatus, errorMessage.substring(0, 1000), calculateHistoryId]); // Limit error message size
        await connection.query('COMMIT');
        console.log(`Updated history record ID ${calculateHistoryId} with status: ${finalStatus}`);
      } catch (historyError) {
        console.error("FATAL: Failed to update history record on error:", historyError);
        // Cannot rollback here if already rolled back or commit failed
      }
    } else {
      console.warn("Could not update history record on error (no connection or history ID).");
    }

    // Update progress file with error/cancellation
    progress.outputProgress({
      status: finalStatus,
      operation: `Error in ${config.name}: ${errorMessage.split('\n')[0]}`, // Show first line of error
      current: 50, total: 100, // Indicate partial completion
      elapsed: progress.formatElapsedTime(stepStartTime),
      remaining: null, rate: 0, percentage: '50',
      timing: {
        start_time: new Date(stepStartTime).toISOString(),
        end_time: new Date(errorEndTime).toISOString(),
        elapsed_seconds: errorDuration
      }
    });

    // Rethrow the error to be caught by the main runCalculations function
    throw error; // Add context if needed: new Error(`Step ${config.name} failed: ${errorMessage}`)

  } finally {
    clearTimeout(stepTimeoutHandle); // Ensure timeout is cleared
    currentStep = ''; // Reset current step
    if (connection) {
      try {
        await connection.release();
        console.log("Database connection released.");
      } catch (releaseError) {
        console.error("Error releasing database connection:", releaseError);
      }
    }
  }
}

/**
 * Main function to run all calculation steps sequentially.
 */
async function runAllCalculations() {
  overallStartTime = Date.now();
  isCancelled = false; // Reset cancellation flag at start

  // Overall timeout for the entire script
  mainTimeoutHandle = setTimeout(() => {
    console.error(`--- OVERALL TIMEOUT REACHED (${MAX_EXECUTION_TIME_TOTAL / 1000}s) ---`);
    cancelCalculation(`overall timeout reached`);
    // The process should exit via the unhandled rejection/exception handlers
    // or the SIGTERM/SIGINT handlers after cancellation attempt.
  }, MAX_EXECUTION_TIME_TOTAL);

  const steps = [
    {
      run: RUN_DAILY_SNAPSHOTS,
      name: 'Daily Snapshots Update',
      sqlFile: 'metrics-new/update_daily_snapshots.sql',
      historyType: 'daily_snapshots',
      statusModule: 'daily_snapshots'
    },
    {
      run: RUN_PRODUCT_METRICS,
      name: 'Product Metrics Update',
      sqlFile: 'metrics-new/update_product_metrics.sql', // ASSUMING the initial population is now part of a regular update
      historyType: 'product_metrics',
      statusModule: 'product_metrics'
    },
    {
      run: RUN_PERIODIC_METRICS,
      name: 'Periodic Metrics Update',
      sqlFile: 'metrics-new/update_periodic_metrics.sql',
      historyType: 'periodic_metrics',
      statusModule: 'periodic_metrics'
    }
  ];

  let overallSuccess = true;

  try {
    for (const step of steps) {
      if (step.run) {
        if (isCancelled) {
          console.log(`Skipping step "${step.name}" due to cancellation.`);
          overallSuccess = false; // Mark as not fully successful if steps are skipped due to cancel
          continue; // Skip to next step
        }
        // Pass the progress utilities to the step executor
        await executeSqlStep(step, progressUtils);
      } else {
        console.log(`Skipping step "${step.name}" (disabled by configuration).`);
      }
    }

    // If we finished naturally (no errors thrown out)
    clearTimeout(mainTimeoutHandle); // Clear the main timeout

    if (isCancelled) {
      console.log("\n--- Calculation finished with cancellation ---");
      overallSuccess = false;
    } else {
      console.log("\n--- All enabled calculations finished successfully ---");
      progressUtils.clearProgress(); // Clear progress only on full success
    }

  } catch (error) {
    clearTimeout(mainTimeoutHandle); // Clear the main timeout
    console.error("\n--- SCRIPT EXECUTION FAILED ---");
    // Error details were already logged by executeSqlStep or global handlers
    overallSuccess = false;
    // Don't re-log the error here unless adding context
    // console.error("Overall failure reason:", error.message);
  } finally {
    await closePool();
    console.log(`Total execution time: ${progressUtils.formatElapsedTime(overallStartTime)}`);
    process.exit(overallSuccess ? 0 : 1);
  }
}

// --- Script Execution ---
if (require.main === module) {
  runAllCalculations();
} else {
  // Export functions if needed as a module (e.g., for testing or API)
  module.exports = {
    runAllCalculations,
    cancelCalculation,
    // Expose individual steps if useful, wrapping them slightly
    // (sqlFile paths carry the metrics-new/ prefix so they resolve the same
    // way as the entries in the steps array above)
    runDailySnapshots: () => executeSqlStep({ name: 'Daily Snapshots Update', sqlFile: 'metrics-new/update_daily_snapshots.sql', historyType: 'daily_snapshots', statusModule: 'daily_snapshots' }, progressUtils),
    runProductMetrics: () => executeSqlStep({ name: 'Product Metrics Update', sqlFile: 'metrics-new/update_product_metrics.sql', historyType: 'product_metrics', statusModule: 'product_metrics' }, progressUtils),
    runPeriodicMetrics: () => executeSqlStep({ name: 'Periodic Metrics Update', sqlFile: 'metrics-new/update_periodic_metrics.sql', historyType: 'periodic_metrics', statusModule: 'periodic_metrics' }, progressUtils),
    getProgress: progressUtils.getProgress
  };
}
@@ -1,558 +0,0 @@
const path = require('path');

// Change working directory to script directory
process.chdir(path.dirname(__filename));

require('dotenv').config({ path: path.resolve(__dirname, '..', '.env') });

// Configuration flags for controlling which metrics to calculate
// Set to 1 to skip the corresponding calculation, 0 to run it
const SKIP_PRODUCT_METRICS = 0;
const SKIP_TIME_AGGREGATES = 0;
const SKIP_FINANCIAL_METRICS = 0;
const SKIP_VENDOR_METRICS = 0;
const SKIP_CATEGORY_METRICS = 0;
const SKIP_BRAND_METRICS = 0;
const SKIP_SALES_FORECASTS = 0;

// Add error handler for uncaught exceptions
process.on('uncaughtException', (error) => {
  console.error('Uncaught Exception:', error);
  process.exit(1);
});

// Add error handler for unhandled promise rejections
process.on('unhandledRejection', (reason, promise) => {
  console.error('Unhandled Rejection at:', promise, 'reason:', reason);
  process.exit(1);
});

const progress = require('./metrics/utils/progress');
console.log('Progress module loaded:', {
  modulePath: require.resolve('./metrics/utils/progress'),
  exports: Object.keys(progress),
  currentDir: process.cwd(),
  scriptDir: __dirname
});

// Store progress functions in global scope to ensure availability
global.formatElapsedTime = progress.formatElapsedTime;
global.estimateRemaining = progress.estimateRemaining;
global.calculateRate = progress.calculateRate;
global.outputProgress = progress.outputProgress;
global.clearProgress = progress.clearProgress;
global.getProgress = progress.getProgress;
global.logError = progress.logError;

// List of temporary tables used in the calculation process
const TEMP_TABLES = [
  'temp_revenue_ranks',
  'temp_sales_metrics',
  'temp_purchase_metrics',
  'temp_product_metrics',
  'temp_vendor_metrics',
  'temp_category_metrics',
  'temp_brand_metrics',
  'temp_forecast_dates',
  'temp_daily_sales',
  'temp_product_stats',
  'temp_category_sales',
  'temp_category_stats',
  'temp_beginning_inventory',
  'temp_monthly_inventory'
];

// Add cleanup function for temporary tables
async function cleanupTemporaryTables(connection) {
  try {
    // Drop each temporary table if it exists
    for (const table of TEMP_TABLES) {
      await connection.query(`DROP TABLE IF EXISTS ${table}`);
    }
  } catch (err) {
    console.error('Error cleaning up temporary tables:', err);
  }
}

const { getConnection, closePool } = require('./metrics/utils/db');
const calculateProductMetrics = require('./metrics/product-metrics');
const calculateTimeAggregates = require('./metrics/time-aggregates');
const calculateFinancialMetrics = require('./metrics/financial-metrics');
const calculateVendorMetrics = require('./metrics/vendor-metrics');
const calculateCategoryMetrics = require('./metrics/category-metrics');
const calculateBrandMetrics = require('./metrics/brand-metrics');
const calculateSalesForecasts = require('./metrics/sales-forecasts');

// Add cancel handler
let isCancelled = false;

function cancelCalculation() {
  isCancelled = true;
  console.log('Calculation has been cancelled by user');

  // Force-terminate any query that's been running for more than 5 seconds
  try {
    const connection = getConnection();
    connection.then(async (conn) => {
      try {
        // Identify and terminate long-running queries from our application
        await conn.query(`
          SELECT pg_cancel_backend(pid)
          FROM pg_stat_activity
          WHERE query_start < now() - interval '5 seconds'
            AND application_name LIKE '%node%'
            AND query NOT LIKE '%pg_cancel_backend%'
        `);

        // Clean up any temporary tables
        await cleanupTemporaryTables(conn);

        // Release connection
        conn.release();
      } catch (err) {
        console.error('Error during force cancellation:', err);
        conn.release();
      }
    }).catch(err => {
      console.error('Could not get connection for cancellation:', err);
    });
  } catch (err) {
    console.error('Failed to terminate running queries:', err);
  }

  return {
    success: true,
    message: 'Calculation has been cancelled'
  };
}
// Handle SIGTERM signal for cancellation
process.on('SIGTERM', cancelCalculation);

// Update the main calculation function to use the new modular structure
async function calculateMetrics() {
  let connection;
  const startTime = Date.now();
  let processedProducts = 0;
  let processedOrders = 0;
  let processedPurchaseOrders = 0;
  let totalProducts = 0;
  let totalOrders = 0;
  let totalPurchaseOrders = 0;
  let calculateHistoryId;

  // Set a maximum execution time (30 minutes)
  const MAX_EXECUTION_TIME = 30 * 60 * 1000;
  const timeout = setTimeout(() => {
    console.error(`Calculation timed out after ${MAX_EXECUTION_TIME / 1000} seconds, forcing termination`);
    // Call cancel and force exit
    cancelCalculation();
    process.exit(1);
  }, MAX_EXECUTION_TIME);

  try {
    // Clean up any previously running calculations
    connection = await getConnection();
    await connection.query(`
      UPDATE calculate_history
      SET
        status = 'cancelled',
        end_time = NOW(),
        duration_seconds = EXTRACT(EPOCH FROM (NOW() - start_time))::INTEGER,
        error_message = 'Previous calculation was not completed properly'
      WHERE status = 'running'
    `);

    // Get counts from all relevant tables
    const [productCountResult, orderCountResult, poCountResult] = await Promise.all([
      connection.query('SELECT COUNT(*) as total FROM products'),
      connection.query('SELECT COUNT(*) as total FROM orders'),
      connection.query('SELECT COUNT(*) as total FROM purchase_orders')
    ]);

    totalProducts = parseInt(productCountResult.rows[0].total);
    totalOrders = parseInt(orderCountResult.rows[0].total);
    totalPurchaseOrders = parseInt(poCountResult.rows[0].total);

    // Create history record for this calculation
    const historyResult = await connection.query(`
      INSERT INTO calculate_history (
        start_time,
        status,
        total_products,
        total_orders,
        total_purchase_orders,
        additional_info
      ) VALUES (
        NOW(),
        'running',
        $1,
        $2,
        $3,
        jsonb_build_object(
          'skip_product_metrics', ($4::int > 0),
          'skip_time_aggregates', ($5::int > 0),
          'skip_financial_metrics', ($6::int > 0),
          'skip_vendor_metrics', ($7::int > 0),
          'skip_category_metrics', ($8::int > 0),
          'skip_brand_metrics', ($9::int > 0),
          'skip_sales_forecasts', ($10::int > 0)
        )
      ) RETURNING id
    `, [
      totalProducts,
      totalOrders,
      totalPurchaseOrders,
      SKIP_PRODUCT_METRICS,
      SKIP_TIME_AGGREGATES,
      SKIP_FINANCIAL_METRICS,
      SKIP_VENDOR_METRICS,
      SKIP_CATEGORY_METRICS,
      SKIP_BRAND_METRICS,
      SKIP_SALES_FORECASTS
    ]);
    calculateHistoryId = historyResult.rows[0].id;

    // Add debug logging for the progress functions
    console.log('Debug - Progress functions:', {
      formatElapsedTime: typeof global.formatElapsedTime,
      estimateRemaining: typeof global.estimateRemaining,
      calculateRate: typeof global.calculateRate,
      startTime: startTime
    });

    try {
      const elapsed = global.formatElapsedTime(startTime);
      console.log('Debug - formatElapsedTime test successful:', elapsed);
    } catch (err) {
      console.error('Debug - Error testing formatElapsedTime:', err);
      throw err;
    }

    // Release the connection before getting a new one
    connection.release();
    isCancelled = false;
    connection = await getConnection();

    try {
      global.outputProgress({
        status: 'running',
        operation: 'Starting metrics calculation',
        current: 0,
        total: 100,
        elapsed: '0s',
        remaining: 'Calculating...',
        rate: 0,
        percentage: '0',
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
        }
      });

      // Update progress periodically
      const updateProgress = async (products = null, orders = null, purchaseOrders = null) => {
        // Ensure all values are valid numbers or default to previous value
        if (products !== null) processedProducts = Number(products) || processedProducts || 0;
        if (orders !== null) processedOrders = Number(orders) || processedOrders || 0;
        if (purchaseOrders !== null) processedPurchaseOrders = Number(purchaseOrders) || processedPurchaseOrders || 0;

        // Ensure we never send NaN to the database
        const safeProducts = Number(processedProducts) || 0;
        const safeOrders = Number(processedOrders) || 0;
        const safePurchaseOrders = Number(processedPurchaseOrders) || 0;

        await connection.query(`
          UPDATE calculate_history
          SET
            processed_products = $1,
            processed_orders = $2,
            processed_purchase_orders = $3
          WHERE id = $4
        `, [safeProducts, safeOrders, safePurchaseOrders, calculateHistoryId]);
      };

      // Helper function to ensure valid progress numbers
      const ensureValidProgress = (current, total) => ({
        current: Number(current) || 0,
        total: Number(total) || 1, // Default to 1 to avoid division by zero
        percentage: (((Number(current) || 0) / (Number(total) || 1)) * 100).toFixed(1)
      });
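      // Example: ensureValidProgress(25, 200) returns
      // { current: 25, total: 200, percentage: '12.5' }; a zero or undefined
      // total falls back to 1 so the percentage math never divides by zero.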
      // Initial progress
      const initialProgress = ensureValidProgress(0, totalProducts);
      global.outputProgress({
        status: 'running',
        operation: 'Starting metrics calculation',
        current: initialProgress.current,
        total: initialProgress.total,
        elapsed: '0s',
        remaining: 'Calculating...',
        rate: 0,
        percentage: initialProgress.percentage,
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
        }
      });

      if (!SKIP_PRODUCT_METRICS) {
        const result = await calculateProductMetrics(startTime, totalProducts);
        await updateProgress(result.processedProducts, result.processedOrders, result.processedPurchaseOrders);
        if (!result.success) {
          throw new Error('Product metrics calculation failed');
        }
      } else {
        console.log('Skipping product metrics calculation...');
        processedProducts = Math.floor(totalProducts * 0.6);
        await updateProgress(processedProducts);
        global.outputProgress({
          status: 'running',
          operation: 'Skipping product metrics calculation',
          current: processedProducts,
          total: totalProducts,
          elapsed: global.formatElapsedTime(startTime),
          remaining: global.estimateRemaining(startTime, processedProducts, totalProducts),
          rate: global.calculateRate(startTime, processedProducts),
          percentage: '60',
          timing: {
            start_time: new Date(startTime).toISOString(),
            end_time: new Date().toISOString(),
            elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
          }
        });
      }

      // Calculate time-based aggregates
      if (!SKIP_TIME_AGGREGATES) {
        const result = await calculateTimeAggregates(startTime, totalProducts, processedProducts);
        await updateProgress(result.processedProducts, result.processedOrders, result.processedPurchaseOrders);
        if (!result.success) {
          throw new Error('Time aggregates calculation failed');
        }
      } else {
        console.log('Skipping time aggregates calculation');
      }

      // Calculate financial metrics
      if (!SKIP_FINANCIAL_METRICS) {
        const result = await calculateFinancialMetrics(startTime, totalProducts, processedProducts);
        await updateProgress(result.processedProducts, result.processedOrders, result.processedPurchaseOrders);
        if (!result.success) {
          throw new Error('Financial metrics calculation failed');
        }
      } else {
        console.log('Skipping financial metrics calculation');
      }

      // Calculate vendor metrics
      if (!SKIP_VENDOR_METRICS) {
        const result = await calculateVendorMetrics(startTime, totalProducts, processedProducts);
        await updateProgress(result.processedProducts, result.processedOrders, result.processedPurchaseOrders);
        if (!result.success) {
          throw new Error('Vendor metrics calculation failed');
        }
      } else {
        console.log('Skipping vendor metrics calculation');
      }

      // Calculate category metrics
      if (!SKIP_CATEGORY_METRICS) {
        const result = await calculateCategoryMetrics(startTime, totalProducts, processedProducts);
        await updateProgress(result.processedProducts, result.processedOrders, result.processedPurchaseOrders);
        if (!result.success) {
          throw new Error('Category metrics calculation failed');
        }
      } else {
        console.log('Skipping category metrics calculation');
      }

      // Calculate brand metrics
      if (!SKIP_BRAND_METRICS) {
        const result = await calculateBrandMetrics(startTime, totalProducts, processedProducts);
        await updateProgress(result.processedProducts, result.processedOrders, result.processedPurchaseOrders);
        if (!result.success) {
          throw new Error('Brand metrics calculation failed');
        }
      } else {
        console.log('Skipping brand metrics calculation');
      }

      // Calculate sales forecasts
      if (!SKIP_SALES_FORECASTS) {
        const result = await calculateSalesForecasts(startTime, totalProducts, processedProducts);
        await updateProgress(result.processedProducts, result.processedOrders, result.processedPurchaseOrders);
        if (!result.success) {
          throw new Error('Sales forecasts calculation failed');
        }
      } else {
        console.log('Skipping sales forecasts calculation');
      }

      // Final progress update with guaranteed valid numbers
      const finalProgress = ensureValidProgress(totalProducts, totalProducts);

      // Final success message
      global.outputProgress({
        status: 'complete',
        operation: 'Metrics calculation complete',
        current: finalProgress.current,
        total: finalProgress.total,
        elapsed: global.formatElapsedTime(startTime),
        remaining: '0s',
        rate: global.calculateRate(startTime, finalProgress.current),
        percentage: '100',
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
        }
      });
      // Ensure all values are valid numbers before final update
      const finalStats = {
        processedProducts: Number(processedProducts) || 0,
        processedOrders: Number(processedOrders) || 0,
        processedPurchaseOrders: Number(processedPurchaseOrders) || 0
      };

      // Update history with completion
      await connection.query(`
        UPDATE calculate_history
        SET
          end_time = NOW(),
          duration_seconds = $1,
          processed_products = $2,
          processed_orders = $3,
          processed_purchase_orders = $4,
          status = 'completed'
        WHERE id = $5
      `, [Math.round((Date.now() - startTime) / 1000),
        finalStats.processedProducts,
        finalStats.processedOrders,
        finalStats.processedPurchaseOrders,
        calculateHistoryId]);

      // Clear progress file on successful completion
      global.clearProgress();

      return {
        success: true,
        message: 'Calculation completed successfully',
        duration: Math.round((Date.now() - startTime) / 1000)
      };
    } catch (error) {
      const endTime = Date.now();
      const totalElapsedSeconds = Math.round((endTime - startTime) / 1000);

      // Update history with error
      await connection.query(`
        UPDATE calculate_history
        SET
          end_time = NOW(),
          duration_seconds = $1,
          processed_products = $2,
          processed_orders = $3,
          processed_purchase_orders = $4,
          status = $5,
          error_message = $6
        WHERE id = $7
      `, [
        totalElapsedSeconds,
        processedProducts || 0, // Ensure we have a valid number
        processedOrders || 0, // Ensure we have a valid number
        processedPurchaseOrders || 0, // Ensure we have a valid number
        isCancelled ? 'cancelled' : 'failed',
        error.message,
        calculateHistoryId
      ]);

      if (isCancelled) {
        global.outputProgress({
          status: 'cancelled',
          operation: 'Calculation cancelled',
          current: processedProducts,
          total: totalProducts || 0,
          elapsed: global.formatElapsedTime(startTime),
          remaining: null,
          rate: global.calculateRate(startTime, processedProducts),
          percentage: ((processedProducts / (totalProducts || 1)) * 100).toFixed(1),
          timing: {
            start_time: new Date(startTime).toISOString(),
            end_time: new Date().toISOString(),
            elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
          }
        });
      } else {
        global.outputProgress({
          status: 'error',
          operation: 'Error: ' + error.message,
          current: processedProducts,
          total: totalProducts || 0,
          elapsed: global.formatElapsedTime(startTime),
          remaining: null,
          rate: global.calculateRate(startTime, processedProducts),
          percentage: ((processedProducts / (totalProducts || 1)) * 100).toFixed(1),
          timing: {
            start_time: new Date(startTime).toISOString(),
            end_time: new Date().toISOString(),
            elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
          }
        });
      }
      throw error;
    } finally {
      // Clear the timeout to prevent forced termination
      clearTimeout(timeout);

      // Always clean up and release connection
      if (connection) {
        try {
          await cleanupTemporaryTables(connection);
          connection.release();
        } catch (err) {
          console.error('Error in final cleanup:', err);
        }
      }
    }
  } catch (error) {
    console.error('Error in metrics calculation', error);

    try {
      if (connection) {
        await connection.query(`
          UPDATE calculate_history
          SET
            status = 'failed',
            end_time = NOW(),
            duration_seconds = EXTRACT(EPOCH FROM (NOW() - start_time))::INTEGER,
            error_message = $1
          WHERE id = $2
        `, [error.message.substring(0, 500), calculateHistoryId]);
      }
    } catch (updateError) {
      console.error('Error updating calculation history:', updateError);
    }

    throw error;
  }
}

// Export as a module with all necessary functions
module.exports = {
  calculateMetrics,
  cancelCalculation,
  getProgress: global.getProgress
};

// Run directly if called from command line
if (require.main === module) {
  calculateMetrics().catch(error => {
    if (!error.message.includes('Operation cancelled')) {
      console.error('Error:', error);
    }
    process.exit(1);
  });
}

inventory-server/scripts/metrics-new/backfill-snapshots.js (new file, 426 lines)
@@ -0,0 +1,426 @@
const path = require('path');
const fs = require('fs');
const progress = require('./utils/progress'); // Assuming progress utils are here
const { getConnection, closePool } = require('./utils/db'); // Assuming db utils are here
const os = require('os'); // For detecting number of CPU cores

// --- Configuration ---
const BATCH_SIZE_DAYS = 1; // Process 1 day per database function call
const SQL_FUNCTION_FILE = path.resolve(__dirname, 'backfill_historical_snapshots.sql'); // Correct path
const LOG_PROGRESS_INTERVAL_MS = 5000; // Update console progress roughly every 5 seconds
const HISTORY_TYPE = 'backfill_snapshots'; // Identifier for history table
const MAX_WORKERS = Math.max(1, Math.floor(os.cpus().length / 2)); // Use half of available CPU cores
const USE_PARALLEL = false; // Set to true to enable parallel processing
const PG_STATEMENT_TIMEOUT_MS = 1800000; // 30 minutes max per query

// --- Cancellation Handling ---
let isCancelled = false;
let runningQueryPromise = null; // To potentially track the active query

function requestCancellation() {
  if (!isCancelled) {
    isCancelled = true;
    console.warn('\nCancellation requested. Finishing current batch then stopping...');
    // Note: We are NOT forcefully cancelling the backend query anymore.
  }
}

process.on('SIGINT', requestCancellation); // Handle Ctrl+C
process.on('SIGTERM', requestCancellation); // Handle termination signals

// --- Main Backfill Function ---
async function backfillSnapshots(cmdStartDate, cmdEndDate, cmdStartBatch = 1) {
  let connection;
  const overallStartTime = Date.now();
  let calculateHistoryId = null;
  let processedDaysTotal = 0; // Track total days processed across all batches executed in this run
  let currentBatchNum = cmdStartBatch > 0 ? cmdStartBatch : 1;
  let totalBatches = 0; // Initialize totalBatches
  let totalDays = 0; // Initialize totalDays

  console.log(`Starting snapshot backfill process...`);
  console.log(`SQL Function definition file: ${SQL_FUNCTION_FILE}`);
  if (!fs.existsSync(SQL_FUNCTION_FILE)) {
    console.error(`FATAL: SQL file not found at ${SQL_FUNCTION_FILE}`);
    process.exit(1); // Exit early if file doesn't exist
  }

  try {
    // Set up a connection with higher memory limits
    connection = await getConnection({
      // Add performance-related settings
      application_name: 'backfill_snapshots',
      statement_timeout: PG_STATEMENT_TIMEOUT_MS, // 30 min timeout per statement
      // These parameters may need to be configured in your database:
      // work_mem: '1GB',
      // maintenance_work_mem: '2GB',
      // temp_buffers: '1GB',
    });

    console.log('Database connection acquired.');

    // --- Ensure Function Exists ---
    console.log('Ensuring database function is up-to-date...');
    try {
      const sqlFunctionDef = fs.readFileSync(SQL_FUNCTION_FILE, 'utf8');
      if (!sqlFunctionDef.includes('CREATE OR REPLACE FUNCTION backfill_daily_snapshots_range_final')) {
        throw new Error(`SQL file ${SQL_FUNCTION_FILE} does not seem to contain the function definition.`);
      }
      await connection.query(sqlFunctionDef); // Execute the whole file
      console.log('Database function `backfill_daily_snapshots_range_final` created/updated.');

      // Add performance query hints to the database
      await connection.query(`
        -- Analyze tables for better query planning
        ANALYZE public.products;
        ANALYZE public.imported_daily_inventory;
        ANALYZE public.imported_product_stat_history;
        ANALYZE public.daily_product_snapshots;
        ANALYZE public.imported_product_current_prices;
      `).catch(err => {
        // Non-fatal if analyze fails
        console.warn('Failed to analyze tables (non-fatal):', err.message);
      });

    } catch (err) {
      console.error(`Error processing SQL function file ${SQL_FUNCTION_FILE}:`, err);
      throw new Error(`Failed to create or replace DB function: ${err.message}`);
    }

    // --- Prepare History Record ---
    console.log('Preparing calculation history record...');
    // Ensure history table exists (optional, could be done elsewhere)
    await connection.query(`
      CREATE TABLE IF NOT EXISTS public.calculate_history (
        id SERIAL PRIMARY KEY,
        start_time TIMESTAMPTZ NOT NULL DEFAULT NOW(),
        end_time TIMESTAMPTZ,
        duration_seconds INTEGER,
        status VARCHAR(20) NOT NULL, -- e.g., 'running', 'completed', 'failed', 'cancelled'
        error_message TEXT,
        additional_info JSONB -- Store type, file, batch info etc.
      );
    `);
    // Mark previous runs of this type as potentially failed if they were left 'running'
    await connection.query(`
      UPDATE public.calculate_history
      SET status = 'failed', error_message = 'Interrupted by new run.'
      WHERE status = 'running' AND additional_info->>'type' = $1;
    `, [HISTORY_TYPE]);

    // Create new history record
    const historyResult = await connection.query(`
      INSERT INTO public.calculate_history (start_time, status, additional_info)
      VALUES (NOW(), 'running', jsonb_build_object('type', $1::text, 'sql_file', $2::text, 'start_batch', $3::integer))
      RETURNING id;
    `, [HISTORY_TYPE, path.basename(SQL_FUNCTION_FILE), cmdStartBatch]);
    calculateHistoryId = historyResult.rows[0].id;
    console.log(`Calculation history record created with ID: ${calculateHistoryId}`);

    // --- Determine Date Range ---
    console.log('Determining date range...');
    let effectiveStartDate, effectiveEndDate;

    // Use command-line dates if provided, otherwise query DB
    if (cmdStartDate) {
      effectiveStartDate = cmdStartDate;
    } else {
      const minDateResult = await connection.query(`
        SELECT LEAST(
          COALESCE((SELECT MIN(date) FROM public.imported_daily_inventory WHERE date > '1970-01-01'), CURRENT_DATE),
          COALESCE((SELECT MIN(date) FROM public.imported_product_stat_history WHERE date > '1970-01-01'), CURRENT_DATE)
        )::date as min_date;
      `);
      effectiveStartDate = minDateResult.rows[0]?.min_date || new Date().toISOString().split('T')[0]; // Fallback
      console.log(`Auto-detected start date: ${effectiveStartDate}`);
    }

    if (cmdEndDate) {
      effectiveEndDate = cmdEndDate;
    } else {
      const maxDateResult = await connection.query(`
        SELECT GREATEST(
          COALESCE((SELECT MAX(date) FROM public.imported_daily_inventory WHERE date < CURRENT_DATE), '1970-01-01'::date),
          COALESCE((SELECT MAX(date) FROM public.imported_product_stat_history WHERE date < CURRENT_DATE), '1970-01-01'::date)
        )::date as max_date;
      `);
      // Ensure end date is not today or in the future
      effectiveEndDate = maxDateResult.rows[0]?.max_date || new Date(Date.now() - 86400000).toISOString().split('T')[0]; // Default yesterday
      if (new Date(effectiveEndDate) >= new Date(new Date().toISOString().split('T')[0])) {
        effectiveEndDate = new Date(Date.now() - 86400000).toISOString().split('T')[0]; // Set to yesterday if >= today
      }
      console.log(`Auto-detected end date: ${effectiveEndDate}`);
    }

    // Validate dates
    const dStart = new Date(effectiveStartDate);
    const dEnd = new Date(effectiveEndDate);
    if (isNaN(dStart.getTime()) || isNaN(dEnd.getTime()) || dStart > dEnd) {
      throw new Error(`Invalid date range: Start "${effectiveStartDate}", End "${effectiveEndDate}"`);
    }

    // --- Batch Processing ---
    totalDays = Math.ceil((dEnd - dStart) / (1000 * 60 * 60 * 24)) + 1; // Inclusive
    totalBatches = Math.ceil(totalDays / BATCH_SIZE_DAYS);
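    // Example: a range of 2024-01-01 through 2024-01-10 yields totalDays = 10
    // (the "+ 1" makes the range inclusive), so with BATCH_SIZE_DAYS = 1 the
    // loop below runs totalBatches = 10 times.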
|
||||
|
||||
console.log(`Target Date Range: ${effectiveStartDate} to ${effectiveEndDate} (${totalDays} days)`);
|
||||
console.log(`Total Batches: ${totalBatches} (Batch Size: ${BATCH_SIZE_DAYS} days)`);
|
||||
console.log(`Starting from Batch: ${currentBatchNum}`);
|
||||
|
||||
// Initial progress update
|
||||
progress.outputProgress({
|
||||
status: 'running',
|
||||
operation: 'Starting Batch Processing',
|
||||
currentBatch: currentBatchNum,
|
||||
totalBatches: totalBatches,
|
||||
totalDays: totalDays,
|
||||
elapsed: '0s',
|
||||
remaining: 'Calculating...',
|
||||
rate: 0,
|
||||
historyId: calculateHistoryId // Include history ID in the object
|
||||
});
|
||||
|
||||
while (currentBatchNum <= totalBatches && !isCancelled) {
|
||||
const batchOffset = (currentBatchNum - 1) * BATCH_SIZE_DAYS;
|
||||
const batchStartDate = new Date(dStart);
|
||||
batchStartDate.setDate(dStart.getDate() + batchOffset);
|
||||
|
||||
const batchEndDate = new Date(batchStartDate);
|
||||
batchEndDate.setDate(batchStartDate.getDate() + BATCH_SIZE_DAYS - 1);
|
||||
|
||||
// Clamp batch end date to the overall effective end date
|
||||
if (batchEndDate > dEnd) {
|
||||
batchEndDate.setTime(dEnd.getTime());
|
||||
}
      const batchStartDateStr = batchStartDate.toISOString().split('T')[0];
      const batchEndDateStr = batchEndDate.toISOString().split('T')[0];
      const batchStartTime = Date.now();

      console.log(`\n--- Processing Batch ${currentBatchNum} / ${totalBatches} ---`);
      console.log(` Dates: ${batchStartDateStr} to ${batchEndDateStr}`);

      // Execute the function for the batch
      try {
        progress.outputProgress({
          status: 'running',
          operation: `Executing DB function for batch ${currentBatchNum}...`,
          currentBatch: currentBatchNum,
          totalBatches: totalBatches,
          totalDays: totalDays,
          elapsed: progress.formatElapsedTime(overallStartTime),
          remaining: 'Executing...',
          rate: 0,
          historyId: calculateHistoryId
        });

        // Performance improvement: Add batch processing hint
        await connection.query('SET LOCAL enable_parallel_append = on; SET LOCAL enable_parallel_hash = on; SET LOCAL max_parallel_workers_per_gather = 4;');
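        // Note: SET LOCAL is transaction-scoped; without an explicit BEGIN these
        // planner hints may not carry over to the query issued below.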

        // Store promise in case we need to try and cancel (though not implemented forcefully)
        runningQueryPromise = connection.query(
          `SELECT backfill_daily_snapshots_range_final($1::date, $2::date);`,
          [batchStartDateStr, batchEndDateStr]
        );
        await runningQueryPromise; // Wait for the function call to complete
        runningQueryPromise = null; // Clear the promise

        const batchDurationMs = Date.now() - batchStartTime;
        const daysInThisBatch = Math.ceil((batchEndDate - batchStartDate) / (1000 * 60 * 60 * 24)) + 1;
        processedDaysTotal += daysInThisBatch;

        console.log(` Batch ${currentBatchNum} completed in ${progress.formatElapsedTime(batchStartTime)}.`);

        // --- Update Progress & History ---
        const overallElapsedSec = Math.round((Date.now() - overallStartTime) / 1000);
        progress.outputProgress({
          status: 'running',
          operation: `Completed batch ${currentBatchNum}`,
          currentBatch: currentBatchNum,
          totalBatches: totalBatches,
          totalDays: totalDays,
          processedDays: processedDaysTotal,
          elapsed: progress.formatElapsedTime(overallStartTime),
          remaining: progress.estimateRemaining(overallStartTime, processedDaysTotal, totalDays),
          rate: progress.calculateRate(overallStartTime, processedDaysTotal),
          batchDuration: progress.formatElapsedTime(batchStartTime),
          historyId: calculateHistoryId
        });

        // Save checkpoint in history
        await connection.query(`
          UPDATE public.calculate_history
          SET additional_info = jsonb_set(additional_info, '{last_completed_batch}', $1::jsonb)
                              || jsonb_build_object('last_processed_date', $2::text)
          WHERE id = $3::integer;
        `, [JSON.stringify(currentBatchNum), batchEndDateStr, calculateHistoryId]);
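        // After this update, additional_info looks roughly like (illustrative values):
        //   { ..., "last_completed_batch": 12, "last_processed_date": "2023-03-15" }
        // which is what lets a rerun resume via --start-batch.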

      } catch (batchError) {
        console.error(`\n--- ERROR in Batch ${currentBatchNum} (${batchStartDateStr} to ${batchEndDateStr}) ---`);
        console.error(' Database Error:', batchError.message);
        console.error(' DB Error Code:', batchError.code);
        // Log detailed error to history and re-throw to stop the process
        await connection.query(`
          UPDATE public.calculate_history
          SET status = 'failed',
              end_time = NOW(),
              duration_seconds = $1::integer,
              error_message = $2::text,
              additional_info = additional_info || jsonb_build_object('failed_batch', $3::integer, 'failed_date_range', $4::text)
          WHERE id = $5::integer;
        `, [
          Math.round((Date.now() - overallStartTime) / 1000),
          `Batch ${currentBatchNum} failed: ${batchError.message} (Code: ${batchError.code || 'N/A'})`,
          currentBatchNum,
          `${batchStartDateStr} to ${batchEndDateStr}`,
          calculateHistoryId
        ]);
        throw batchError; // Stop execution
      }

      currentBatchNum++;
      // Optional delay between batches
      // await new Promise(resolve => setTimeout(resolve, 500));

    } // End while loop

    // --- Final Outcome ---
    const finalStatus = isCancelled ? 'cancelled' : 'completed';
    const finalMessage = isCancelled ? `Calculation stopped after completing batch ${currentBatchNum - 1}.` : 'Historical snapshots backfill completed successfully.';
    const finalDurationSec = Math.round((Date.now() - overallStartTime) / 1000);

    console.log(`\n--- Backfill ${finalStatus.toUpperCase()} ---`);
    console.log(finalMessage);
    console.log(`Total duration: ${progress.formatElapsedTime(overallStartTime)}`);

    // Update history record
    await connection.query(`
      UPDATE public.calculate_history SET status = $1::calculation_status, end_time = NOW(), duration_seconds = $2::integer, error_message = $3
      WHERE id = $4::integer;
    `, [finalStatus, finalDurationSec, (isCancelled ? 'User cancelled' : null), calculateHistoryId]);

    if (!isCancelled) {
      progress.clearProgress(); // Clear progress state only on successful completion
    } else {
      progress.outputProgress({ // Final cancelled status update
        status: 'cancelled',
        operation: finalMessage,
        currentBatch: currentBatchNum - 1,
        totalBatches: totalBatches,
        totalDays: totalDays,
        processedDays: processedDaysTotal,
        elapsed: progress.formatElapsedTime(overallStartTime),
        remaining: 'Cancelled',
        rate: 0,
        historyId: calculateHistoryId
      });
    }

    return { success: true, status: finalStatus, message: finalMessage, duration: finalDurationSec };

  } catch (error) {
    console.error('\n--- Backfill encountered an unrecoverable error ---');
    console.error(error.message);
    const finalDurationSec = Math.round((Date.now() - overallStartTime) / 1000);

    // Update history if possible
    if (connection && calculateHistoryId) {
      try {
        await connection.query(`
          UPDATE public.calculate_history
          SET status = $1::calculation_status, end_time = NOW(), duration_seconds = $2::integer, error_message = $3::text
          WHERE id = $4::integer;
        `, [
          isCancelled ? 'cancelled' : 'failed',
          finalDurationSec,
          error.message,
          calculateHistoryId
        ]);
      } catch (histError) {
        console.error("Failed to update history record with error state:", histError);
      }
    } else {
      console.error("Could not update history record (no ID or connection).");
    }

    // FIX: Use initialized value or a default if loop never started
    const batchNumForError = currentBatchNum > cmdStartBatch ? currentBatchNum - 1 : cmdStartBatch - 1;

    // Update progress.outputProgress call to match actual function signature
    try {
      // Create progress data object
      const progressData = {
        status: 'failed',
        operation: 'Backfill failed',
        message: error.message,
        currentBatch: batchNumForError,
        totalBatches: totalBatches,
        totalDays: totalDays,
        processedDays: processedDaysTotal,
        elapsed: progress.formatElapsedTime(overallStartTime),
        remaining: 'Failed',
        rate: 0,
        // Include history ID in progress data if needed
        historyId: calculateHistoryId
      };

      // Call with single object parameter (not separate historyId)
      progress.outputProgress(progressData);
    } catch (progressError) {
      console.error('Failed to report progress:', progressError);
    }

    return { success: false, status: 'failed', error: error.message, duration: finalDurationSec };

  } finally {
    if (connection) {
      console.log('Releasing database connection.');
      connection.release();
    }
    // Close pool only if this script is meant to be standalone
    // If part of a larger app, the app should manage pool closure
    // console.log('Closing database pool.');
    // await closePool();
  }
}

// --- Script Execution ---

// Parse command-line arguments
const args = process.argv.slice(2);
let cmdStartDateArg, cmdEndDateArg, cmdStartBatchArg = 1; // Default start batch is 1

for (let i = 0; i < args.length; i++) {
  if (args[i] === '--start-date' && args[i+1]) cmdStartDateArg = args[++i];
  else if (args[i] === '--end-date' && args[i+1]) cmdEndDateArg = args[++i];
  else if (args[i] === '--start-batch' && args[i+1]) cmdStartBatchArg = parseInt(args[++i], 10);
}

if (isNaN(cmdStartBatchArg) || cmdStartBatchArg < 1) {
  console.warn(`Invalid --start-batch value. Defaulting to 1.`);
  cmdStartBatchArg = 1;
}

// Run the backfill process
backfillSnapshots(cmdStartDateArg, cmdEndDateArg, cmdStartBatchArg)
  .then(result => {
    if (result.success) {
      console.log(`\n✅ ${result.message} (Duration: ${result.duration}s)`);
      process.exitCode = 0; // Success
    } else {
      console.error(`\n❌ Backfill failed: ${result.error || 'Unknown error'} (Duration: ${result.duration}s)`);
      process.exitCode = 1; // Failure
    }
  })
  .catch(err => {
    console.error('\n❌ Unexpected error during backfill execution:', err);
    process.exitCode = 1; // Failure
  })
  .finally(async () => {
    // Ensure pool is closed if run standalone
    console.log('Backfill script finished. Closing pool.');
    await closePool(); // Make sure closePool exists and works in your db utils
    process.exit(process.exitCode); // Exit with appropriate code
  });
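
// Example invocations (script filename shown for illustration only):
//   node backfill-snapshots.js                                   // auto-detect both dates
//   node backfill-snapshots.js --start-date 2023-01-01 --end-date 2023-06-30
//   node backfill-snapshots.js --start-batch 12                  // resume a failed run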
@@ -0,0 +1,161 @@
-- Description: Backfills the daily_product_snapshots table using imported historical unit data
--              (daily inventory/stats) and historical price data (current prices table).
--              - Uses imported daily sales/receipt UNIT counts for accuracy.
--              - ESTIMATES historical stock levels using a forward calculation.
--              - APPROXIMATES historical REVENUE using looked-up historical base prices.
--              - APPROXIMATES historical COGS, PROFIT, and STOCK VALUE using CURRENT product costs/prices.
-- Run ONCE after importing historical data and before initial product_metrics population.
-- Dependencies: Core import tables (products), imported history tables (imported_daily_inventory,
--               imported_product_stat_history, imported_product_current_prices),
--               daily_product_snapshots table must exist.
-- Frequency: Run ONCE.

CREATE OR REPLACE FUNCTION backfill_daily_snapshots_range_final(
    _start_date DATE,
    _end_date DATE
)
RETURNS VOID AS $$
DECLARE
    _current_processing_date DATE := _start_date;
    _batch_start_time TIMESTAMPTZ;
    _row_count INTEGER;
BEGIN
    RAISE NOTICE 'Starting FINAL historical snapshot backfill from % to %.', _start_date, _end_date;
    RAISE NOTICE 'Using historical units and historical prices (for revenue approximation).';
    RAISE NOTICE 'WARNING: Historical COGS, Profit, and Stock Value use CURRENT product costs/prices.';

    -- Ensure end date is not in the future
    IF _end_date >= CURRENT_DATE THEN
        _end_date := CURRENT_DATE - INTERVAL '1 day';
        RAISE NOTICE 'Adjusted end date to % to avoid conflict with hourly script.', _end_date;
    END IF;

    -- Performance: Create temporary table with product info to avoid repeated lookups
    CREATE TEMP TABLE IF NOT EXISTS temp_product_info AS
    SELECT
        pid,
        sku,
        COALESCE(landing_cost_price, cost_price, 0.00) as effective_cost_price,
        COALESCE(price, 0.00) as current_price,
        COALESCE(regular_price, 0.00) as current_regular_price
    FROM public.products;

    -- Performance: Create index on temporary table
    CREATE INDEX IF NOT EXISTS temp_product_info_pid_idx ON temp_product_info(pid);

    ANALYZE temp_product_info;

    RAISE NOTICE 'Created temporary product info table with % products', (SELECT COUNT(*) FROM temp_product_info);

    WHILE _current_processing_date <= _end_date LOOP
        _batch_start_time := clock_timestamp();
        RAISE NOTICE 'Processing date: %', _current_processing_date;

        -- Get Daily Transaction Unit Info from imported history
        WITH DailyHistoryUnits AS (
            SELECT
                pids.pid,
                -- Prioritize daily_inventory, fallback to product_stat_history for sold qty
                COALESCE(di.amountsold, ps.qty_sold, 0)::integer as units_sold_today,
                COALESCE(di.qtyreceived, 0)::integer as units_received_today
            FROM
                (SELECT DISTINCT pid FROM temp_product_info) pids -- Ensure all products are considered
            LEFT JOIN public.imported_daily_inventory di
                ON pids.pid = di.pid AND di.date = _current_processing_date
            LEFT JOIN public.imported_product_stat_history ps
                ON pids.pid = ps.pid AND ps.date = _current_processing_date
            -- Removed WHERE clause to ensure snapshots are created even for days with 0 activity,
            -- allowing stock carry-over. The main query will handle products properly.
        ),
        HistoricalPrice AS (
            -- Find the base price (qty_buy=1) active on the processing date
            SELECT DISTINCT ON (pid)
                pid,
                price_each
            FROM public.imported_product_current_prices
            WHERE
                qty_buy = 1
                -- Use TIMESTAMPTZ comparison logic:
                AND date_active <= (_current_processing_date + interval '1 day' - interval '1 second') -- Active sometime on or before end of processing day
                AND (date_deactive IS NULL OR date_deactive > _current_processing_date) -- Not deactivated before start of processing day
                -- Assuming 'active' flag isn't needed if dates are correct; add 'AND active != 0' if necessary
            ORDER BY
                pid, date_active DESC -- Get the most recently activated price
        ),
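        -- e.g. if a pid has base prices activated 2022-12-01 and 2023-01-10 (neither deactivated),
        -- processing 2023-01-15 keeps only the 2023-01-10 row: DISTINCT ON (pid) with
        -- ORDER BY pid, date_active DESC retains the most recently activated match.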
        PreviousStock AS (
            -- Get the estimated stock from the PREVIOUS day snapshot
            SELECT pid, eod_stock_quantity
            FROM public.daily_product_snapshots
            WHERE snapshot_date = _current_processing_date - INTERVAL '1 day'
        )
        -- Insert into the daily snapshots table
        INSERT INTO public.daily_product_snapshots (
            snapshot_date, pid, sku,
            eod_stock_quantity, eod_stock_cost, eod_stock_retail, eod_stock_gross, stockout_flag,
            units_sold, units_returned,
            gross_revenue, discounts, returns_revenue,
            net_revenue, cogs, gross_regular_revenue, profit,
            units_received, cost_received,
            calculation_timestamp
        )
        SELECT
            _current_processing_date AS snapshot_date,
            p.pid,
            p.sku,
            -- Estimated EOD Stock (using historical daily units)
            -- Handle potential NULL from joins with COALESCE 0
            COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0) AS estimated_eod_stock,
            -- Valued Stock (using estimated stock and CURRENT prices/costs - APPROXIMATION)
            GREATEST(0, COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0)) * p.effective_cost_price AS eod_stock_cost,
            GREATEST(0, COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0)) * p.current_price AS eod_stock_retail, -- Stock retail uses current price
            GREATEST(0, COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0)) * p.current_regular_price AS eod_stock_gross, -- Stock gross uses current regular price
            -- Stockout Flag (based on estimated stock)
            (COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0)) <= 0 AS stockout_flag,

            -- Today's Unit Aggregates from History
            COALESCE(dh.units_sold_today, 0) as units_sold,
            0 AS units_returned, -- Placeholder: Cannot determine returns from daily summary

            -- Monetary Values using looked-up Historical Price and CURRENT Cost/RegPrice
            COALESCE(dh.units_sold_today, 0) * COALESCE(hp.price_each, p.current_price) AS gross_revenue, -- Approx Revenue
            0 AS discounts, -- Placeholder
            0 AS returns_revenue, -- Placeholder
            COALESCE(dh.units_sold_today, 0) * COALESCE(hp.price_each, p.current_price) AS net_revenue, -- Approx Net Revenue
            COALESCE(dh.units_sold_today, 0) * p.effective_cost_price AS cogs, -- Approx COGS (uses CURRENT cost)
            COALESCE(dh.units_sold_today, 0) * p.current_regular_price AS gross_regular_revenue, -- Approx Gross Regular Revenue
            -- Approx Profit
            (COALESCE(dh.units_sold_today, 0) * COALESCE(hp.price_each, p.current_price)) - (COALESCE(dh.units_sold_today, 0) * p.effective_cost_price) AS profit,

            COALESCE(dh.units_received_today, 0) as units_received,
            -- Estimate received cost using CURRENT product cost
            COALESCE(dh.units_received_today, 0) * p.effective_cost_price AS cost_received, -- Approx

            clock_timestamp() -- Timestamp of this specific calculation
        FROM temp_product_info p -- Use the temp table for better performance
        LEFT JOIN PreviousStock ps ON p.pid = ps.pid
        LEFT JOIN DailyHistoryUnits dh ON p.pid = dh.pid -- Join today's historical activity
        LEFT JOIN HistoricalPrice hp ON p.pid = hp.pid -- Join the looked-up historical price
        -- Optimization: Only process products with activity or previous stock
        WHERE (dh.units_sold_today > 0 OR dh.units_received_today > 0 OR COALESCE(ps.eod_stock_quantity, 0) > 0)

        ON CONFLICT (snapshot_date, pid) DO NOTHING; -- Avoid errors if rerunning parts, but prefer clean runs
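        -- Worked example (illustrative): previous EOD stock 10, 5 received and 3 sold today
        -- carries forward as 10 + 5 - 3 = 12 estimated EOD units for this date.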

        GET DIAGNOSTICS _row_count = ROW_COUNT;
        RAISE NOTICE 'Processed %: Inserted/Skipped % rows. Duration: %',
            _current_processing_date,
            _row_count,
            clock_timestamp() - _batch_start_time;

        _current_processing_date := _current_processing_date + INTERVAL '1 day';

    END LOOP;

    -- Clean up temporary tables
    DROP TABLE IF EXISTS temp_product_info;

    RAISE NOTICE 'Finished FINAL historical snapshot backfill.';
END;
$$ LANGUAGE plpgsql;

-- Example usage:
-- SELECT backfill_daily_snapshots_range_final('2023-01-01'::date, '2023-12-31'::date);
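
-- A quick sanity check after a run (a minimal sketch; adjust the range to the one just backfilled):
-- SELECT snapshot_date, COUNT(*) AS products_snapshotted
-- FROM public.daily_product_snapshots
-- WHERE snapshot_date BETWEEN '2023-01-01' AND '2023-01-31'
-- GROUP BY snapshot_date
-- ORDER BY snapshot_date;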
396
inventory-server/scripts/metrics-new/populate-initial-metrics.js
Normal file
@@ -0,0 +1,396 @@
const path = require('path');
const fs = require('fs');
const os = require('os'); // For detecting CPU cores

// Get the base directory (the directory containing the inventory-server folder)
const baseDir = path.resolve(__dirname, '../../..');

// Load environment variables from the inventory-server directory
require('dotenv').config({ path: path.resolve(__dirname, '../..', '.env') });

// Configure statement timeout (30 minutes)
const PG_STATEMENT_TIMEOUT_MS = 1800000;

// Add error handler for uncaught exceptions
process.on('uncaughtException', (error) => {
  console.error('Uncaught Exception:', error);
  process.exit(1);
});

// Add error handler for unhandled promise rejections
process.on('unhandledRejection', (reason, promise) => {
  console.error('Unhandled Rejection at:', promise, 'reason:', reason);
  process.exit(1);
});

// Load progress module
const progress = require('./utils/progress');

// Store progress functions in global scope to ensure availability
global.formatElapsedTime = progress.formatElapsedTime;
global.estimateRemaining = progress.estimateRemaining;
global.calculateRate = progress.calculateRate;
global.outputProgress = progress.outputProgress;
global.clearProgress = progress.clearProgress;
global.getProgress = progress.getProgress;
global.logError = progress.logError;

// Load database module
const { getConnection, closePool } = require('./utils/db');

// Add cancel handler
let isCancelled = false;
let runningQueryPromise = null;

function cancelCalculation() {
  if (!isCancelled) {
    isCancelled = true;
    console.log('Calculation has been cancelled by user');

    // Store the query promise to potentially cancel it
    const queryToCancel = runningQueryPromise;
    if (queryToCancel) {
      console.log('Attempting to cancel the running query...');
    }

    // Force-terminate any query that's been running for more than 5 seconds
    try {
      const connection = getConnection();
      connection.then(async (conn) => {
        try {
          // Identify and terminate long-running queries from our application
          await conn.query(`
            SELECT pg_cancel_backend(pid)
            FROM pg_stat_activity
            WHERE query_start < now() - interval '5 seconds'
              AND application_name = 'populate_metrics'
              AND query NOT LIKE '%pg_cancel_backend%'
          `);

          // Release connection
          conn.release();
        } catch (err) {
          console.error('Error during force cancellation:', err);
          conn.release();
        }
      }).catch(err => {
        console.error('Could not get connection for cancellation:', err);
      });
    } catch (err) {
      console.error('Failed to terminate running queries:', err);
    }
  }

  return {
    success: true,
    message: 'Calculation has been cancelled'
  };
}

// Handle SIGTERM signal for cancellation
process.on('SIGTERM', cancelCalculation);
process.on('SIGINT', cancelCalculation);

async function populateInitialMetrics() {
  let connection;
  const startTime = Date.now();
  let calculateHistoryId;

  try {
    // Clean up any previously running calculations
    connection = await getConnection({
      // Add performance-related settings
      application_name: 'populate_metrics',
      statement_timeout: PG_STATEMENT_TIMEOUT_MS, // 30 min timeout per statement
    });

    // Ensure the calculate_status table exists and has the correct structure
    await connection.query(`
      CREATE TABLE IF NOT EXISTS calculate_status (
        module_name TEXT PRIMARY KEY,
        last_calculation_timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
      )
    `);

    await connection.query(`
      UPDATE calculate_history
      SET
        status = 'cancelled',
        end_time = NOW(),
        duration_seconds = EXTRACT(EPOCH FROM (NOW() - start_time))::INTEGER,
        error_message = 'Previous calculation was not completed properly'
      WHERE status = 'running' AND additional_info->>'type' = 'populate_initial_metrics'
    `);

    // Create history record for this calculation
    const historyResult = await connection.query(`
      INSERT INTO calculate_history (
        start_time,
        status,
        additional_info
      ) VALUES (
        NOW(),
        'running',
        jsonb_build_object(
          'type', 'populate_initial_metrics',
          'sql_file', 'populate_initial_product_metrics.sql'
        )
      ) RETURNING id
    `);
    calculateHistoryId = historyResult.rows[0].id;

    // Initialize progress
    global.outputProgress({
      status: 'running',
      operation: 'Starting initial product metrics population',
      current: 0,
      total: 100,
      elapsed: '0s',
      remaining: 'Calculating... (this may take a while)',
      rate: 0,
      percentage: '0',
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      },
      historyId: calculateHistoryId
    });

    // Prepare the database - analyze tables
    global.outputProgress({
      status: 'running',
      operation: 'Analyzing database tables for better query performance',
      current: 2,
      total: 100,
      elapsed: global.formatElapsedTime(startTime),
      remaining: 'Analyzing...',
      rate: 0,
      percentage: '2',
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      },
      historyId: calculateHistoryId
    });

    // Enable better query planning and parallel operations
    await connection.query(`
      -- Analyze tables for better query planning
      ANALYZE public.products;
      ANALYZE public.purchase_orders;
      ANALYZE public.daily_product_snapshots;
      ANALYZE public.orders;

      -- Enable parallel operations
      SET LOCAL enable_parallel_append = on;
      SET LOCAL enable_parallel_hash = on;
      SET LOCAL max_parallel_workers_per_gather = 4;

      -- Larger work memory for complex sorts/joins
      SET LOCAL work_mem = '128MB';
    `).catch(err => {
      // Non-fatal if analyze fails
      console.warn('Failed to analyze tables (non-fatal):', err.message);
    });
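    // Note: the SET LOCAL statements above are transaction-scoped; if the pool runs
    // this batch in autocommit mode they may not affect the main query executed later.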

    // Execute the SQL query
    global.outputProgress({
      status: 'running',
      operation: 'Executing initial metrics SQL query',
      current: 5,
      total: 100,
      elapsed: global.formatElapsedTime(startTime),
      remaining: 'Calculating... (this could take several hours with 150M+ records)',
      rate: 0,
      percentage: '5',
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      },
      historyId: calculateHistoryId
    });

    // Read the SQL file
    const sqlFilePath = path.resolve(__dirname, 'populate_initial_product_metrics.sql');
    console.log('Base directory:', baseDir);
    console.log('Script directory:', __dirname);
    console.log('SQL file path:', sqlFilePath);
    console.log('Current working directory:', process.cwd());

    if (!fs.existsSync(sqlFilePath)) {
      throw new Error(`SQL file not found at ${sqlFilePath}`);
    }

    // Read and clean up the SQL (Slightly more robust cleaning)
    const sqlQuery = fs.readFileSync(sqlFilePath, 'utf8')
      .replace(/\r\n/g, '\n') // Handle Windows endings
      .replace(/\r/g, '\n')   // Handle old Mac endings
      .trim();                // Remove leading/trailing whitespace VERY IMPORTANT

    // Log details again AFTER cleaning
    console.log('SQL Query length (cleaned):', sqlQuery.length);
    console.log('SQL Query structure validation:');
    console.log('- Contains DO block:', sqlQuery.includes('DO $$') || sqlQuery.includes('DO $')); // Check both types of tag start
    console.log('- Contains BEGIN:', sqlQuery.includes('BEGIN'));
    console.log('- Contains END:', sqlQuery.includes('END $$;') || sqlQuery.includes('END $')); // Check both types of tag end
    console.log('- First 50 chars:', JSON.stringify(sqlQuery.slice(0, 50)));
    console.log('- Last 100 chars (cleaned):', JSON.stringify(sqlQuery.slice(-100)));

    // Final check to ensure clean SQL ending
    if (!sqlQuery.endsWith('END $$;')) {
      console.warn('WARNING: SQL does not end with "END $$;". This might cause issues.');
      console.log('Exact ending:', JSON.stringify(sqlQuery.slice(-20)));
    }

    // Execute the script
    console.log('Starting initial product metrics population...');

    // Track the query promise for potential cancellation
    runningQueryPromise = connection.query({
      text: sqlQuery,
      rowMode: 'array'
    });
    await runningQueryPromise;
    runningQueryPromise = null;

    // Update progress to 100%
    global.outputProgress({
      status: 'complete',
      operation: 'Initial product metrics population complete',
      current: 100,
      total: 100,
      elapsed: global.formatElapsedTime(startTime),
      remaining: '0s',
      rate: 0,
      percentage: '100',
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      },
      historyId: calculateHistoryId
    });

    // Update history with completion
    await connection.query(`
      UPDATE calculate_history
      SET
        end_time = NOW(),
        duration_seconds = $1,
        status = 'completed'
      WHERE id = $2
    `, [Math.round((Date.now() - startTime) / 1000), calculateHistoryId]);

    // Clear progress file on successful completion
    global.clearProgress();

    return {
      success: true,
      message: 'Initial product metrics population completed successfully',
      duration: Math.round((Date.now() - startTime) / 1000)
    };
  } catch (error) {
    const endTime = Date.now();
    const totalElapsedSeconds = Math.round((endTime - startTime) / 1000);

    // Enhanced error logging
    console.error('Error details:', {
      message: error.message,
      code: error.code,
      hint: error.hint,
      position: error.position,
      detail: error.detail,
      where: error.where ? error.where.substring(0, 500) + '...' : undefined, // Truncate to avoid huge logs
      severity: error.severity,
      file: error.file,
      line: error.line,
      routine: error.routine
    });

    // Update history with error
    if (connection && calculateHistoryId) {
      await connection.query(`
        UPDATE calculate_history
        SET
          end_time = NOW(),
          duration_seconds = $1,
          status = $2,
          error_message = $3
        WHERE id = $4
      `, [
        totalElapsedSeconds,
        isCancelled ? 'cancelled' : 'failed',
        error.message,
        calculateHistoryId
      ]);
    }

    if (isCancelled) {
      global.outputProgress({
        status: 'cancelled',
        operation: 'Calculation cancelled',
        current: 50,
        total: 100,
        elapsed: global.formatElapsedTime(startTime),
        remaining: null,
        rate: 0,
        percentage: '50',
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: totalElapsedSeconds
        },
        historyId: calculateHistoryId
      });
    } else {
      global.outputProgress({
        status: 'error',
        operation: 'Error during initial product metrics population',
        message: error.message,
        current: 0,
        total: 100,
        elapsed: global.formatElapsedTime(startTime),
        remaining: null,
        rate: 0,
        percentage: '0',
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: totalElapsedSeconds
        },
        historyId: calculateHistoryId
      });
    }

    console.error('Error during initial product metrics population:', error);
    return {
      success: false,
      error: error.message,
      duration: totalElapsedSeconds
    };
  } finally {
    if (connection) {
      connection.release();
    }
    await closePool();
  }
}

// Start population process
populateInitialMetrics()
  .then(result => {
    if (result.success) {
      console.log(`Initial product metrics population completed successfully in ${result.duration} seconds`);
      process.exit(0);
    } else {
      console.error(`Initial product metrics population failed: ${result.error}`);
      process.exit(1);
    }
  })
  .catch(err => {
    console.error('Unexpected error:', err);
    process.exit(1);
  });
@@ -0,0 +1,448 @@
-- Description: Performs the first population OR full recalculation of the product_metrics table based on
--              historically backfilled daily_product_snapshots and current product/PO data.
--              Calculates all metrics considering the full available history up to 'yesterday'.
-- Run ONCE after backfill_historical_snapshots_final.sql completes successfully.
-- Dependencies: Core import tables (products, purchase_orders), daily_product_snapshots (historically populated),
--               configuration tables (settings_*), product_metrics table must exist.
-- Frequency: Run ONCE.
DO $$
DECLARE
    _module_name VARCHAR := 'product_metrics_population'; -- Generic name
    _start_time TIMESTAMPTZ := clock_timestamp();
    -- Calculate metrics AS OF the end of the last fully completed day
    _calculation_date DATE := CURRENT_DATE - INTERVAL '1 day';
BEGIN
    RAISE NOTICE 'Running % module. Calculating AS OF: %. Start Time: %', _module_name, _calculation_date, _start_time;

    -- Optional: Consider TRUNCATE if you want a completely fresh start,
    -- otherwise ON CONFLICT will update existing rows if this is rerun.
    -- TRUNCATE TABLE public.product_metrics;
    RAISE NOTICE 'Populating product_metrics table. This may take some time...';

    -- CTEs to gather necessary information AS OF _calculation_date
    WITH CurrentInfo AS (
        -- Fetches current product details, including costs/prices used for forecasting & fallbacks
        SELECT
            p.pid, p.sku, p.title, p.brand, p.vendor, COALESCE(p.image_175, p.image) as image_url,
            p.visible as is_visible, p.replenishable,
            COALESCE(p.price, 0.00) as current_price, COALESCE(p.regular_price, 0.00) as current_regular_price,
            COALESCE(p.cost_price, 0.00) as current_cost_price,
            COALESCE(p.landing_cost_price, p.cost_price, 0.00) as current_effective_cost, -- Use landing if available, else cost
            p.stock_quantity as current_stock, -- Use actual current stock for forecast base
            p.created_at, p.first_received, p.date_last_sold,
            p.moq,
            p.uom
        FROM public.products p
    ),
    OnOrderInfo AS (
        -- Calculates current on-order quantities and costs
        SELECT
            pid,
            COALESCE(SUM(ordered - received), 0) AS on_order_qty,
            COALESCE(SUM((ordered - received) * cost_price), 0.00) AS on_order_cost,
            MIN(expected_date) AS earliest_expected_date
        FROM public.purchase_orders
        -- Use the most common statuses representing active, unfulfilled POs
        WHERE status IN ('open', 'partially_received', 'ordered', 'preordered', 'receiving_started', 'electronically_sent', 'electronically_ready_send')
          AND (ordered - received) > 0
        GROUP BY pid
    ),
    HistoricalDates AS (
        -- Determines key historical dates from orders and PO history (receiving_history)
        SELECT
            p.pid,
            MIN(o.date)::date AS date_first_sold,
            MAX(o.date)::date AS max_order_date, -- Used as fallback for date_last_sold
            MIN(rh.first_receipt_date) AS date_first_received_calc,
            MAX(rh.last_receipt_date) AS date_last_received_calc
        FROM public.products p
        LEFT JOIN public.orders o ON p.pid = o.pid AND o.quantity > 0 AND o.status NOT IN ('canceled', 'returned')
        LEFT JOIN (
            SELECT
                po.pid,
                MIN((rh.item->>'received_at')::date) as first_receipt_date,
                MAX((rh.item->>'received_at')::date) as last_receipt_date
            FROM public.purchase_orders po
            CROSS JOIN LATERAL jsonb_array_elements(po.receiving_history) AS rh(item)
            WHERE jsonb_typeof(po.receiving_history) = 'array' AND jsonb_array_length(po.receiving_history) > 0
            GROUP BY po.pid
        ) rh ON p.pid = rh.pid
        GROUP BY p.pid
    ),
    SnapshotAggregates AS (
        -- Aggregates metrics from historical snapshots up to the _calculation_date
        SELECT
            pid,
            -- Rolling periods relative to _calculation_date
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '6 days' AND _calculation_date THEN units_sold ELSE 0 END) AS sales_7d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '6 days' AND _calculation_date THEN net_revenue ELSE 0 END) AS revenue_7d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '13 days' AND _calculation_date THEN units_sold ELSE 0 END) AS sales_14d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '13 days' AND _calculation_date THEN net_revenue ELSE 0 END) AS revenue_14d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN units_sold ELSE 0 END) AS sales_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN net_revenue ELSE 0 END) AS revenue_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN cogs ELSE 0 END) AS cogs_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN profit ELSE 0 END) AS profit_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN units_returned ELSE 0 END) AS returns_units_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN returns_revenue ELSE 0 END) AS returns_revenue_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN discounts ELSE 0 END) AS discounts_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN gross_revenue ELSE 0 END) AS gross_revenue_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN gross_regular_revenue ELSE 0 END) AS gross_regular_revenue_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date AND stockout_flag THEN 1 ELSE 0 END) AS stockout_days_30d,
            -- Add 90-day aggregates if needed
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '89 days' AND _calculation_date THEN units_sold ELSE 0 END) AS sales_90d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '89 days' AND _calculation_date THEN net_revenue ELSE 0 END) AS revenue_90d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '89 days' AND _calculation_date THEN cogs ELSE 0 END) AS cogs_90d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '89 days' AND _calculation_date THEN profit ELSE 0 END) AS profit_90d,
            -- Add 60-day aggregates if needed
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '59 days' AND _calculation_date THEN units_sold ELSE 0 END) AS sales_60d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '59 days' AND _calculation_date THEN net_revenue ELSE 0 END) AS revenue_60d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '59 days' AND _calculation_date THEN cogs ELSE 0 END) AS cogs_60d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '59 days' AND _calculation_date THEN profit ELSE 0 END) AS profit_60d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '364 days' AND _calculation_date THEN units_sold ELSE 0 END) AS sales_365d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '364 days' AND _calculation_date THEN net_revenue ELSE 0 END) AS revenue_365d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN units_received ELSE 0 END) AS received_qty_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN cost_received ELSE 0 END) AS received_cost_30d,

            -- Averages over the last 30 days ending _calculation_date
            AVG(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN eod_stock_quantity END) AS avg_stock_units_30d,
            AVG(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN eod_stock_cost END) AS avg_stock_cost_30d,
            AVG(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN eod_stock_retail END) AS avg_stock_retail_30d,
            AVG(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN eod_stock_gross END) AS avg_stock_gross_30d,

            -- Lifetime (Sum over ALL available snapshots up to calculation date)
            SUM(units_sold) AS lifetime_sales,
            SUM(net_revenue) AS lifetime_revenue,

            -- Yesterday (Sales for the specific _calculation_date)
            SUM(CASE WHEN snapshot_date = _calculation_date THEN units_sold ELSE 0 END) as yesterday_sales

        FROM public.daily_product_snapshots
        WHERE snapshot_date <= _calculation_date -- Ensure we only use data up to the calculation point
        GROUP BY pid
    ),
    FirstPeriodMetrics AS (
        -- Calculates sales/revenue for first X days after first sale date
        -- Uses HistoricalDates CTE to get the first sale date
        SELECT
            pid, date_first_sold,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '6 days' THEN units_sold ELSE 0 END) AS first_7_days_sales,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '6 days' THEN net_revenue ELSE 0 END) AS first_7_days_revenue,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '29 days' THEN units_sold ELSE 0 END) AS first_30_days_sales,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '29 days' THEN net_revenue ELSE 0 END) AS first_30_days_revenue,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '59 days' THEN units_sold ELSE 0 END) AS first_60_days_sales,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '59 days' THEN net_revenue ELSE 0 END) AS first_60_days_revenue,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '89 days' THEN units_sold ELSE 0 END) AS first_90_days_sales,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '89 days' THEN net_revenue ELSE 0 END) AS first_90_days_revenue
        FROM public.daily_product_snapshots ds
        JOIN HistoricalDates hd USING(pid)
        WHERE date_first_sold IS NOT NULL
          AND snapshot_date >= date_first_sold -- Only consider snapshots after first sale
          AND snapshot_date <= _calculation_date -- Only up to the overall calculation date
        GROUP BY pid, date_first_sold
    ),
    Settings AS (
        -- Fetches effective configuration settings (Product > Vendor > Global)
        SELECT
            p.pid,
            COALESCE(sp.lead_time_days, sv.default_lead_time_days, (SELECT setting_value FROM settings_global WHERE setting_key = 'default_lead_time_days')::int, 14) AS effective_lead_time,
            COALESCE(sp.days_of_stock, sv.default_days_of_stock, (SELECT setting_value FROM settings_global WHERE setting_key = 'default_days_of_stock')::int, 30) AS effective_days_of_stock,
            COALESCE(sp.safety_stock, (SELECT setting_value::int FROM settings_global WHERE setting_key = 'default_safety_stock_units'), 0) AS effective_safety_stock,
            COALESCE(sp.exclude_from_forecast, FALSE) AS exclude_forecast
        FROM public.products p
        LEFT JOIN public.settings_product sp ON p.pid = sp.pid
        LEFT JOIN public.settings_vendor sv ON p.vendor = sv.vendor
    ),
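    -- Precedence example (illustrative): a product-level lead_time_days of 21 wins over a
    -- vendor default of 14, which in turn wins over the global default_lead_time_days
    -- (with 14 as the final hard-coded fallback).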
    AvgLeadTime AS (
        -- Calculate Average Lead Time from historical POs
        SELECT
            pid,
            AVG(GREATEST(1,
                CASE
                    WHEN last_received_date IS NOT NULL AND date IS NOT NULL
                    THEN (last_received_date::date - date::date)
                    ELSE 1
                END
            ))::int AS avg_lead_time_days_calc
        FROM public.purchase_orders
        WHERE status = 'received' -- Assumes 'received' marks full receipt
          AND last_received_date IS NOT NULL
          AND date IS NOT NULL
          AND last_received_date >= date
        GROUP BY pid
    ),
    RankedForABC AS (
        -- Ranks products based on the configured ABC metric (using historical data)
        SELECT
            p.pid,
            CASE COALESCE((SELECT setting_value FROM settings_global WHERE setting_key = 'abc_calculation_basis'), 'revenue_30d')
                WHEN 'sales_30d' THEN COALESCE(sa.sales_30d, 0)
                WHEN 'lifetime_revenue' THEN COALESCE(sa.lifetime_revenue, 0)::numeric
                ELSE COALESCE(sa.revenue_30d, 0) -- Default to revenue_30d
            END AS metric_value
        FROM public.products p -- Use products as the base
        JOIN SnapshotAggregates sa ON p.pid = sa.pid
        WHERE p.replenishable = TRUE -- Only rank replenishable products
          AND (CASE COALESCE((SELECT setting_value FROM settings_global WHERE setting_key = 'abc_calculation_basis'), 'revenue_30d')
                WHEN 'sales_30d' THEN COALESCE(sa.sales_30d, 0)
                WHEN 'lifetime_revenue' THEN COALESCE(sa.lifetime_revenue, 0)::numeric
                ELSE COALESCE(sa.revenue_30d, 0)
              END) > 0 -- Exclude zero-value products from ranking
    ),
    CumulativeABC AS (
        -- Calculates cumulative metric values for ABC ranking
        SELECT
            pid, metric_value,
            SUM(metric_value) OVER (ORDER BY metric_value DESC NULLS LAST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as cumulative_metric,
            SUM(metric_value) OVER () as total_metric
        FROM RankedForABC
    ),
    FinalABC AS (
        -- Assigns A, B, or C class based on thresholds
        SELECT
            pid,
            CASE
                WHEN cumulative_metric / NULLIF(total_metric, 0) <= COALESCE((SELECT setting_value::numeric FROM settings_global WHERE setting_key = 'abc_revenue_threshold_a'), 0.8) THEN 'A'::char(1)
                WHEN cumulative_metric / NULLIF(total_metric, 0) <= COALESCE((SELECT setting_value::numeric FROM settings_global WHERE setting_key = 'abc_revenue_threshold_b'), 0.95) THEN 'B'::char(1)
                ELSE 'C'::char(1)
            END AS abc_class_calc
        FROM CumulativeABC
    )
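    -- Worked example (illustrative): with default thresholds 0.8 / 0.95, a product whose
    -- cumulative share of the ranked metric reaches 0.72 is 'A', one at 0.91 is 'B',
    -- and everything past 0.95 is 'C'.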
    -- Final INSERT/UPDATE statement using all the prepared CTEs
    INSERT INTO public.product_metrics (
        pid, last_calculated, sku, title, brand, vendor, image_url, is_visible, is_replenishable,
        current_price, current_regular_price, current_cost_price, current_landing_cost_price,
        current_stock, current_stock_cost, current_stock_retail, current_stock_gross,
        on_order_qty, on_order_cost, on_order_retail, earliest_expected_date,
        date_created, date_first_received, date_last_received, date_first_sold, date_last_sold, age_days,
        sales_7d, revenue_7d, sales_14d, revenue_14d, sales_30d, revenue_30d, cogs_30d, profit_30d,
        sales_60d, revenue_60d, cogs_60d, profit_60d,
        sales_90d, revenue_90d, cogs_90d, profit_90d,
        returns_units_30d, returns_revenue_30d, discounts_30d, gross_revenue_30d, gross_regular_revenue_30d,
        stockout_days_30d, sales_365d, revenue_365d,
        avg_stock_units_30d, avg_stock_cost_30d, avg_stock_retail_30d, avg_stock_gross_30d,
        received_qty_30d, received_cost_30d,
        lifetime_sales, lifetime_revenue,
        first_7_days_sales, first_7_days_revenue, first_30_days_sales, first_30_days_revenue,
        first_60_days_sales, first_60_days_revenue, first_90_days_sales, first_90_days_revenue,
        asp_30d, acp_30d, avg_ros_30d, avg_sales_per_day_30d,
        margin_30d, markup_30d, gmroi_30d, stockturn_30d, return_rate_30d, discount_rate_30d,
        stockout_rate_30d, markdown_30d, markdown_rate_30d, sell_through_30d,
        avg_lead_time_days, abc_class,
        sales_velocity_daily, config_lead_time, config_days_of_stock, config_safety_stock,
        planning_period_days, lead_time_forecast_units, days_of_stock_forecast_units,
        planning_period_forecast_units, lead_time_closing_stock, days_of_stock_closing_stock,
        replenishment_needed_raw, replenishment_units, replenishment_cost, replenishment_retail, replenishment_profit,
        to_order_units, forecast_lost_sales_units, forecast_lost_revenue,
        stock_cover_in_days, po_cover_in_days, sells_out_in_days, replenish_date,
        overstocked_units, overstocked_cost, overstocked_retail, is_old_stock,
        yesterday_sales
    )
    SELECT
        -- Select columns in order, joining all CTEs by pid
        ci.pid, _start_time, ci.sku, ci.title, ci.brand, ci.vendor, ci.image_url, ci.is_visible, ci.replenishable,
        ci.current_price, ci.current_regular_price, ci.current_cost_price, ci.current_effective_cost,
        ci.current_stock, (ci.current_stock * COALESCE(ci.current_effective_cost, 0.00))::numeric(12,2), (ci.current_stock * COALESCE(ci.current_price, 0.00))::numeric(12,2), (ci.current_stock * COALESCE(ci.current_regular_price, 0.00))::numeric(12,2),
        COALESCE(ooi.on_order_qty, 0), COALESCE(ooi.on_order_cost, 0.00)::numeric(12,2), (COALESCE(ooi.on_order_qty, 0) * COALESCE(ci.current_price, 0.00))::numeric(12,2), ooi.earliest_expected_date,

        -- Fix type issue with date calculation - properly cast timestamps to dates before arithmetic
        ci.created_at::date,
        COALESCE(ci.first_received::date, hd.date_first_received_calc),
        hd.date_last_received_calc,
        hd.date_first_sold,
        COALESCE(ci.date_last_sold, hd.max_order_date),
        -- Fix timestamp + integer error by ensuring we work only with dates
        CASE
            WHEN LEAST(ci.created_at::date, COALESCE(hd.date_first_sold, ci.created_at::date)) IS NOT NULL
            THEN (_calculation_date::date - LEAST(ci.created_at::date, COALESCE(hd.date_first_sold, ci.created_at::date)))::int
            ELSE NULL
        END,

        COALESCE(sa.sales_7d, 0), COALESCE(sa.revenue_7d, 0), COALESCE(sa.sales_14d, 0), COALESCE(sa.revenue_14d, 0), COALESCE(sa.sales_30d, 0), COALESCE(sa.revenue_30d, 0), COALESCE(sa.cogs_30d, 0), COALESCE(sa.profit_30d, 0),
        COALESCE(sa.sales_60d, 0), COALESCE(sa.revenue_60d, 0), COALESCE(sa.cogs_60d, 0), COALESCE(sa.profit_60d, 0),
        COALESCE(sa.sales_90d, 0), COALESCE(sa.revenue_90d, 0), COALESCE(sa.cogs_90d, 0), COALESCE(sa.profit_90d, 0),
        COALESCE(sa.returns_units_30d, 0), COALESCE(sa.returns_revenue_30d, 0), COALESCE(sa.discounts_30d, 0), COALESCE(sa.gross_revenue_30d, 0), COALESCE(sa.gross_regular_revenue_30d, 0),
        COALESCE(sa.stockout_days_30d, 0), COALESCE(sa.sales_365d, 0), COALESCE(sa.revenue_365d, 0),
        sa.avg_stock_units_30d, sa.avg_stock_cost_30d, sa.avg_stock_retail_30d, sa.avg_stock_gross_30d, -- Averages can be NULL if no data
        COALESCE(sa.received_qty_30d, 0), COALESCE(sa.received_cost_30d, 0),
        COALESCE(sa.lifetime_sales, 0), COALESCE(sa.lifetime_revenue, 0),
        fpm.first_7_days_sales, fpm.first_7_days_revenue, fpm.first_30_days_sales, fpm.first_30_days_revenue,
        fpm.first_60_days_sales, fpm.first_60_days_revenue, fpm.first_90_days_sales, fpm.first_90_days_revenue,

        -- Calculated KPIs (using COALESCE on inputs where appropriate)
        sa.revenue_30d / NULLIF(sa.sales_30d, 0) AS asp_30d,
        sa.cogs_30d / NULLIF(sa.sales_30d, 0) AS acp_30d,
        sa.profit_30d / NULLIF(sa.sales_30d, 0) AS avg_ros_30d,
        COALESCE(sa.sales_30d, 0) / 30.0 AS avg_sales_per_day_30d,

        -- Fix for percentages - cast to numeric with appropriate precision
        ((sa.profit_30d / NULLIF(sa.revenue_30d, 0)) * 100)::numeric(8,2) AS margin_30d,
        ((sa.profit_30d / NULLIF(sa.cogs_30d, 0)) * 100)::numeric(8,2) AS markup_30d,
        sa.profit_30d / NULLIF(sa.avg_stock_cost_30d, 0) AS gmroi_30d,
        sa.sales_30d / NULLIF(sa.avg_stock_units_30d, 0) AS stockturn_30d,
        ((sa.returns_units_30d / NULLIF(COALESCE(sa.sales_30d, 0) + COALESCE(sa.returns_units_30d, 0), 0)) * 100)::numeric(8,2) AS return_rate_30d,
        ((sa.discounts_30d / NULLIF(sa.gross_revenue_30d, 0)) * 100)::numeric(8,2) AS discount_rate_30d,
        ((COALESCE(sa.stockout_days_30d, 0) / 30.0) * 100)::numeric(8,2) AS stockout_rate_30d,
        GREATEST(0, sa.gross_regular_revenue_30d - sa.gross_revenue_30d) AS markdown_30d, -- Ensure markdown isn't negative
        ((GREATEST(0, sa.gross_regular_revenue_30d - sa.gross_revenue_30d) / NULLIF(sa.gross_regular_revenue_30d, 0)) * 100)::numeric(8,2) AS markdown_rate_30d,
        -- Sell Through Rate: Sales / (Stock at end of period + Sales). This is one definition proxying for Sales / Beginning Stock.
        ((sa.sales_30d / NULLIF(
            (SELECT eod_stock_quantity FROM daily_product_snapshots WHERE snapshot_date = _calculation_date AND pid = ci.pid LIMIT 1) + COALESCE(sa.sales_30d, 0)
        , 0)) * 100)::numeric(8,2) AS sell_through_30d,

        -- Use calculated periodic metrics
        alt.avg_lead_time_days_calc,
        CASE
            WHEN ci.replenishable = FALSE THEN NULL -- Non-replenishable items don't get a class
            ELSE COALESCE(fa.abc_class_calc, 'C') -- Replenishable but unranked (no metric contribution) defaults to 'C'
        END,

        -- Forecasting intermediate values (based on historical aggregates ending _calculation_date)
        (COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) AS sales_velocity_daily, -- Ensure divisor > 0
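        -- e.g. 60 units sold over the last 30 days with 10 stockout days gives
        -- 60 / (30 - 10) = 3 units/day, rather than a stockout-diluted 2 units/day.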
        s.effective_lead_time AS config_lead_time, s.effective_days_of_stock AS config_days_of_stock, s.effective_safety_stock AS config_safety_stock,
        (s.effective_lead_time + s.effective_days_of_stock) AS planning_period_days,
        -- Calculate raw forecast need components (using safe velocity)
        (COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time AS lead_time_forecast_units,
        (COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock AS days_of_stock_forecast_units,
        -- Planning period forecast units (sum of lead time and DOS units)
        CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
            + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock) AS planning_period_forecast_units,
        -- Closing stock calculations (using raw forecast components for accuracy before rounding)
        (ci.current_stock + COALESCE(ooi.on_order_qty, 0) - ((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)) AS lead_time_closing_stock,
        ((ci.current_stock + COALESCE(ooi.on_order_qty, 0) - ((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)))
            - ((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock) AS days_of_stock_closing_stock,
        -- Raw replenishment needed
        (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time) -- Use rounded forecast units
            + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
            + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0) AS replenishment_needed_raw,

        -- Final Forecasting Metrics
        -- Replenishment Units (calculated need, before MOQ)
        CEILING(GREATEST(0,
            (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
            + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
            + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
        ))::int AS replenishment_units,
        -- Replenishment Cost/Retail/Profit (based on replenishment_units)
        (CEILING(GREATEST(0,
            (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
            + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
            + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
        ))::int) * COALESCE(ci.current_effective_cost, 0.00)::numeric(12,2) AS replenishment_cost,
        (CEILING(GREATEST(0,
            (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
            + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
            + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
        ))::int) * COALESCE(ci.current_price, 0.00)::numeric(12,2) AS replenishment_retail,
        (CEILING(GREATEST(0,
            (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
            + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
            + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
        ))::int) * (COALESCE(ci.current_price, 0.00) - COALESCE(ci.current_effective_cost, 0.00))::numeric(12,2) AS replenishment_profit,

        -- *** FIX: To Order Units (Apply MOQ rounding) ***
        CASE
            WHEN COALESCE(ci.moq, 0) <= 1 THEN -- Treat no/invalid MOQ or MOQ=1 as no rounding needed
                CEILING(GREATEST(0,
                    (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
                    + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
                    + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
                ))::int
            ELSE -- Apply MOQ rounding: Round UP to nearest multiple of MOQ
                (CEILING(GREATEST(0,
                    (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
                    + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
                    + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
                ) / NULLIF(ci.moq::numeric, 0)) * COALESCE(ci.moq, 1))::int
        END AS to_order_units,
|
||||
|
||||
-- Forecast Lost Sales (Units occurring during lead time if current+on_order is insufficient)
|
||||
CEILING(GREATEST(0,
|
||||
((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time) -- Demand during lead time
|
||||
- (ci.current_stock + COALESCE(ooi.on_order_qty, 0)) -- Supply available before order arrives
|
||||
))::int AS forecast_lost_sales_units,
|
||||
-- Forecast Lost Revenue
|
||||
(CEILING(GREATEST(0,
|
||||
((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
|
||||
- (ci.current_stock + COALESCE(ooi.on_order_qty, 0))
|
||||
))::int) * COALESCE(ci.current_price, 0.00)::numeric(12,2) AS forecast_lost_revenue,
|
||||
|
||||
-- Stock Cover etc (using safe velocity)
|
||||
ci.current_stock / NULLIF((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)), 0) AS stock_cover_in_days,
|
||||
COALESCE(ooi.on_order_qty, 0) / NULLIF((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)), 0) AS po_cover_in_days,
|
||||
(ci.current_stock + COALESCE(ooi.on_order_qty, 0)) / NULLIF((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)), 0) AS sells_out_in_days,
|
||||
-- Replenish Date (Project forward from 'today', which is _calculation_date + 1 day)
|
||||
CASE
|
||||
WHEN (COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) > 0 -- Check for positive velocity
|
||||
THEN
|
||||
_calculation_date + INTERVAL '1 day' -- Today
|
||||
+ FLOOR(GREATEST(0, ci.current_stock - s.effective_safety_stock) -- Stock above safety
|
||||
/ (COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) -- divided by velocity
|
||||
)::integer * INTERVAL '1 day' -- Gives date safety stock is hit
|
||||
- s.effective_lead_time * INTERVAL '1 day' -- Subtract lead time
|
||||
ELSE NULL -- Cannot calculate if no sales velocity
|
||||
END AS replenish_date,
|
||||
-- Overstocked Units (Stock above safety + planning period demand)
|
||||
GREATEST(0, ci.current_stock - s.effective_safety_stock -
|
||||
(CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time) -- Demand during lead time
|
||||
+ CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock)) -- Demand during DOS
|
||||
)::int AS overstocked_units,
|
||||
(GREATEST(0, ci.current_stock - s.effective_safety_stock -
|
||||
(CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
|
||||
+ CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
|
||||
)::int) * COALESCE(ci.current_effective_cost, 0.00)::numeric(12,2) AS overstocked_cost,
|
||||
(GREATEST(0, ci.current_stock - s.effective_safety_stock -
|
||||
(CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
|
||||
+ CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
|
||||
)::int) * COALESCE(ci.current_price, 0.00)::numeric(12,2) AS overstocked_retail,
|
||||
-- Old Stock Flag
|
||||
(ci.created_at::date < (_calculation_date - INTERVAL '60 day')::date) AND
|
||||
(COALESCE(ci.date_last_sold, hd.max_order_date) IS NULL OR COALESCE(ci.date_last_sold, hd.max_order_date) < (_calculation_date - INTERVAL '60 day')::date) AND
|
||||
(hd.date_last_received_calc IS NULL OR hd.date_last_received_calc < (_calculation_date - INTERVAL '60 day')::date) AND
|
||||
COALESCE(ooi.on_order_qty, 0) = 0 AS is_old_stock,
|
||||
COALESCE(sa.yesterday_sales, 0) -- Sales for _calculation_date
|
||||
|
||||
FROM CurrentInfo ci
|
||||
LEFT JOIN OnOrderInfo ooi ON ci.pid = ooi.pid
|
||||
LEFT JOIN HistoricalDates hd ON ci.pid = hd.pid
|
||||
LEFT JOIN SnapshotAggregates sa ON ci.pid = sa.pid
|
||||
LEFT JOIN FirstPeriodMetrics fpm ON ci.pid = fpm.pid
|
||||
LEFT JOIN Settings s ON ci.pid = s.pid
|
||||
LEFT JOIN AvgLeadTime alt ON ci.pid = alt.pid -- Join calculated avg lead time
|
||||
LEFT JOIN FinalABC fa ON ci.pid = fa.pid -- Join calculated ABC class
|
||||
WHERE s.exclude_forecast IS FALSE OR s.exclude_forecast IS NULL
|
||||
|
||||
ON CONFLICT (pid) DO UPDATE SET
|
||||
-- *** IMPORTANT: List ALL columns here, ensuring order matches INSERT list ***
|
||||
-- Update ALL columns to ensure entire row is refreshed
|
||||
last_calculated = EXCLUDED.last_calculated, sku = EXCLUDED.sku, title = EXCLUDED.title, brand = EXCLUDED.brand, vendor = EXCLUDED.vendor, image_url = EXCLUDED.image_url, is_visible = EXCLUDED.is_visible, is_replenishable = EXCLUDED.is_replenishable,
|
||||
current_price = EXCLUDED.current_price, current_regular_price = EXCLUDED.current_regular_price, current_cost_price = EXCLUDED.current_cost_price, current_landing_cost_price = EXCLUDED.current_landing_cost_price,
|
||||
current_stock = EXCLUDED.current_stock, current_stock_cost = EXCLUDED.current_stock_cost, current_stock_retail = EXCLUDED.current_stock_retail, current_stock_gross = EXCLUDED.current_stock_gross,
|
||||
on_order_qty = EXCLUDED.on_order_qty, on_order_cost = EXCLUDED.on_order_cost, on_order_retail = EXCLUDED.on_order_retail, earliest_expected_date = EXCLUDED.earliest_expected_date,
|
||||
date_created = EXCLUDED.date_created, date_first_received = EXCLUDED.date_first_received, date_last_received = EXCLUDED.date_last_received, date_first_sold = EXCLUDED.date_first_sold, date_last_sold = EXCLUDED.date_last_sold, age_days = EXCLUDED.age_days,
|
||||
sales_7d = EXCLUDED.sales_7d, revenue_7d = EXCLUDED.revenue_7d, sales_14d = EXCLUDED.sales_14d, revenue_14d = EXCLUDED.revenue_14d, sales_30d = EXCLUDED.sales_30d, revenue_30d = EXCLUDED.revenue_30d, cogs_30d = EXCLUDED.cogs_30d, profit_30d = EXCLUDED.profit_30d,
|
||||
-- Add 60d/90d columns
|
||||
sales_60d = EXCLUDED.sales_60d, revenue_60d = EXCLUDED.revenue_60d, cogs_60d = EXCLUDED.cogs_60d, profit_60d = EXCLUDED.profit_60d,
|
||||
sales_90d = EXCLUDED.sales_90d, revenue_90d = EXCLUDED.revenue_90d, cogs_90d = EXCLUDED.cogs_90d, profit_90d = EXCLUDED.profit_90d,
|
||||
returns_units_30d = EXCLUDED.returns_units_30d, returns_revenue_30d = EXCLUDED.returns_revenue_30d, discounts_30d = EXCLUDED.discounts_30d, gross_revenue_30d = EXCLUDED.gross_revenue_30d, gross_regular_revenue_30d = EXCLUDED.gross_regular_revenue_30d,
|
||||
stockout_days_30d = EXCLUDED.stockout_days_30d, sales_365d = EXCLUDED.sales_365d, revenue_365d = EXCLUDED.revenue_365d,
|
||||
avg_stock_units_30d = EXCLUDED.avg_stock_units_30d, avg_stock_cost_30d = EXCLUDED.avg_stock_cost_30d, avg_stock_retail_30d = EXCLUDED.avg_stock_retail_30d, avg_stock_gross_30d = EXCLUDED.avg_stock_gross_30d,
|
||||
received_qty_30d = EXCLUDED.received_qty_30d, received_cost_30d = EXCLUDED.received_cost_30d,
|
||||
lifetime_sales = EXCLUDED.lifetime_sales, lifetime_revenue = EXCLUDED.lifetime_revenue,
|
||||
first_7_days_sales = EXCLUDED.first_7_days_sales, first_7_days_revenue = EXCLUDED.first_7_days_revenue, first_30_days_sales = EXCLUDED.first_30_days_sales, first_30_days_revenue = EXCLUDED.first_30_days_revenue,
|
||||
first_60_days_sales = EXCLUDED.first_60_days_sales, first_60_days_revenue = EXCLUDED.first_60_days_revenue, first_90_days_sales = EXCLUDED.first_90_days_sales, first_90_days_revenue = EXCLUDED.first_90_days_revenue,
|
||||
asp_30d = EXCLUDED.asp_30d, acp_30d = EXCLUDED.acp_30d, avg_ros_30d = EXCLUDED.avg_ros_30d, avg_sales_per_day_30d = EXCLUDED.avg_sales_per_day_30d,
|
||||
-- *** REMOVED avg_sales_per_month_30d ***
|
||||
margin_30d = EXCLUDED.margin_30d, markup_30d = EXCLUDED.markup_30d, gmroi_30d = EXCLUDED.gmroi_30d, stockturn_30d = EXCLUDED.stockturn_30d, return_rate_30d = EXCLUDED.return_rate_30d, discount_rate_30d = EXCLUDED.discount_rate_30d,
|
||||
stockout_rate_30d = EXCLUDED.stockout_rate_30d, markdown_30d = EXCLUDED.markdown_30d, markdown_rate_30d = EXCLUDED.markdown_rate_30d, sell_through_30d = EXCLUDED.sell_through_30d,
|
||||
avg_lead_time_days = EXCLUDED.avg_lead_time_days, abc_class = EXCLUDED.abc_class,
|
||||
sales_velocity_daily = EXCLUDED.sales_velocity_daily, config_lead_time = EXCLUDED.config_lead_time, config_days_of_stock = EXCLUDED.config_days_of_stock, config_safety_stock = EXCLUDED.config_safety_stock,
|
||||
planning_period_days = EXCLUDED.planning_period_days, lead_time_forecast_units = EXCLUDED.lead_time_forecast_units, days_of_stock_forecast_units = EXCLUDED.days_of_stock_forecast_units,
|
||||
planning_period_forecast_units = EXCLUDED.planning_period_forecast_units, lead_time_closing_stock = EXCLUDED.lead_time_closing_stock, days_of_stock_closing_stock = EXCLUDED.days_of_stock_closing_stock,
|
||||
replenishment_needed_raw = EXCLUDED.replenishment_needed_raw, replenishment_units = EXCLUDED.replenishment_units, replenishment_cost = EXCLUDED.replenishment_cost, replenishment_retail = EXCLUDED.replenishment_retail, replenishment_profit = EXCLUDED.replenishment_profit,
|
||||
to_order_units = EXCLUDED.to_order_units, -- *** Update to use EXCLUDED ***
|
||||
forecast_lost_sales_units = EXCLUDED.forecast_lost_sales_units, forecast_lost_revenue = EXCLUDED.forecast_lost_revenue,
|
||||
stock_cover_in_days = EXCLUDED.stock_cover_in_days, po_cover_in_days = EXCLUDED.po_cover_in_days, sells_out_in_days = EXCLUDED.sells_out_in_days, replenish_date = EXCLUDED.replenish_date,
|
||||
overstocked_units = EXCLUDED.overstocked_units, overstocked_cost = EXCLUDED.overstocked_cost, overstocked_retail = EXCLUDED.overstocked_retail, is_old_stock = EXCLUDED.is_old_stock,
|
||||
yesterday_sales = EXCLUDED.yesterday_sales;
|
||||
RAISE NOTICE 'Finished % module. Duration: %', _module_name, clock_timestamp() - _start_time;
|
||||
END $$;
|
||||
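To make the arithmetic above easier to follow, here is a minimal standalone sketch of the velocity, planning-period, and MOQ-rounding steps with hypothetical figures (120 units sold over 30 days, 5 stockout days, a 14-day lead time, 30 days of stock, safety stock of 10, 50 units on hand, nothing on order, and an MOQ of 25):

    -- Worked example of the forecast math; every literal below is illustrative.
    SELECT
        velocity,                                                 -- 120 / (30 - 5) = 4.8 units/day
        CEILING(velocity * 14) + CEILING(velocity * 30)
            AS planning_period_forecast_units,                    -- 68 + 144 = 212
        GREATEST(0, CEILING(velocity * 14) + CEILING(velocity * 30)
            + 10 - 50 - 0)::int AS replenishment_units,           -- 212 + 10 - 50 = 172
        (CEILING(GREATEST(0, CEILING(velocity * 14) + CEILING(velocity * 30)
            + 10 - 50 - 0) / 25.0) * 25)::int AS to_order_units   -- rounded up to 175, the next multiple of 25
    FROM (SELECT 120.0 / GREATEST(1.0, 30.0 - 5) AS velocity) AS v;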
@@ -1,306 +0,0 @@
const path = require('path');
const fs = require('fs');

// Change working directory to script directory
process.chdir(path.dirname(__filename));

// Load environment variables
require('dotenv').config({ path: path.resolve(__dirname, '../..', '.env') });

// Add error handler for uncaught exceptions
process.on('uncaughtException', (error) => {
    console.error('Uncaught Exception:', error);
    process.exit(1);
});

// Add error handler for unhandled promise rejections
process.on('unhandledRejection', (reason, promise) => {
    console.error('Unhandled Rejection at:', promise, 'reason:', reason);
    process.exit(1);
});

// Load progress module
const progress = require('./utils/progress');

// Store progress functions in global scope to ensure availability
global.formatElapsedTime = progress.formatElapsedTime;
global.estimateRemaining = progress.estimateRemaining;
global.calculateRate = progress.calculateRate;
global.outputProgress = progress.outputProgress;
global.clearProgress = progress.clearProgress;
global.getProgress = progress.getProgress;
global.logError = progress.logError;

// Load database module
const { getConnection, closePool } = require('./utils/db');

// Add cancel handler
let isCancelled = false;

function cancelCalculation() {
    isCancelled = true;
    console.log('Calculation has been cancelled by user');

    // Force-terminate any query that's been running for more than 5 seconds
    try {
        const connection = getConnection();
        connection.then(async (conn) => {
            try {
                // Identify and terminate long-running queries from our application
                await conn.query(`
                    SELECT pg_cancel_backend(pid)
                    FROM pg_stat_activity
                    WHERE query_start < now() - interval '5 seconds'
                    AND application_name LIKE '%node%'
                    AND query NOT LIKE '%pg_cancel_backend%'
                `);

                // Release connection
                conn.release();
            } catch (err) {
                console.error('Error during force cancellation:', err);
                conn.release();
            }
        }).catch(err => {
            console.error('Could not get connection for cancellation:', err);
        });
    } catch (err) {
        console.error('Failed to terminate running queries:', err);
    }

    return {
        success: true,
        message: 'Calculation has been cancelled'
    };
}

// Handle SIGTERM signal for cancellation
process.on('SIGTERM', cancelCalculation);

async function updateDailySnapshots() {
    let connection;
    const startTime = Date.now();
    let calculateHistoryId;

    // Set a maximum execution time (30 minutes)
    const MAX_EXECUTION_TIME = 30 * 60 * 1000;
    const timeout = setTimeout(() => {
        console.error(`Calculation timed out after ${MAX_EXECUTION_TIME/1000} seconds, forcing termination`);
        // Call cancel and force exit
        cancelCalculation();
        process.exit(1);
    }, MAX_EXECUTION_TIME);

    try {
        // Read the SQL file
        const sqlFilePath = path.resolve(__dirname, 'update_daily_snapshots.sql');
        const sqlQuery = fs.readFileSync(sqlFilePath, 'utf8');

        // Clean up any previously running calculations
        connection = await getConnection();

        // Ensure the calculate_status table exists and has the correct structure
        await connection.query(`
            CREATE TABLE IF NOT EXISTS calculate_status (
                module_name TEXT PRIMARY KEY,
                last_calculation_timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
            )
        `);

        await connection.query(`
            UPDATE calculate_history
            SET
                status = 'cancelled',
                end_time = NOW(),
                duration_seconds = EXTRACT(EPOCH FROM (NOW() - start_time))::INTEGER,
                error_message = 'Previous calculation was not completed properly'
            WHERE status = 'running' AND additional_info->>'type' = 'daily_snapshots'
        `);

        // Create history record for this calculation
        const historyResult = await connection.query(`
            INSERT INTO calculate_history (
                start_time,
                status,
                additional_info
            ) VALUES (
                NOW(),
                'running',
                jsonb_build_object(
                    'type', 'daily_snapshots',
                    'sql_file', 'update_daily_snapshots.sql'
                )
            ) RETURNING id
        `);
        calculateHistoryId = historyResult.rows[0].id;

        // Initialize progress
        global.outputProgress({
            status: 'running',
            operation: 'Starting daily snapshots calculation',
            current: 0,
            total: 100,
            elapsed: '0s',
            remaining: 'Calculating...',
            rate: 0,
            percentage: '0',
            timing: {
                start_time: new Date(startTime).toISOString(),
                end_time: new Date().toISOString(),
                elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
            }
        });

        // Execute the SQL query
        global.outputProgress({
            status: 'running',
            operation: 'Executing daily snapshots SQL query',
            current: 25,
            total: 100,
            elapsed: global.formatElapsedTime(startTime),
            remaining: 'Calculating...',
            rate: 0,
            percentage: '25',
            timing: {
                start_time: new Date(startTime).toISOString(),
                end_time: new Date().toISOString(),
                elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
            }
        });

        await connection.query(sqlQuery);

        // Update calculate_status table
        await connection.query(`
            INSERT INTO calculate_status (module_name, last_calculation_timestamp)
            VALUES ($1, $2)
            ON CONFLICT (module_name) DO UPDATE
            SET last_calculation_timestamp = EXCLUDED.last_calculation_timestamp
        `, ['daily_snapshots', new Date()]);

        // Update progress to 100%
        global.outputProgress({
            status: 'complete',
            operation: 'Daily snapshots calculation complete',
            current: 100,
            total: 100,
            elapsed: global.formatElapsedTime(startTime),
            remaining: '0s',
            rate: 0,
            percentage: '100',
            timing: {
                start_time: new Date(startTime).toISOString(),
                end_time: new Date().toISOString(),
                elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
            }
        });

        // Update history with completion
        await connection.query(`
            UPDATE calculate_history
            SET
                end_time = NOW(),
                duration_seconds = $1,
                status = 'completed'
            WHERE id = $2
        `, [Math.round((Date.now() - startTime) / 1000), calculateHistoryId]);

        // Clear progress file on successful completion
        global.clearProgress();

        return {
            success: true,
            message: 'Daily snapshots calculation completed successfully',
            duration: Math.round((Date.now() - startTime) / 1000)
        };
    } catch (error) {
        const endTime = Date.now();
        const totalElapsedSeconds = Math.round((endTime - startTime) / 1000);

        // Update history with error
        if (connection && calculateHistoryId) {
            await connection.query(`
                UPDATE calculate_history
                SET
                    end_time = NOW(),
                    duration_seconds = $1,
                    status = $2,
                    error_message = $3
                WHERE id = $4
            `, [
                totalElapsedSeconds,
                isCancelled ? 'cancelled' : 'failed',
                error.message,
                calculateHistoryId
            ]);
        }

        if (isCancelled) {
            global.outputProgress({
                status: 'cancelled',
                operation: 'Calculation cancelled',
                current: 50,
                total: 100,
                elapsed: global.formatElapsedTime(startTime),
                remaining: null,
                rate: 0,
                percentage: '50',
                timing: {
                    start_time: new Date(startTime).toISOString(),
                    end_time: new Date().toISOString(),
                    elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
                }
            });
        } else {
            global.outputProgress({
                status: 'error',
                operation: 'Error: ' + error.message,
                current: 50,
                total: 100,
                elapsed: global.formatElapsedTime(startTime),
                remaining: null,
                rate: 0,
                percentage: '50',
                timing: {
                    start_time: new Date(startTime).toISOString(),
                    end_time: new Date().toISOString(),
                    elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
                }
            });
        }
        throw error;
    } finally {
        // Clear the timeout to prevent forced termination
        clearTimeout(timeout);

        // Always release connection
        if (connection) {
            try {
                connection.release();
            } catch (err) {
                console.error('Error in final cleanup:', err);
            }
        }
    }
}

// Export as a module with all necessary functions
module.exports = {
    updateDailySnapshots,
    cancelCalculation,
    getProgress: global.getProgress
};

// Run directly if called from command line
if (require.main === module) {
    updateDailySnapshots().then(() => {
        closePool().then(() => {
            process.exit(0);
        });
    }).catch(error => {
        console.error('Error:', error);
        closePool().then(() => {
            process.exit(1);
        });
    });
}
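The cancellation path in this script (and its siblings below) leans on pg_cancel_backend. Before cancelling anything, the same filter can be run read-only to preview which backends would be hit — a sketch, assuming the pool reports an application_name containing 'node':

    SELECT pid, application_name, state,
           now() - query_start AS runtime,
           left(query, 80) AS query_head
    FROM pg_stat_activity
    WHERE query_start < now() - interval '5 seconds'
      AND application_name LIKE '%node%'
      AND query NOT LIKE '%pg_cancel_backend%';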
@@ -1,306 +0,0 @@
const path = require('path');
const fs = require('fs');

// Change working directory to script directory
process.chdir(path.dirname(__filename));

// Load environment variables
require('dotenv').config({ path: path.resolve(__dirname, '../..', '.env') });

// Add error handler for uncaught exceptions
process.on('uncaughtException', (error) => {
    console.error('Uncaught Exception:', error);
    process.exit(1);
});

// Add error handler for unhandled promise rejections
process.on('unhandledRejection', (reason, promise) => {
    console.error('Unhandled Rejection at:', promise, 'reason:', reason);
    process.exit(1);
});

// Load progress module
const progress = require('./utils/progress');

// Store progress functions in global scope to ensure availability
global.formatElapsedTime = progress.formatElapsedTime;
global.estimateRemaining = progress.estimateRemaining;
global.calculateRate = progress.calculateRate;
global.outputProgress = progress.outputProgress;
global.clearProgress = progress.clearProgress;
global.getProgress = progress.getProgress;
global.logError = progress.logError;

// Load database module
const { getConnection, closePool } = require('./utils/db');

// Add cancel handler
let isCancelled = false;

function cancelCalculation() {
    isCancelled = true;
    console.log('Calculation has been cancelled by user');

    // Force-terminate any query that's been running for more than 5 seconds
    try {
        const connection = getConnection();
        connection.then(async (conn) => {
            try {
                // Identify and terminate long-running queries from our application
                await conn.query(`
                    SELECT pg_cancel_backend(pid)
                    FROM pg_stat_activity
                    WHERE query_start < now() - interval '5 seconds'
                    AND application_name LIKE '%node%'
                    AND query NOT LIKE '%pg_cancel_backend%'
                `);

                // Release connection
                conn.release();
            } catch (err) {
                console.error('Error during force cancellation:', err);
                conn.release();
            }
        }).catch(err => {
            console.error('Could not get connection for cancellation:', err);
        });
    } catch (err) {
        console.error('Failed to terminate running queries:', err);
    }

    return {
        success: true,
        message: 'Calculation has been cancelled'
    };
}

// Handle SIGTERM signal for cancellation
process.on('SIGTERM', cancelCalculation);

async function updatePeriodicMetrics() {
    let connection;
    const startTime = Date.now();
    let calculateHistoryId;

    // Set a maximum execution time (30 minutes)
    const MAX_EXECUTION_TIME = 30 * 60 * 1000;
    const timeout = setTimeout(() => {
        console.error(`Calculation timed out after ${MAX_EXECUTION_TIME/1000} seconds, forcing termination`);
        // Call cancel and force exit
        cancelCalculation();
        process.exit(1);
    }, MAX_EXECUTION_TIME);

    try {
        // Read the SQL file
        const sqlFilePath = path.resolve(__dirname, 'update_periodic_metrics.sql');
        const sqlQuery = fs.readFileSync(sqlFilePath, 'utf8');

        // Clean up any previously running calculations
        connection = await getConnection();

        // Ensure the calculate_status table exists and has the correct structure
        await connection.query(`
            CREATE TABLE IF NOT EXISTS calculate_status (
                module_name TEXT PRIMARY KEY,
                last_calculation_timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
            )
        `);

        await connection.query(`
            UPDATE calculate_history
            SET
                status = 'cancelled',
                end_time = NOW(),
                duration_seconds = EXTRACT(EPOCH FROM (NOW() - start_time))::INTEGER,
                error_message = 'Previous calculation was not completed properly'
            WHERE status = 'running' AND additional_info->>'type' = 'periodic_metrics'
        `);

        // Create history record for this calculation
        const historyResult = await connection.query(`
            INSERT INTO calculate_history (
                start_time,
                status,
                additional_info
            ) VALUES (
                NOW(),
                'running',
                jsonb_build_object(
                    'type', 'periodic_metrics',
                    'sql_file', 'update_periodic_metrics.sql'
                )
            ) RETURNING id
        `);
        calculateHistoryId = historyResult.rows[0].id;

        // Initialize progress
        global.outputProgress({
            status: 'running',
            operation: 'Starting periodic metrics calculation',
            current: 0,
            total: 100,
            elapsed: '0s',
            remaining: 'Calculating...',
            rate: 0,
            percentage: '0',
            timing: {
                start_time: new Date(startTime).toISOString(),
                end_time: new Date().toISOString(),
                elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
            }
        });

        // Execute the SQL query
        global.outputProgress({
            status: 'running',
            operation: 'Executing periodic metrics SQL query',
            current: 25,
            total: 100,
            elapsed: global.formatElapsedTime(startTime),
            remaining: 'Calculating...',
            rate: 0,
            percentage: '25',
            timing: {
                start_time: new Date(startTime).toISOString(),
                end_time: new Date().toISOString(),
                elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
            }
        });

        await connection.query(sqlQuery);

        // Update calculate_status table
        await connection.query(`
            INSERT INTO calculate_status (module_name, last_calculation_timestamp)
            VALUES ($1, $2)
            ON CONFLICT (module_name) DO UPDATE
            SET last_calculation_timestamp = EXCLUDED.last_calculation_timestamp
        `, ['periodic_metrics', new Date()]);

        // Update progress to 100%
        global.outputProgress({
            status: 'complete',
            operation: 'Periodic metrics calculation complete',
            current: 100,
            total: 100,
            elapsed: global.formatElapsedTime(startTime),
            remaining: '0s',
            rate: 0,
            percentage: '100',
            timing: {
                start_time: new Date(startTime).toISOString(),
                end_time: new Date().toISOString(),
                elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
            }
        });

        // Update history with completion
        await connection.query(`
            UPDATE calculate_history
            SET
                end_time = NOW(),
                duration_seconds = $1,
                status = 'completed'
            WHERE id = $2
        `, [Math.round((Date.now() - startTime) / 1000), calculateHistoryId]);

        // Clear progress file on successful completion
        global.clearProgress();

        return {
            success: true,
            message: 'Periodic metrics calculation completed successfully',
            duration: Math.round((Date.now() - startTime) / 1000)
        };
    } catch (error) {
        const endTime = Date.now();
        const totalElapsedSeconds = Math.round((endTime - startTime) / 1000);

        // Update history with error
        if (connection && calculateHistoryId) {
            await connection.query(`
                UPDATE calculate_history
                SET
                    end_time = NOW(),
                    duration_seconds = $1,
                    status = $2,
                    error_message = $3
                WHERE id = $4
            `, [
                totalElapsedSeconds,
                isCancelled ? 'cancelled' : 'failed',
                error.message,
                calculateHistoryId
            ]);
        }

        if (isCancelled) {
            global.outputProgress({
                status: 'cancelled',
                operation: 'Calculation cancelled',
                current: 50,
                total: 100,
                elapsed: global.formatElapsedTime(startTime),
                remaining: null,
                rate: 0,
                percentage: '50',
                timing: {
                    start_time: new Date(startTime).toISOString(),
                    end_time: new Date().toISOString(),
                    elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
                }
            });
        } else {
            global.outputProgress({
                status: 'error',
                operation: 'Error: ' + error.message,
                current: 50,
                total: 100,
                elapsed: global.formatElapsedTime(startTime),
                remaining: null,
                rate: 0,
                percentage: '50',
                timing: {
                    start_time: new Date(startTime).toISOString(),
                    end_time: new Date().toISOString(),
                    elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
                }
            });
        }
        throw error;
    } finally {
        // Clear the timeout to prevent forced termination
        clearTimeout(timeout);

        // Always release connection
        if (connection) {
            try {
                connection.release();
            } catch (err) {
                console.error('Error in final cleanup:', err);
            }
        }
    }
}

// Export as a module with all necessary functions
module.exports = {
    updatePeriodicMetrics,
    cancelCalculation,
    getProgress: global.getProgress
};

// Run directly if called from command line
if (require.main === module) {
    updatePeriodicMetrics().then(() => {
        closePool().then(() => {
            process.exit(0);
        });
    }).catch(error => {
        console.error('Error:', error);
        closePool().then(() => {
            process.exit(1);
        });
    });
}
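Each runner records its finish in calculate_status through the upsert shown above, so staleness can be checked without reading the run history — for example:

    SELECT module_name,
           last_calculation_timestamp,
           now() - last_calculation_timestamp AS age
    FROM calculate_status
    WHERE module_name IN ('daily_snapshots', 'periodic_metrics', 'product_metrics')
    ORDER BY last_calculation_timestamp;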
@@ -1,306 +0,0 @@
const path = require('path');
const fs = require('fs');

// Change working directory to script directory
process.chdir(path.dirname(__filename));

// Load environment variables
require('dotenv').config({ path: path.resolve(__dirname, '../..', '.env') });

// Add error handler for uncaught exceptions
process.on('uncaughtException', (error) => {
    console.error('Uncaught Exception:', error);
    process.exit(1);
});

// Add error handler for unhandled promise rejections
process.on('unhandledRejection', (reason, promise) => {
    console.error('Unhandled Rejection at:', promise, 'reason:', reason);
    process.exit(1);
});

// Load progress module
const progress = require('./utils/progress');

// Store progress functions in global scope to ensure availability
global.formatElapsedTime = progress.formatElapsedTime;
global.estimateRemaining = progress.estimateRemaining;
global.calculateRate = progress.calculateRate;
global.outputProgress = progress.outputProgress;
global.clearProgress = progress.clearProgress;
global.getProgress = progress.getProgress;
global.logError = progress.logError;

// Load database module
const { getConnection, closePool } = require('./utils/db');

// Add cancel handler
let isCancelled = false;

function cancelCalculation() {
    isCancelled = true;
    console.log('Calculation has been cancelled by user');

    // Force-terminate any query that's been running for more than 5 seconds
    try {
        const connection = getConnection();
        connection.then(async (conn) => {
            try {
                // Identify and terminate long-running queries from our application
                await conn.query(`
                    SELECT pg_cancel_backend(pid)
                    FROM pg_stat_activity
                    WHERE query_start < now() - interval '5 seconds'
                    AND application_name LIKE '%node%'
                    AND query NOT LIKE '%pg_cancel_backend%'
                `);

                // Release connection
                conn.release();
            } catch (err) {
                console.error('Error during force cancellation:', err);
                conn.release();
            }
        }).catch(err => {
            console.error('Could not get connection for cancellation:', err);
        });
    } catch (err) {
        console.error('Failed to terminate running queries:', err);
    }

    return {
        success: true,
        message: 'Calculation has been cancelled'
    };
}

// Handle SIGTERM signal for cancellation
process.on('SIGTERM', cancelCalculation);

async function updateProductMetrics() {
    let connection;
    const startTime = Date.now();
    let calculateHistoryId;

    // Set a maximum execution time (30 minutes)
    const MAX_EXECUTION_TIME = 30 * 60 * 1000;
    const timeout = setTimeout(() => {
        console.error(`Calculation timed out after ${MAX_EXECUTION_TIME/1000} seconds, forcing termination`);
        // Call cancel and force exit
        cancelCalculation();
        process.exit(1);
    }, MAX_EXECUTION_TIME);

    try {
        // Read the SQL file
        const sqlFilePath = path.resolve(__dirname, 'update_product_metrics.sql');
        const sqlQuery = fs.readFileSync(sqlFilePath, 'utf8');

        // Clean up any previously running calculations
        connection = await getConnection();

        // Ensure the calculate_status table exists and has the correct structure
        await connection.query(`
            CREATE TABLE IF NOT EXISTS calculate_status (
                module_name TEXT PRIMARY KEY,
                last_calculation_timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
            )
        `);

        await connection.query(`
            UPDATE calculate_history
            SET
                status = 'cancelled',
                end_time = NOW(),
                duration_seconds = EXTRACT(EPOCH FROM (NOW() - start_time))::INTEGER,
                error_message = 'Previous calculation was not completed properly'
            WHERE status = 'running' AND additional_info->>'type' = 'product_metrics'
        `);

        // Create history record for this calculation
        const historyResult = await connection.query(`
            INSERT INTO calculate_history (
                start_time,
                status,
                additional_info
            ) VALUES (
                NOW(),
                'running',
                jsonb_build_object(
                    'type', 'product_metrics',
                    'sql_file', 'update_product_metrics.sql'
                )
            ) RETURNING id
        `);
        calculateHistoryId = historyResult.rows[0].id;

        // Initialize progress
        global.outputProgress({
            status: 'running',
            operation: 'Starting product metrics calculation',
            current: 0,
            total: 100,
            elapsed: '0s',
            remaining: 'Calculating...',
            rate: 0,
            percentage: '0',
            timing: {
                start_time: new Date(startTime).toISOString(),
                end_time: new Date().toISOString(),
                elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
            }
        });

        // Execute the SQL query
        global.outputProgress({
            status: 'running',
            operation: 'Executing product metrics SQL query',
            current: 25,
            total: 100,
            elapsed: global.formatElapsedTime(startTime),
            remaining: 'Calculating...',
            rate: 0,
            percentage: '25',
            timing: {
                start_time: new Date(startTime).toISOString(),
                end_time: new Date().toISOString(),
                elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
            }
        });

        await connection.query(sqlQuery);

        // Update calculate_status table
        await connection.query(`
            INSERT INTO calculate_status (module_name, last_calculation_timestamp)
            VALUES ($1, $2)
            ON CONFLICT (module_name) DO UPDATE
            SET last_calculation_timestamp = EXCLUDED.last_calculation_timestamp
        `, ['product_metrics', new Date()]);

        // Update progress to 100%
        global.outputProgress({
            status: 'complete',
            operation: 'Product metrics calculation complete',
            current: 100,
            total: 100,
            elapsed: global.formatElapsedTime(startTime),
            remaining: '0s',
            rate: 0,
            percentage: '100',
            timing: {
                start_time: new Date(startTime).toISOString(),
                end_time: new Date().toISOString(),
                elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
            }
        });

        // Update history with completion
        await connection.query(`
            UPDATE calculate_history
            SET
                end_time = NOW(),
                duration_seconds = $1,
                status = 'completed'
            WHERE id = $2
        `, [Math.round((Date.now() - startTime) / 1000), calculateHistoryId]);

        // Clear progress file on successful completion
        global.clearProgress();

        return {
            success: true,
            message: 'Product metrics calculation completed successfully',
            duration: Math.round((Date.now() - startTime) / 1000)
        };
    } catch (error) {
        const endTime = Date.now();
        const totalElapsedSeconds = Math.round((endTime - startTime) / 1000);

        // Update history with error
        if (connection && calculateHistoryId) {
            await connection.query(`
                UPDATE calculate_history
                SET
                    end_time = NOW(),
                    duration_seconds = $1,
                    status = $2,
                    error_message = $3
                WHERE id = $4
            `, [
                totalElapsedSeconds,
                isCancelled ? 'cancelled' : 'failed',
                error.message,
                calculateHistoryId
            ]);
        }

        if (isCancelled) {
            global.outputProgress({
                status: 'cancelled',
                operation: 'Calculation cancelled',
                current: 50,
                total: 100,
                elapsed: global.formatElapsedTime(startTime),
                remaining: null,
                rate: 0,
                percentage: '50',
                timing: {
                    start_time: new Date(startTime).toISOString(),
                    end_time: new Date().toISOString(),
                    elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
                }
            });
        } else {
            global.outputProgress({
                status: 'error',
                operation: 'Error: ' + error.message,
                current: 50,
                total: 100,
                elapsed: global.formatElapsedTime(startTime),
                remaining: null,
                rate: 0,
                percentage: '50',
                timing: {
                    start_time: new Date(startTime).toISOString(),
                    end_time: new Date().toISOString(),
                    elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
                }
            });
        }
        throw error;
    } finally {
        // Clear the timeout to prevent forced termination
        clearTimeout(timeout);

        // Always release connection
        if (connection) {
            try {
                connection.release();
            } catch (err) {
                console.error('Error in final cleanup:', err);
            }
        }
    }
}

// Export as a module with all necessary functions
module.exports = {
    updateProductMetrics,
    cancelCalculation,
    getProgress: global.getProgress
};

// Run directly if called from command line
if (require.main === module) {
    updateProductMetrics().then(() => {
        closePool().then(() => {
            process.exit(0);
        });
    }).catch(error => {
        console.error('Error:', error);
        closePool().then(() => {
            process.exit(1);
        });
    });
}
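Across all three runners, calculate_history doubles as a run log: rows move from 'running' to 'completed', 'failed', or 'cancelled'. A sketch of an audit query over the columns the scripts write (no other columns are assumed):

    SELECT id, status, start_time, duration_seconds,
           additional_info->>'type' AS run_type,
           error_message
    FROM calculate_history
    ORDER BY start_time DESC
    LIMIT 20;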
@@ -35,18 +35,59 @@ BEGIN
FROM public.products p -- Start from products to include those with no orders today
LEFT JOIN public.orders o
    ON p.pid = o.pid
    AND o.date >= _target_date -- Filter orders for the target date
    AND o.date < _target_date + INTERVAL '1 day'
    AND o.date::date = _target_date -- Cast to date to ensure compatibility regardless of original type
GROUP BY p.pid, p.sku
),
ReceivingData AS (
    SELECT
        po.pid,
        COALESCE(SUM((rh.item->>'qty')::numeric), 0) AS units_received,
        COALESCE(SUM((rh.item->>'qty')::numeric * COALESCE((rh.item->>'cost')::numeric, po.cost_price)), 0.00) AS cost_received
        -- Prioritize the actual table fields over the JSON data
        COALESCE(
            -- First try the received field from purchase_orders table
            SUM(CASE WHEN po.date::date = _target_date THEN po.received ELSE 0 END),

            -- Otherwise fall back to the receiving_history JSON as secondary source
            SUM(
                CASE
                    WHEN (rh.item->>'date')::date = _target_date THEN (rh.item->>'qty')::numeric
                    WHEN (rh.item->>'received_at')::date = _target_date THEN (rh.item->>'qty')::numeric
                    WHEN (rh.item->>'receipt_date')::date = _target_date THEN (rh.item->>'qty')::numeric
                    ELSE 0
                END
            ),
            0
        ) AS units_received,

        COALESCE(
            -- First try the actual cost_price from purchase_orders
            SUM(CASE WHEN po.date::date = _target_date THEN po.received * po.cost_price ELSE 0 END),

            -- Otherwise fall back to receiving_history JSON
            SUM(
                CASE
                    WHEN (rh.item->>'date')::date = _target_date THEN (rh.item->>'qty')::numeric
                    WHEN (rh.item->>'received_at')::date = _target_date THEN (rh.item->>'qty')::numeric
                    WHEN (rh.item->>'receipt_date')::date = _target_date THEN (rh.item->>'qty')::numeric
                    ELSE 0
                END
                * COALESCE((rh.item->>'cost')::numeric, po.cost_price)
            ),
            0.00
        ) AS cost_received
    FROM public.purchase_orders po
    CROSS JOIN LATERAL jsonb_array_elements(po.receiving_history) AS rh(item)
    WHERE (rh.item->>'received_at')::date = _target_date -- Filter receipts for the target date
    LEFT JOIN LATERAL jsonb_array_elements(po.receiving_history) AS rh(item) ON
        jsonb_typeof(po.receiving_history) = 'array' AND
        jsonb_array_length(po.receiving_history) > 0 AND
        (
            (rh.item->>'date')::date = _target_date OR
            (rh.item->>'received_at')::date = _target_date OR
            (rh.item->>'receipt_date')::date = _target_date
        )
    -- Include POs with the current date or relevant receiving_history
    WHERE
        po.date::date = _target_date OR
        jsonb_typeof(po.receiving_history) = 'array' AND
        jsonb_array_length(po.receiving_history) > 0
    GROUP BY po.pid
),
CurrentStock AS (

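The hunk above pulls receipt dates out of the receiving_history JSON, accepting any of three key spellings ('date', 'received_at', 'receipt_date'). On a hand-built row the extraction behaves like this — a sketch with made-up data rather than the production schema:

    SELECT (rh.item->>'qty')::numeric AS qty,
           COALESCE(
               (rh.item->>'date')::date,
               (rh.item->>'received_at')::date,
               (rh.item->>'receipt_date')::date
           ) AS receipt_date
    FROM (SELECT '[{"qty": "5", "received_at": "2024-01-02"},
                   {"qty": "3", "date": "2024-01-05"}]'::jsonb AS receiving_history) AS po
    CROSS JOIN LATERAL jsonb_array_elements(po.receiving_history) AS rh(item);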
@@ -58,15 +58,41 @@ BEGIN
p.pid,
MIN(o.date)::date AS date_first_sold,
MAX(o.date)::date AS max_order_date, -- Use MAX for potential recalc of date_last_sold
MIN(rh.first_receipt_date) AS date_first_received_calc,
MAX(rh.last_receipt_date) AS date_last_received_calc

-- For first received date, try table data first then fall back to JSON
COALESCE(
    MIN(po.date)::date, -- Try purchase_order date first
    MIN(rh.first_receipt_date) -- Fall back to JSON data if needed
) AS date_first_received_calc,

-- If we only have one receipt date (first = last), use that for last_received too
COALESCE(
    MAX(po.date)::date, -- Try purchase_order date first
    NULLIF(MAX(rh.last_receipt_date), NULL),
    MIN(rh.first_receipt_date)
) AS date_last_received_calc
FROM public.products p
LEFT JOIN public.orders o ON p.pid = o.pid AND o.quantity > 0 AND o.status NOT IN ('canceled', 'returned')
LEFT JOIN public.purchase_orders po ON p.pid = po.pid AND po.received > 0
LEFT JOIN (
    SELECT
        po.pid,
        MIN((rh.item->>'received_at')::date) as first_receipt_date,
        MAX((rh.item->>'received_at')::date) as last_receipt_date
        MIN(
            CASE
                WHEN rh.item->>'date' IS NOT NULL THEN (rh.item->>'date')::date
                WHEN rh.item->>'received_at' IS NOT NULL THEN (rh.item->>'received_at')::date
                WHEN rh.item->>'receipt_date' IS NOT NULL THEN (rh.item->>'receipt_date')::date
                ELSE NULL
            END
        ) as first_receipt_date,
        MAX(
            CASE
                WHEN rh.item->>'date' IS NOT NULL THEN (rh.item->>'date')::date
                WHEN rh.item->>'received_at' IS NOT NULL THEN (rh.item->>'received_at')::date
                WHEN rh.item->>'receipt_date' IS NOT NULL THEN (rh.item->>'receipt_date')::date
                ELSE NULL
            END
        ) as last_receipt_date
    FROM public.purchase_orders po
    CROSS JOIN LATERAL jsonb_array_elements(po.receiving_history) AS rh(item)
    WHERE jsonb_typeof(po.receiving_history) = 'array' AND jsonb_array_length(po.receiving_history) > 0
@@ -77,7 +103,14 @@ BEGIN
SnapshotAggregates AS (
    SELECT
        pid,
        -- Rolling periods (ensure dates are inclusive/exclusive as needed)
        -- Get the counts of all available data
        COUNT(DISTINCT snapshot_date) AS available_days,

        -- Rolling periods with no time constraint - just sum everything we have
        SUM(units_sold) AS total_units_sold,
        SUM(net_revenue) AS total_net_revenue,

        -- Specific time windows if we have enough data
        SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '6 days' THEN units_sold ELSE 0 END) AS sales_7d,
        SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '6 days' THEN net_revenue ELSE 0 END) AS revenue_7d,
        SUM(CASE WHEN snapshot_date >= _current_date - INTERVAL '13 days' THEN units_sold ELSE 0 END) AS sales_14d,
@@ -103,7 +136,7 @@ BEGIN
        AVG(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' THEN eod_stock_retail END) AS avg_stock_retail_30d,
        AVG(CASE WHEN snapshot_date >= _current_date - INTERVAL '29 days' THEN eod_stock_gross END) AS avg_stock_gross_30d,

        -- Lifetime
        -- Lifetime - should match total values above
        SUM(units_sold) AS lifetime_sales,
        SUM(net_revenue) AS lifetime_revenue,

@@ -111,8 +144,6 @@ BEGIN
        SUM(CASE WHEN snapshot_date = _current_date - INTERVAL '1 day' THEN units_sold ELSE 0 END) as yesterday_sales

    FROM public.daily_product_snapshots
    WHERE snapshot_date <= _current_date -- Include today's snapshot
    AND snapshot_date >= _current_date - INTERVAL '365 days' -- Limit history scan slightly
    GROUP BY pid
),
FirstPeriodMetrics AS (
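The conditional-aggregate pattern in SnapshotAggregates computes every rolling window in a single scan of the snapshots table. Stripped down to two windows, the shape is (a sketch against the same table):

    SELECT pid,
           SUM(units_sold) AS lifetime_sales,                            -- no date filter
           SUM(CASE WHEN snapshot_date >= CURRENT_DATE - INTERVAL '6 days'
                    THEN units_sold ELSE 0 END) AS sales_7d,             -- last 7 calendar days
           AVG(CASE WHEN snapshot_date >= CURRENT_DATE - INTERVAL '29 days'
                    THEN eod_stock_retail END) AS avg_stock_retail_30d   -- NULL outside the window, so AVG skips it
    FROM public.daily_product_snapshots
    GROUP BY pid;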
@@ -191,7 +222,9 @@ BEGIN
sa.stockout_days_30d, sa.sales_365d, sa.revenue_365d,
sa.avg_stock_units_30d, sa.avg_stock_cost_30d, sa.avg_stock_retail_30d, sa.avg_stock_gross_30d,
sa.received_qty_30d, sa.received_cost_30d,
sa.lifetime_sales, sa.lifetime_revenue,
-- Use total counts for lifetime values to ensure we have data even with limited history
COALESCE(sa.total_units_sold, sa.lifetime_sales) AS lifetime_sales,
COALESCE(sa.total_net_revenue, sa.lifetime_revenue) AS lifetime_revenue,
fpm.first_7_days_sales, fpm.first_7_days_revenue, fpm.first_30_days_sales, fpm.first_30_days_revenue,
fpm.first_60_days_sales, fpm.first_60_days_revenue, fpm.first_90_days_sales, fpm.first_90_days_revenue,

@@ -1,321 +0,0 @@
|
||||
const { outputProgress, formatElapsedTime, estimateRemaining, calculateRate, logError } = require('./utils/progress');
|
||||
const { getConnection } = require('./utils/db');
|
||||
|
||||
async function calculateBrandMetrics(startTime, totalProducts, processedCount = 0, isCancelled = false) {
|
||||
const connection = await getConnection();
|
||||
let success = false;
|
||||
let processedOrders = 0;
|
||||
|
||||
try {
|
||||
if (isCancelled) {
|
||||
outputProgress({
|
||||
status: 'cancelled',
|
||||
operation: 'Brand metrics calculation cancelled',
|
||||
current: processedCount,
|
||||
total: totalProducts,
|
||||
elapsed: formatElapsedTime(startTime),
|
||||
remaining: null,
|
||||
rate: calculateRate(startTime, processedCount),
|
||||
percentage: ((processedCount / totalProducts) * 100).toFixed(1),
|
||||
timing: {
|
||||
start_time: new Date(startTime).toISOString(),
|
||||
end_time: new Date().toISOString(),
|
||||
elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
|
||||
}
|
||||
});
|
||||
return {
|
||||
processedProducts: processedCount,
|
||||
processedOrders: 0,
|
||||
processedPurchaseOrders: 0,
|
||||
success
|
||||
};
|
||||
}
|
||||
|
||||
// Get order count that will be processed
|
||||
const orderCount = await connection.query(`
|
||||
SELECT COUNT(*) as count
|
||||
FROM orders o
|
||||
WHERE o.canceled = false
|
||||
`);
|
||||
processedOrders = parseInt(orderCount.rows[0].count);
|
||||
|
||||
outputProgress({
|
||||
status: 'running',
|
||||
operation: 'Starting brand metrics calculation',
|
||||
current: processedCount,
|
||||
total: totalProducts,
|
||||
elapsed: formatElapsedTime(startTime),
|
||||
remaining: estimateRemaining(startTime, processedCount, totalProducts),
|
||||
rate: calculateRate(startTime, processedCount),
|
||||
percentage: ((processedCount / totalProducts) * 100).toFixed(1),
|
||||
timing: {
|
||||
start_time: new Date(startTime).toISOString(),
|
||||
end_time: new Date().toISOString(),
|
||||
elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
|
||||
}
|
||||
});
|
||||
|
||||
// Calculate brand metrics with optimized queries
|
||||
await connection.query(`
|
||||
INSERT INTO brand_metrics (
|
||||
brand,
|
||||
product_count,
|
||||
active_products,
|
||||
total_stock_units,
|
||||
total_stock_cost,
|
||||
total_stock_retail,
|
||||
total_revenue,
|
||||
avg_margin,
|
||||
growth_rate
|
||||
)
|
||||
WITH filtered_products AS (
|
||||
SELECT
|
||||
p.*,
|
||||
CASE
|
||||
WHEN p.stock_quantity <= 5000 AND p.stock_quantity >= 0
|
||||
THEN p.pid
|
||||
END as valid_pid,
|
||||
CASE
|
||||
WHEN p.visible = true
|
||||
AND p.stock_quantity <= 5000
|
||||
AND p.stock_quantity >= 0
|
||||
THEN p.pid
|
||||
END as active_pid,
|
||||
CASE
|
||||
WHEN p.stock_quantity IS NULL
|
||||
OR p.stock_quantity < 0
|
||||
OR p.stock_quantity > 5000
|
||||
THEN 0
|
||||
ELSE p.stock_quantity
|
||||
END as valid_stock
|
||||
FROM products p
|
||||
WHERE p.brand IS NOT NULL
|
||||
),
|
||||
sales_periods AS (
|
||||
SELECT
|
||||
p.brand,
|
||||
SUM(o.quantity * (o.price - COALESCE(o.discount, 0))) as period_revenue,
|
||||
SUM(o.quantity * (o.price - COALESCE(o.discount, 0) - p.cost_price)) as period_margin,
|
||||
COUNT(DISTINCT DATE(o.date)) as period_days,
|
||||
CASE
|
||||
WHEN o.date >= CURRENT_DATE - INTERVAL '3 months' THEN 'current'
|
||||
WHEN o.date BETWEEN CURRENT_DATE - INTERVAL '15 months'
|
||||
AND CURRENT_DATE - INTERVAL '12 months' THEN 'previous'
|
||||
END as period_type
|
||||
FROM filtered_products p
|
||||
JOIN orders o ON p.pid = o.pid
|
||||
WHERE o.canceled = false
|
||||
AND o.date >= CURRENT_DATE - INTERVAL '15 months'
|
||||
GROUP BY p.brand, period_type
|
||||
),
|
||||
brand_data AS (
|
||||
SELECT
|
||||
p.brand,
|
||||
COUNT(DISTINCT p.valid_pid) as product_count,
|
||||
COUNT(DISTINCT p.active_pid) as active_products,
|
||||
SUM(p.valid_stock) as total_stock_units,
|
||||
SUM(p.valid_stock * p.cost_price) as total_stock_cost,
|
||||
SUM(p.valid_stock * p.price) as total_stock_retail,
|
||||
COALESCE(SUM(o.quantity * (o.price - COALESCE(o.discount, 0))), 0) as total_revenue,
|
||||
CASE
|
||||
WHEN SUM(o.quantity * o.price) > 0
|
||||
THEN GREATEST(
|
||||
-100.0,
|
||||
LEAST(
|
||||
100.0,
|
||||
(
|
||||
SUM(o.quantity * o.price) - -- Use gross revenue (before discounts)
|
||||
SUM(o.quantity * COALESCE(p.cost_price, 0)) -- Total costs
|
||||
) * 100.0 /
|
||||
NULLIF(SUM(o.quantity * o.price), 0) -- Divide by gross revenue
|
||||
)
|
||||
)
|
||||
ELSE 0
|
||||
END as avg_margin
|
||||
FROM filtered_products p
|
||||
LEFT JOIN orders o ON p.pid = o.pid AND o.canceled = false
|
||||
GROUP BY p.brand
|
||||
)
|
||||
SELECT
|
||||
bd.brand,
|
||||
bd.product_count,
|
||||
bd.active_products,
|
||||
bd.total_stock_units,
|
||||
bd.total_stock_cost,
|
||||
bd.total_stock_retail,
|
||||
bd.total_revenue,
|
||||
bd.avg_margin,
|
||||
CASE
|
||||
WHEN MAX(CASE WHEN sp.period_type = 'previous' THEN sp.period_revenue END) = 0
|
||||
AND MAX(CASE WHEN sp.period_type = 'current' THEN sp.period_revenue END) > 0
|
||||
THEN 100.0
|
||||
WHEN MAX(CASE WHEN sp.period_type = 'previous' THEN sp.period_revenue END) = 0
|
||||
THEN 0.0
|
||||
ELSE GREATEST(
|
||||
-100.0,
|
||||
LEAST(
|
||||
((MAX(CASE WHEN sp.period_type = 'current' THEN sp.period_revenue END) -
|
||||
MAX(CASE WHEN sp.period_type = 'previous' THEN sp.period_revenue END)) /
|
||||
NULLIF(ABS(MAX(CASE WHEN sp.period_type = 'previous' THEN sp.period_revenue END)), 0)) * 100.0,
|
||||
999.99
|
||||
)
|
||||
)
|
||||
END as growth_rate
|
||||
FROM brand_data bd
|
||||
LEFT JOIN sales_periods sp ON bd.brand = sp.brand
|
||||
GROUP BY bd.brand, bd.product_count, bd.active_products, bd.total_stock_units,
|
||||
bd.total_stock_cost, bd.total_stock_retail, bd.total_revenue, bd.avg_margin
|
||||
ON CONFLICT (brand) DO UPDATE
|
||||
SET
|
||||
product_count = EXCLUDED.product_count,
|
||||
active_products = EXCLUDED.active_products,
|
||||
total_stock_units = EXCLUDED.total_stock_units,
|
||||
total_stock_cost = EXCLUDED.total_stock_cost,
|
||||
total_stock_retail = EXCLUDED.total_stock_retail,
|
||||
total_revenue = EXCLUDED.total_revenue,
|
||||
avg_margin = EXCLUDED.avg_margin,
|
||||
growth_rate = EXCLUDED.growth_rate,
|
||||
last_calculated_at = CURRENT_TIMESTAMP
|
||||
`);
|
||||
|
||||
    processedCount = Math.floor(totalProducts * 0.97);
    outputProgress({
      status: 'running',
      operation: 'Brand metrics calculated, starting time-based metrics',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    if (isCancelled) return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0,
      success
    };

    // Calculate brand time-based metrics with optimized query
    await connection.query(`
      INSERT INTO brand_time_metrics (
        brand,
        year,
        month,
        product_count,
        active_products,
        total_stock_units,
        total_stock_cost,
        total_stock_retail,
        total_revenue,
        avg_margin
      )
      WITH filtered_products AS (
        SELECT
          p.*,
          CASE WHEN p.stock_quantity <= 5000 THEN p.pid END as valid_pid,
          CASE WHEN p.visible = true AND p.stock_quantity <= 5000 THEN p.pid END as active_pid,
          CASE
            WHEN p.stock_quantity IS NULL OR p.stock_quantity < 0 OR p.stock_quantity > 5000 THEN 0
            ELSE p.stock_quantity
          END as valid_stock
        FROM products p
        WHERE p.brand IS NOT NULL
      ),
      monthly_metrics AS (
        SELECT
          p.brand,
          EXTRACT(YEAR FROM o.date::timestamp with time zone) as year,
          EXTRACT(MONTH FROM o.date::timestamp with time zone) as month,
          COUNT(DISTINCT p.valid_pid) as product_count,
          COUNT(DISTINCT p.active_pid) as active_products,
          SUM(p.valid_stock) as total_stock_units,
          SUM(p.valid_stock * p.cost_price) as total_stock_cost,
          SUM(p.valid_stock * p.price) as total_stock_retail,
          SUM(o.quantity * o.price) as total_revenue,
          CASE
            WHEN SUM(o.quantity * o.price) > 0
            THEN GREATEST(
              -100.0,
              LEAST(
                100.0,
                (
                  SUM(o.quantity * o.price) - -- Use gross revenue (before discounts)
                  SUM(o.quantity * COALESCE(p.cost_price, 0)) -- Total costs
                ) * 100.0 /
                NULLIF(SUM(o.quantity * o.price), 0) -- Divide by gross revenue
              )
            )
            ELSE 0
          END as avg_margin
        FROM filtered_products p
        LEFT JOIN orders o ON p.pid = o.pid AND o.canceled = false
        WHERE o.date >= CURRENT_DATE - INTERVAL '12 months'
        GROUP BY p.brand, EXTRACT(YEAR FROM o.date::timestamp with time zone), EXTRACT(MONTH FROM o.date::timestamp with time zone)
      )
      SELECT *
      FROM monthly_metrics
      ON CONFLICT (brand, year, month) DO UPDATE
      SET
        product_count = EXCLUDED.product_count,
        active_products = EXCLUDED.active_products,
        total_stock_units = EXCLUDED.total_stock_units,
        total_stock_cost = EXCLUDED.total_stock_cost,
        total_stock_retail = EXCLUDED.total_stock_retail,
        total_revenue = EXCLUDED.total_revenue,
        avg_margin = EXCLUDED.avg_margin
    `);

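    // Reference sketch of the gross-margin CASE above (illustrative and unused):
    // margin % = (revenue - cost) * 100 / revenue, clamped to [-100, 100],
    // falling back to 0 when there is no revenue.
    const sketchMarginPercent = (revenue, cost) =>
      revenue > 0
        ? Math.max(-100.0, Math.min(100.0, ((revenue - cost) * 100.0) / revenue))
        : 0;
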
    processedCount = Math.floor(totalProducts * 0.99);
    outputProgress({
      status: 'running',
      operation: 'Brand time-based metrics calculated',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    // If we get here, everything completed successfully
    success = true;

    // Update calculate_status
    await connection.query(`
      INSERT INTO calculate_status (module_name, last_calculation_timestamp)
      VALUES ('brand_metrics', NOW())
      ON CONFLICT (module_name) DO UPDATE
      SET last_calculation_timestamp = NOW()
    `);

    return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0,
      success
    };

  } catch (error) {
    success = false;
    logError(error, 'Error calculating brand metrics');
    throw error;
  } finally {
    if (connection) {
      connection.release();
    }
  }
}

module.exports = calculateBrandMetrics;
@@ -1,554 +0,0 @@
const { outputProgress, formatElapsedTime, estimateRemaining, calculateRate, logError } = require('./utils/progress');
const { getConnection } = require('./utils/db');

async function calculateCategoryMetrics(startTime, totalProducts, processedCount = 0, isCancelled = false) {
  const connection = await getConnection();
  let success = false;
  let processedOrders = 0;

  try {
    if (isCancelled) {
      outputProgress({
        status: 'cancelled',
        operation: 'Category metrics calculation cancelled',
        current: processedCount,
        total: totalProducts,
        elapsed: formatElapsedTime(startTime),
        remaining: null,
        rate: calculateRate(startTime, processedCount),
        percentage: ((processedCount / totalProducts) * 100).toFixed(1),
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
        }
      });
      return {
        processedProducts: processedCount,
        processedOrders: 0,
        processedPurchaseOrders: 0,
        success
      };
    }

    // Get order count that will be processed
    const orderCount = await connection.query(`
      SELECT COUNT(*) as count
      FROM orders o
      WHERE o.canceled = false
    `);
    processedOrders = parseInt(orderCount.rows[0].count);

    outputProgress({
      status: 'running',
      operation: 'Starting category metrics calculation',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    // First, calculate base category metrics
    await connection.query(`
      INSERT INTO category_metrics (
        category_id,
        product_count,
        active_products,
        total_value,
        status,
        last_calculated_at
      )
      SELECT
        c.cat_id,
        COUNT(DISTINCT p.pid) as product_count,
        COUNT(DISTINCT CASE WHEN p.visible = true THEN p.pid END) as active_products,
        COALESCE(SUM(p.stock_quantity * p.cost_price), 0) as total_value,
        c.status,
        NOW() as last_calculated_at
      FROM categories c
      LEFT JOIN product_categories pc ON c.cat_id = pc.cat_id
      LEFT JOIN products p ON pc.pid = p.pid
      GROUP BY c.cat_id, c.status
      ON CONFLICT (category_id) DO UPDATE
      SET
        product_count = EXCLUDED.product_count,
        active_products = EXCLUDED.active_products,
        total_value = EXCLUDED.total_value,
        status = EXCLUDED.status,
        last_calculated_at = EXCLUDED.last_calculated_at
    `);

    processedCount = Math.floor(totalProducts * 0.90);
    outputProgress({
      status: 'running',
      operation: 'Base category metrics calculated, updating with margin data',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    if (isCancelled) return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0,
      success
    };

    // Then update with margin and turnover data
    await connection.query(`
      WITH category_sales AS (
        SELECT
          pc.cat_id,
          SUM(o.quantity * o.price) as total_sales,
          SUM(o.quantity * (o.price - p.cost_price)) as total_margin,
          SUM(o.quantity) as units_sold,
          AVG(GREATEST(p.stock_quantity, 0)) as avg_stock,
          COUNT(DISTINCT DATE(o.date)) as active_days
        FROM product_categories pc
        JOIN products p ON pc.pid = p.pid
        JOIN orders o ON p.pid = o.pid
        LEFT JOIN turnover_config tc ON
          (tc.category_id = pc.cat_id AND tc.vendor = p.vendor) OR
          (tc.category_id = pc.cat_id AND tc.vendor IS NULL) OR
          (tc.category_id IS NULL AND tc.vendor = p.vendor) OR
          (tc.category_id IS NULL AND tc.vendor IS NULL)
        WHERE o.canceled = false
          AND o.date >= CURRENT_DATE - (COALESCE(tc.calculation_period_days, 30) || ' days')::INTERVAL
        GROUP BY pc.cat_id
      )
      UPDATE category_metrics
      SET
        avg_margin = COALESCE(cs.total_margin * 100.0 / NULLIF(cs.total_sales, 0), 0),
        turnover_rate = CASE
          WHEN cs.avg_stock > 0 AND cs.active_days > 0
          THEN LEAST(
            (cs.units_sold / cs.avg_stock) * (365.0 / cs.active_days),
            999.99
          )
          ELSE 0
        END,
        last_calculated_at = NOW()
      FROM category_sales cs
      WHERE category_id = cs.cat_id
    `);

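    // Reference sketch of the annualized turnover cap above (illustrative and
    // unused): units sold over average stock, scaled to a year by the number of
    // active selling days, capped at 999.99 to keep outliers out of the UI.
    const sketchTurnover = (unitsSold, avgStock, activeDays) =>
      avgStock > 0 && activeDays > 0
        ? Math.min((unitsSold / avgStock) * (365.0 / activeDays), 999.99)
        : 0;
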
    processedCount = Math.floor(totalProducts * 0.95);
    outputProgress({
      status: 'running',
      operation: 'Margin data updated, calculating growth rates',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    if (isCancelled) return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0,
      success
    };

    // Finally update growth rates
    await connection.query(`
      WITH current_period AS (
        SELECT
          pc.cat_id,
          SUM(o.quantity * (o.price - COALESCE(o.discount, 0)) /
            (1 + COALESCE(ss.seasonality_factor, 0))) as revenue,
          SUM(o.quantity * (o.price - COALESCE(o.discount, 0) - p.cost_price)) as gross_profit,
          COUNT(DISTINCT DATE(o.date)) as days
        FROM product_categories pc
        JOIN products p ON pc.pid = p.pid
        JOIN orders o ON p.pid = o.pid
        LEFT JOIN sales_seasonality ss ON EXTRACT(MONTH FROM o.date) = ss.month
        WHERE o.canceled = false
          AND o.date >= CURRENT_DATE - INTERVAL '3 months'
        GROUP BY pc.cat_id
      ),
      previous_period AS (
        SELECT
          pc.cat_id,
          SUM(o.quantity * (o.price - COALESCE(o.discount, 0)) /
            (1 + COALESCE(ss.seasonality_factor, 0))) as revenue,
          COUNT(DISTINCT DATE(o.date)) as days
        FROM product_categories pc
        JOIN products p ON pc.pid = p.pid
        JOIN orders o ON p.pid = o.pid
        LEFT JOIN sales_seasonality ss ON EXTRACT(MONTH FROM o.date) = ss.month
        WHERE o.canceled = false
          AND o.date BETWEEN CURRENT_DATE - INTERVAL '15 months'
            AND CURRENT_DATE - INTERVAL '12 months'
        GROUP BY pc.cat_id
      ),
      trend_data AS (
        SELECT
          pc.cat_id,
          EXTRACT(MONTH FROM o.date) as month,
          SUM(o.quantity * (o.price - COALESCE(o.discount, 0)) /
            (1 + COALESCE(ss.seasonality_factor, 0))) as revenue,
          COUNT(DISTINCT DATE(o.date)) as days_in_month
        FROM product_categories pc
        JOIN products p ON pc.pid = p.pid
        JOIN orders o ON p.pid = o.pid
        LEFT JOIN sales_seasonality ss ON EXTRACT(MONTH FROM o.date) = ss.month
        WHERE o.canceled = false
          AND o.date >= CURRENT_DATE - INTERVAL '15 months'
        GROUP BY pc.cat_id, EXTRACT(MONTH FROM o.date)
      ),
      trend_stats AS (
        SELECT
          cat_id,
          COUNT(*) as n,
          AVG(month) as avg_x,
          AVG(revenue / NULLIF(days_in_month, 0)) as avg_y,
          SUM(month * (revenue / NULLIF(days_in_month, 0))) as sum_xy,
          SUM(month * month) as sum_xx
        FROM trend_data
        GROUP BY cat_id
        HAVING COUNT(*) >= 6
      ),
      trend_analysis AS (
        SELECT
          cat_id,
          -- Least-squares slope: (sum_xy - n*avg_x*avg_y) / (sum_xx - n*avg_x*avg_x)
          (sum_xy - (n * avg_x * avg_y)) /
            NULLIF(sum_xx - (n * avg_x * avg_x), 0) as trend_slope,
          avg_y as avg_daily_revenue
        FROM trend_stats
      ),
      margin_calc AS (
        SELECT
          pc.cat_id,
          CASE
            WHEN SUM(o.quantity * o.price) > 0 THEN
              GREATEST(
                -100.0,
                LEAST(
                  100.0,
                  (
                    SUM(o.quantity * o.price) - -- Use gross revenue (before discounts)
                    SUM(o.quantity * COALESCE(p.cost_price, 0)) -- Total costs
                  ) * 100.0 /
                  NULLIF(SUM(o.quantity * o.price), 0) -- Divide by gross revenue
                )
              )
            ELSE NULL
          END as avg_margin
        FROM product_categories pc
        JOIN products p ON pc.pid = p.pid
        JOIN orders o ON p.pid = o.pid
        WHERE o.canceled = false
          AND o.date >= CURRENT_DATE - INTERVAL '3 months'
        GROUP BY pc.cat_id
      ),
      combined_metrics AS (
        SELECT
          COALESCE(cp.cat_id, pp.cat_id) as category_id,
          CASE
            WHEN pp.revenue = 0 AND COALESCE(cp.revenue, 0) > 0 THEN 100.0
            WHEN pp.revenue = 0 OR cp.revenue IS NULL THEN 0.0
            WHEN ta.trend_slope IS NOT NULL THEN
              GREATEST(
                -100.0,
                LEAST(
                  (ta.trend_slope / NULLIF(ta.avg_daily_revenue, 0)) * 365 * 100,
                  999.99
                )
              )
            ELSE
              GREATEST(
                -100.0,
                LEAST(
                  ((COALESCE(cp.revenue, 0) - pp.revenue) /
                    NULLIF(ABS(pp.revenue), 0)) * 100.0,
                  999.99
                )
              )
          END as growth_rate,
          mc.avg_margin
        FROM current_period cp
        FULL OUTER JOIN previous_period pp ON cp.cat_id = pp.cat_id
        LEFT JOIN trend_analysis ta ON COALESCE(cp.cat_id, pp.cat_id) = ta.cat_id
        LEFT JOIN margin_calc mc ON COALESCE(cp.cat_id, pp.cat_id) = mc.cat_id
      )
      -- Reuse combined_metrics instead of re-deriving the same CASE expression
      UPDATE category_metrics cm
      SET
        growth_rate = cmb.growth_rate,
        avg_margin = COALESCE(cmb.avg_margin, cm.avg_margin),
        last_calculated_at = NOW()
      FROM combined_metrics cmb
      WHERE cm.category_id = cmb.category_id
    `);

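    // Reference sketch of the trend_stats/trend_analysis math above
    // (illustrative and unused): an ordinary least-squares slope over
    // (month, daily revenue) pairs, slope = (sum_xy - n*x_bar*y_bar) / (sum_xx - n*x_bar^2).
    const sketchTrendSlope = (points) => {
      const n = points.length;
      const avgX = points.reduce((s, p) => s + p.x, 0) / n;
      const avgY = points.reduce((s, p) => s + p.y, 0) / n;
      const sumXY = points.reduce((s, p) => s + p.x * p.y, 0);
      const sumXX = points.reduce((s, p) => s + p.x * p.x, 0);
      const denom = sumXX - n * avgX * avgX;
      return denom === 0 ? null : (sumXY - n * avgX * avgY) / denom;
    };
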
    processedCount = Math.floor(totalProducts * 0.97);
    outputProgress({
      status: 'running',
      operation: 'Growth rates calculated, updating time-based metrics',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    if (isCancelled) return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0,
      success
    };

    // Calculate time-based metrics
    await connection.query(`
      INSERT INTO category_time_metrics (
        category_id,
        year,
        month,
        product_count,
        active_products,
        total_value,
        total_revenue,
        avg_margin,
        turnover_rate
      )
      SELECT
        pc.cat_id,
        EXTRACT(YEAR FROM o.date::timestamp with time zone) as year,
        EXTRACT(MONTH FROM o.date::timestamp with time zone) as month,
        COUNT(DISTINCT p.pid) as product_count,
        COUNT(DISTINCT CASE WHEN p.visible = true THEN p.pid END) as active_products,
        SUM(p.stock_quantity * p.cost_price) as total_value,
        SUM(o.quantity * o.price) as total_revenue,
        CASE
          WHEN SUM(o.quantity * o.price) > 0 THEN
            LEAST(
              GREATEST(
                SUM(o.quantity * (o.price - GREATEST(p.cost_price, 0))) * 100.0 /
                  SUM(o.quantity * o.price),
                -100
              ),
              100
            )
          ELSE 0
        END as avg_margin,
        COALESCE(
          LEAST(
            SUM(o.quantity) / NULLIF(AVG(GREATEST(p.stock_quantity, 0)), 0),
            999.99
          ),
          0
        ) as turnover_rate
      FROM product_categories pc
      JOIN products p ON pc.pid = p.pid
      JOIN orders o ON p.pid = o.pid
      WHERE o.canceled = false
        AND o.date >= CURRENT_DATE - INTERVAL '12 months'
      GROUP BY pc.cat_id, EXTRACT(YEAR FROM o.date::timestamp with time zone), EXTRACT(MONTH FROM o.date::timestamp with time zone)
      ON CONFLICT (category_id, year, month) DO UPDATE
      SET
        product_count = EXCLUDED.product_count,
        active_products = EXCLUDED.active_products,
        total_value = EXCLUDED.total_value,
        total_revenue = EXCLUDED.total_revenue,
        avg_margin = EXCLUDED.avg_margin,
        turnover_rate = EXCLUDED.turnover_rate
    `);

    processedCount = Math.floor(totalProducts * 0.99);
    outputProgress({
      status: 'running',
      operation: 'Time-based metrics calculated, updating category-sales metrics',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    if (isCancelled) return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0,
      success
    };

    // Calculate category-sales metrics
    await connection.query(`
      INSERT INTO category_sales_metrics (
        category_id,
        brand,
        period_start,
        period_end,
        avg_daily_sales,
        total_sold,
        num_products,
        avg_price,
        last_calculated_at
      )
      WITH date_ranges AS (
        SELECT
          CURRENT_DATE - INTERVAL '30 days' as period_start,
          CURRENT_DATE as period_end
        UNION ALL
        SELECT
          CURRENT_DATE - INTERVAL '90 days',
          CURRENT_DATE - INTERVAL '31 days'
        UNION ALL
        SELECT
          CURRENT_DATE - INTERVAL '180 days',
          CURRENT_DATE - INTERVAL '91 days'
        UNION ALL
        SELECT
          CURRENT_DATE - INTERVAL '365 days',
          CURRENT_DATE - INTERVAL '181 days'
      ),
      sales_data AS (
        SELECT
          pc.cat_id,
          COALESCE(p.brand, 'Unknown') as brand,
          dr.period_start,
          dr.period_end,
          COUNT(DISTINCT p.pid) as num_products,
          SUM(o.quantity) as total_sold,
          SUM(o.quantity * o.price) as total_revenue,
          COUNT(DISTINCT DATE(o.date)) as num_days
        FROM products p
        JOIN product_categories pc ON p.pid = pc.pid
        JOIN orders o ON p.pid = o.pid
        CROSS JOIN date_ranges dr
        WHERE o.canceled = false
          AND o.date BETWEEN dr.period_start AND dr.period_end
        GROUP BY pc.cat_id, p.brand, dr.period_start, dr.period_end
      )
      SELECT
        cat_id as category_id,
        brand,
        period_start,
        period_end,
        CASE
          WHEN num_days > 0
          THEN total_sold / num_days
          ELSE 0
        END as avg_daily_sales,
        total_sold,
        num_products,
        CASE
          WHEN total_sold > 0
          THEN total_revenue / total_sold
          ELSE 0
        END as avg_price,
        NOW() as last_calculated_at
      FROM sales_data
      ON CONFLICT (category_id, brand, period_start, period_end) DO UPDATE
      SET
        avg_daily_sales = EXCLUDED.avg_daily_sales,
        total_sold = EXCLUDED.total_sold,
        num_products = EXCLUDED.num_products,
        avg_price = EXCLUDED.avg_price,
        last_calculated_at = EXCLUDED.last_calculated_at
    `);

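    // Reference sketch of the rolling windows used above (illustrative and
    // unused): four trailing buckets, with avg_daily_sales = total_sold /
    // selling days and avg_price = revenue / units within each bucket.
    const sketchPeriods = [
      { startDaysBack: 30, endDaysBack: 0 },
      { startDaysBack: 90, endDaysBack: 31 },
      { startDaysBack: 180, endDaysBack: 91 },
      { startDaysBack: 365, endDaysBack: 181 }
    ];
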
    processedCount = Math.floor(totalProducts * 1.0);
    outputProgress({
      status: 'running',
      operation: 'Category-sales metrics calculated',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    // If we get here, everything completed successfully
    success = true;

    // Update calculate_status
    await connection.query(`
      INSERT INTO calculate_status (module_name, last_calculation_timestamp)
      VALUES ('category_metrics', NOW())
      ON CONFLICT (module_name) DO UPDATE
      SET last_calculation_timestamp = NOW()
    `);

    return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0,
      success
    };

  } catch (error) {
    success = false;
    logError(error, 'Error calculating category metrics');
    throw error;
  } finally {
    if (connection) {
      connection.release();
    }
  }
}

module.exports = calculateCategoryMetrics;
@@ -1,214 +0,0 @@
const { outputProgress, formatElapsedTime, estimateRemaining, calculateRate, logError } = require('./utils/progress');
const { getConnection } = require('./utils/db');

async function calculateFinancialMetrics(startTime, totalProducts, processedCount = 0, isCancelled = false) {
  const connection = await getConnection();
  let success = false;
  let processedOrders = 0;

  try {
    if (isCancelled) {
      outputProgress({
        status: 'cancelled',
        operation: 'Financial metrics calculation cancelled',
        current: processedCount,
        total: totalProducts,
        elapsed: formatElapsedTime(startTime),
        remaining: null,
        rate: calculateRate(startTime, processedCount),
        percentage: ((processedCount / totalProducts) * 100).toFixed(1),
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
        }
      });
      return {
        processedProducts: processedCount,
        processedOrders: 0,
        processedPurchaseOrders: 0,
        success
      };
    }

    // Get order count that will be processed
    const orderCount = await connection.query(`
      SELECT COUNT(*) as count
      FROM orders o
      WHERE o.canceled = false
        AND DATE(o.date) >= CURRENT_DATE - INTERVAL '12 months'
    `);
    processedOrders = parseInt(orderCount.rows[0].count);

    outputProgress({
      status: 'running',
      operation: 'Starting financial metrics calculation',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    // First, calculate beginning inventory values (12 months ago)
    await connection.query(`
      CREATE TEMPORARY TABLE IF NOT EXISTS temp_beginning_inventory AS
      WITH beginning_inventory_calc AS (
        SELECT
          p.pid,
          p.stock_quantity as current_quantity,
          COALESCE(SUM(o.quantity), 0) as sold_quantity,
          COALESCE(SUM(po.received), 0) as received_quantity,
          GREATEST(0, (p.stock_quantity + COALESCE(SUM(o.quantity), 0) - COALESCE(SUM(po.received), 0))) as beginning_quantity,
          p.cost_price
        FROM products p
        LEFT JOIN orders o ON p.pid = o.pid
          AND o.canceled = false
          AND o.date >= CURRENT_DATE - INTERVAL '12 months'::interval
        LEFT JOIN purchase_orders po ON p.pid = po.pid
          AND po.received_date IS NOT NULL
          AND po.received_date >= CURRENT_DATE - INTERVAL '12 months'::interval
        GROUP BY p.pid, p.stock_quantity, p.cost_price
      )
      SELECT
        pid,
        beginning_quantity,
        beginning_quantity * cost_price as beginning_value,
        current_quantity * cost_price as current_value,
        ((beginning_quantity * cost_price) + (current_quantity * cost_price)) / 2 as average_inventory_value
      FROM beginning_inventory_calc
    `);

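    // Reference sketch of the beginning-inventory reconstruction above
    // (illustrative and unused): walk stock back 12 months by re-adding sales
    // and removing receipts, then average the two endpoint values for the
    // turnover and GMROI denominators.
    const sketchBeginningInventory = (current, sold, received, costPrice) => {
      const beginning = Math.max(0, current + sold - received);
      return {
        beginningValue: beginning * costPrice,
        averageInventoryValue: ((beginning + current) * costPrice) / 2
      };
    };
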
    processedCount = Math.floor(totalProducts * 0.60);
    outputProgress({
      status: 'running',
      operation: 'Beginning inventory values calculated, computing financial metrics',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    // Calculate financial metrics with optimized query and standard formulas
    await connection.query(`
      WITH product_financials AS (
        SELECT
          p.pid,
          COALESCE(bi.average_inventory_value, p.cost_price * p.stock_quantity) as avg_inventory_value,
          p.cost_price * p.stock_quantity as current_inventory_value,
          SUM(o.quantity * (o.price - COALESCE(o.discount, 0))) as total_revenue,
          SUM(o.quantity * COALESCE(o.costeach, 0)) as cost_of_goods_sold,
          SUM(o.quantity * (o.price - COALESCE(o.discount, 0) - COALESCE(o.costeach, 0))) as gross_profit,
          MIN(o.date) as first_sale_date,
          MAX(o.date) as last_sale_date,
          EXTRACT(DAY FROM (MAX(o.date)::timestamp with time zone - MIN(o.date)::timestamp with time zone)) + 1 as calculation_period_days,
          COUNT(DISTINCT DATE(o.date)) as active_days
        FROM products p
        LEFT JOIN orders o ON p.pid = o.pid
        LEFT JOIN temp_beginning_inventory bi ON p.pid = bi.pid
        WHERE o.canceled = false
          AND DATE(o.date) >= CURRENT_DATE - INTERVAL '12 months'::interval
        GROUP BY p.pid, p.cost_price, p.stock_quantity, bi.average_inventory_value
      )
      UPDATE product_metrics pm
      SET
        inventory_value = COALESCE(pf.current_inventory_value, 0)::decimal(10,3),
        total_revenue = COALESCE(pf.total_revenue, 0)::decimal(10,3),
        cost_of_goods_sold = COALESCE(pf.cost_of_goods_sold, 0)::decimal(10,3),
        gross_profit = COALESCE(pf.gross_profit, 0)::decimal(10,3),
        turnover_rate = CASE
          WHEN COALESCE(pf.avg_inventory_value, 0) > 0 THEN
            COALESCE(pf.cost_of_goods_sold, 0) / NULLIF(pf.avg_inventory_value, 0)
          ELSE 0
        END::decimal(12,3),
        gmroi = CASE
          WHEN COALESCE(pf.avg_inventory_value, 0) > 0 THEN
            COALESCE(pf.gross_profit, 0) / NULLIF(pf.avg_inventory_value, 0)
          ELSE 0
        END::decimal(10,3),
        last_calculated_at = CURRENT_TIMESTAMP
      FROM product_financials pf
      WHERE pm.pid = pf.pid
    `);

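    // Reference sketch of the two ratios above (illustrative and unused),
    // using the standard formulas: turnover = COGS / average inventory value,
    // GMROI = gross profit / average inventory value.
    const sketchFinancialRatios = (cogs, grossProfit, avgInventoryValue) =>
      avgInventoryValue > 0
        ? { turnoverRate: cogs / avgInventoryValue, gmroi: grossProfit / avgInventoryValue }
        : { turnoverRate: 0, gmroi: 0 };
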
    processedCount = Math.floor(totalProducts * 0.65);
    outputProgress({
      status: 'running',
      operation: 'Base financial metrics calculated, updating time aggregates',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    if (isCancelled) return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0,
      success
    };

    // Clean up temporary tables
    await connection.query('DROP TABLE IF EXISTS temp_beginning_inventory');

    // If we get here, everything completed successfully
    success = true;

    // Update calculate_status
    await connection.query(`
      INSERT INTO calculate_status (module_name, last_calculation_timestamp)
      VALUES ('financial_metrics', NOW())
      ON CONFLICT (module_name) DO UPDATE
      SET last_calculation_timestamp = NOW()
    `);

    return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0,
      success
    };

  } catch (error) {
    success = false;
    logError(error, 'Error calculating financial metrics');
    throw error;
  } finally {
    if (connection) {
      try {
        // Make sure temporary tables are always cleaned up
        await connection.query('DROP TABLE IF EXISTS temp_beginning_inventory');
      } catch (err) {
        console.error('Error cleaning up temp tables:', err);
      }
      connection.release();
    }
  }
}

module.exports = calculateFinancialMetrics;
@@ -1,736 +0,0 @@
const { outputProgress, formatElapsedTime, estimateRemaining, calculateRate, logError } = require('./utils/progress');
const { getConnection } = require('./utils/db');

// Helper function to handle NaN and undefined values
function sanitizeValue(value) {
  if (value === undefined || value === null || Number.isNaN(value)) {
    return null;
  }
  return value;
}

async function calculateProductMetrics(startTime, totalProducts, processedCount = 0, isCancelled = false) {
  let connection;
  let success = false;
  let processedOrders = 0;
  const BATCH_SIZE = 5000;

  try {
    connection = await getConnection();
    // Local skip flags for testing: set to 1 to bypass a stage
    const SKIP_PRODUCT_BASE_METRICS = 0;
    const SKIP_PRODUCT_TIME_AGGREGATES = 0;

    // Get total product count if not provided
    if (!totalProducts) {
      const productCount = await connection.query('SELECT COUNT(*) as count FROM products');
      totalProducts = parseInt(productCount.rows[0].count);
    }

    if (isCancelled) {
      outputProgress({
        status: 'cancelled',
        operation: 'Product metrics calculation cancelled',
        current: processedCount,
        total: totalProducts,
        elapsed: formatElapsedTime(startTime),
        remaining: null,
        rate: calculateRate(startTime, processedCount),
        percentage: ((processedCount / totalProducts) * 100).toFixed(1),
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
        }
      });
      return {
        processedProducts: processedCount,
        processedOrders,
        processedPurchaseOrders: 0,
        success
      };
    }

    // First ensure all products have a metrics record
    await connection.query(`
      INSERT INTO product_metrics (pid, last_calculated_at)
      SELECT pid, NOW()
      FROM products
      ON CONFLICT (pid) DO NOTHING
    `);

    // Get threshold settings once
    const thresholds = await connection.query(`
      SELECT critical_days, reorder_days, overstock_days, low_stock_threshold
      FROM stock_thresholds
      WHERE category_id IS NULL AND vendor IS NULL
      LIMIT 1
    `);

    // Check if threshold data was returned
    if (!thresholds.rows || thresholds.rows.length === 0) {
      console.warn('No default thresholds found in the database. Using explicit type casting in the query.');
    }

    const defaultThresholds = thresholds.rows[0];

    // Get financial calculation configuration parameters
    const financialConfig = await connection.query(`
      SELECT
        order_cost,
        holding_rate,
        service_level_z_score,
        min_reorder_qty,
        default_reorder_qty,
        default_safety_stock
      FROM financial_calc_config
      WHERE id = 1
      LIMIT 1
    `);
    const finConfig = financialConfig.rows[0] || {
      order_cost: 25.00,
      holding_rate: 0.25,
      service_level_z_score: 1.96,
      min_reorder_qty: 1,
      default_reorder_qty: 5,
      default_safety_stock: 5
    };

    // Calculate base product metrics
    if (!SKIP_PRODUCT_BASE_METRICS) {
      outputProgress({
        status: 'running',
        operation: 'Starting base product metrics calculation',
        current: processedCount,
        total: totalProducts,
        elapsed: formatElapsedTime(startTime),
        remaining: estimateRemaining(startTime, processedCount, totalProducts),
        rate: calculateRate(startTime, processedCount),
        percentage: ((processedCount / totalProducts) * 100).toFixed(1),
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
        }
      });

      // Get order count that will be processed
      const orderCount = await connection.query(`
        SELECT COUNT(*) as count
        FROM orders o
        WHERE o.canceled = false
      `);
      processedOrders = parseInt(orderCount.rows[0].count);

      // Clear temporary tables
      await connection.query('DROP TABLE IF EXISTS temp_sales_metrics');
      await connection.query('DROP TABLE IF EXISTS temp_purchase_metrics');

      // Create temp_sales_metrics
      await connection.query(`
        CREATE TEMPORARY TABLE temp_sales_metrics (
          pid BIGINT NOT NULL,
          daily_sales_avg DECIMAL(10,3),
          weekly_sales_avg DECIMAL(10,3),
          monthly_sales_avg DECIMAL(10,3),
          total_revenue DECIMAL(10,3),
          avg_margin_percent DECIMAL(10,3),
          first_sale_date DATE,
          last_sale_date DATE,
          stddev_daily_sales DECIMAL(10,3),
          PRIMARY KEY (pid)
        )
      `);

      // Create temp_purchase_metrics
      await connection.query(`
        CREATE TEMPORARY TABLE temp_purchase_metrics (
          pid BIGINT NOT NULL,
          avg_lead_time_days DECIMAL(10,2),
          last_purchase_date DATE,
          first_received_date DATE,
          last_received_date DATE,
          stddev_lead_time_days DECIMAL(10,2),
          PRIMARY KEY (pid)
        )
      `);

      // Populate temp_sales_metrics with base stats and sales averages
      await connection.query(`
        INSERT INTO temp_sales_metrics
        SELECT
          p.pid,
          COALESCE(SUM(o.quantity) / NULLIF(COUNT(DISTINCT DATE(o.date)), 0), 0) as daily_sales_avg,
          -- Divide by 7.0/30.0 so integer division does not truncate before CEIL
          COALESCE(SUM(o.quantity) / NULLIF(CEIL(COUNT(DISTINCT DATE(o.date)) / 7.0), 0), 0) as weekly_sales_avg,
          COALESCE(SUM(o.quantity) / NULLIF(CEIL(COUNT(DISTINCT DATE(o.date)) / 30.0), 0), 0) as monthly_sales_avg,
          COALESCE(SUM(o.quantity * o.price), 0) as total_revenue,
          CASE
            WHEN SUM(o.quantity * o.price) > 0
            THEN ((SUM(o.quantity * o.price) - SUM(o.quantity * p.cost_price)) / SUM(o.quantity * o.price)) * 100
            ELSE 0
          END as avg_margin_percent,
          MIN(o.date) as first_sale_date,
          MAX(o.date) as last_sale_date,
          COALESCE(STDDEV_SAMP(daily_qty.quantity), 0) as stddev_daily_sales
        FROM products p
        LEFT JOIN orders o ON p.pid = o.pid
          AND o.canceled = false
          AND o.date >= CURRENT_DATE - INTERVAL '90 days'
        LEFT JOIN (
          SELECT
            pid,
            DATE(date) as sale_date,
            SUM(quantity) as quantity
          FROM orders
          WHERE canceled = false
            AND date >= CURRENT_DATE - INTERVAL '90 days'
          GROUP BY pid, DATE(date)
        ) daily_qty ON p.pid = daily_qty.pid
        GROUP BY p.pid
      `);

      // Populate temp_purchase_metrics with timeout protection
      await Promise.race([
        connection.query(`
          INSERT INTO temp_purchase_metrics
          SELECT
            p.pid,
            AVG(
              CASE
                WHEN po.received_date IS NOT NULL AND po.date IS NOT NULL
                THEN EXTRACT(EPOCH FROM (po.received_date::timestamp with time zone - po.date::timestamp with time zone)) / 86400.0
                ELSE NULL
              END
            ) as avg_lead_time_days,
            MAX(po.date) as last_purchase_date,
            MIN(po.received_date) as first_received_date,
            MAX(po.received_date) as last_received_date,
            STDDEV_SAMP(
              CASE
                WHEN po.received_date IS NOT NULL AND po.date IS NOT NULL
                THEN EXTRACT(EPOCH FROM (po.received_date::timestamp with time zone - po.date::timestamp with time zone)) / 86400.0
                ELSE NULL
              END
            ) as stddev_lead_time_days
          FROM products p
          LEFT JOIN purchase_orders po ON p.pid = po.pid
            AND po.received_date IS NOT NULL
            AND po.date IS NOT NULL
            AND po.date >= CURRENT_DATE - INTERVAL '365 days'
          GROUP BY p.pid
        `),
        new Promise((_, reject) =>
          setTimeout(() => reject(new Error('Timeout: temp_purchase_metrics query took too long')), 60000)
        )
      ]).catch(async (err) => {
        logError(err, 'Error populating temp_purchase_metrics, continuing with empty table');
        // Create an empty fallback to continue processing
        await connection.query(`
          INSERT INTO temp_purchase_metrics
          SELECT
            p.pid,
            30.0 as avg_lead_time_days,
            NULL as last_purchase_date,
            NULL as first_received_date,
            NULL as last_received_date,
            0.0 as stddev_lead_time_days
          FROM products p
          LEFT JOIN temp_purchase_metrics tpm ON p.pid = tpm.pid
          WHERE tpm.pid IS NULL
        `);
      });

      // Process updates in batches
      let lastPid = 0;
      let batchCount = 0;
      const MAX_BATCHES = 1000; // Safety limit for number of batches to prevent infinite loops

      while (batchCount < MAX_BATCHES) {
        if (isCancelled) break;

        batchCount++;
        const batch = await connection.query(
          'SELECT pid FROM products WHERE pid > $1 ORDER BY pid LIMIT $2',
          [lastPid, BATCH_SIZE]
        );

        if (batch.rows.length === 0) break;

        // Process the entire batch in a single efficient query
        const lowStockThreshold = parseInt(defaultThresholds?.low_stock_threshold) || 5;
        const criticalDays = parseInt(defaultThresholds?.critical_days) || 7;
        const reorderDays = parseInt(defaultThresholds?.reorder_days) || 14;
        const overstockDays = parseInt(defaultThresholds?.overstock_days) || 90;
        const serviceLevel = parseFloat(finConfig?.service_level_z_score) || 1.96;
        const defaultSafetyStock = parseInt(finConfig?.default_safety_stock) || 5;
        const defaultReorderQty = parseInt(finConfig?.default_reorder_qty) || 5;
        const orderCost = parseFloat(finConfig?.order_cost) || 25.00;
        const holdingRate = parseFloat(finConfig?.holding_rate) || 0.25;
        const minReorderQty = parseInt(finConfig?.min_reorder_qty) || 1;

        await connection.query(`
          UPDATE product_metrics pm
          SET
            inventory_value = p.stock_quantity * NULLIF(p.cost_price, 0),
            daily_sales_avg = COALESCE(sm.daily_sales_avg, 0),
            weekly_sales_avg = COALESCE(sm.weekly_sales_avg, 0),
            monthly_sales_avg = COALESCE(sm.monthly_sales_avg, 0),
            total_revenue = COALESCE(sm.total_revenue, 0),
            avg_margin_percent = COALESCE(sm.avg_margin_percent, 0),
            first_sale_date = sm.first_sale_date,
            last_sale_date = sm.last_sale_date,
            avg_lead_time_days = COALESCE(lm.avg_lead_time_days, 30.0),
            days_of_inventory = CASE
              WHEN COALESCE(sm.daily_sales_avg, 0) > 0
              THEN FLOOR(p.stock_quantity / NULLIF(sm.daily_sales_avg, 0))
              ELSE NULL
            END,
            weeks_of_inventory = CASE
              WHEN COALESCE(sm.weekly_sales_avg, 0) > 0
              THEN FLOOR(p.stock_quantity / NULLIF(sm.weekly_sales_avg, 0))
              ELSE NULL
            END,
            stock_status = CASE
              WHEN p.stock_quantity <= 0 THEN 'Out of Stock'
              WHEN COALESCE(sm.daily_sales_avg, 0) = 0 AND p.stock_quantity <= ${lowStockThreshold} THEN 'Low Stock'
              WHEN COALESCE(sm.daily_sales_avg, 0) = 0 THEN 'In Stock'
              WHEN p.stock_quantity / NULLIF(sm.daily_sales_avg, 0) <= ${criticalDays} THEN 'Critical'
              WHEN p.stock_quantity / NULLIF(sm.daily_sales_avg, 0) <= ${reorderDays} THEN 'Reorder'
              WHEN p.stock_quantity / NULLIF(sm.daily_sales_avg, 0) > ${overstockDays} THEN 'Overstocked'
              ELSE 'Healthy'
            END,
            safety_stock = CASE
              WHEN COALESCE(sm.daily_sales_avg, 0) > 0 AND COALESCE(lm.avg_lead_time_days, 0) > 0 THEN
                CEIL(
                  ${serviceLevel} * SQRT(
                    GREATEST(0, COALESCE(lm.avg_lead_time_days, 0)) * POWER(COALESCE(sm.stddev_daily_sales, 0), 2) +
                    POWER(COALESCE(sm.daily_sales_avg, 0), 2) * POWER(COALESCE(lm.stddev_lead_time_days, 0), 2)
                  )
                )
              ELSE ${defaultSafetyStock}
            END,
            reorder_point = CASE
              WHEN COALESCE(sm.daily_sales_avg, 0) > 0 THEN
                CEIL(sm.daily_sales_avg * GREATEST(0, COALESCE(lm.avg_lead_time_days, 30.0))) +
                (CASE
                  WHEN COALESCE(sm.daily_sales_avg, 0) > 0 AND COALESCE(lm.avg_lead_time_days, 0) > 0 THEN
                    CEIL(
                      ${serviceLevel} * SQRT(
                        GREATEST(0, COALESCE(lm.avg_lead_time_days, 0)) * POWER(COALESCE(sm.stddev_daily_sales, 0), 2) +
                        POWER(COALESCE(sm.daily_sales_avg, 0), 2) * POWER(COALESCE(lm.stddev_lead_time_days, 0), 2)
                      )
                    )
                  ELSE ${defaultSafetyStock}
                END)
              ELSE ${lowStockThreshold}
            END,
            reorder_qty = CASE
              WHEN COALESCE(sm.daily_sales_avg, 0) > 0 AND NULLIF(p.cost_price, 0) IS NOT NULL AND NULLIF(p.cost_price, 0) > 0 THEN
                GREATEST(
                  CEIL(SQRT(
                    (2 * (sm.daily_sales_avg * 365) * ${orderCost}) /
                    NULLIF(p.cost_price * ${holdingRate}, 0)
                  )),
                  ${minReorderQty}
                )
              ELSE ${defaultReorderQty}
            END,
            overstocked_amt = CASE
              WHEN p.stock_quantity / NULLIF(sm.daily_sales_avg, 0) > ${overstockDays}
              THEN GREATEST(0, p.stock_quantity - CEIL(sm.daily_sales_avg * ${overstockDays}))
              ELSE 0
            END,
            last_calculated_at = NOW()
          FROM products p
          LEFT JOIN temp_sales_metrics sm ON p.pid = sm.pid
          LEFT JOIN temp_purchase_metrics lm ON p.pid = lm.pid
          WHERE p.pid = ANY($1::BIGINT[])
            AND pm.pid = p.pid
        `, [batch.rows.map(row => row.pid)]);

        lastPid = batch.rows[batch.rows.length - 1].pid;
        processedCount += batch.rows.length;

        outputProgress({
          status: 'running',
          operation: 'Processing base metrics batch',
          current: processedCount,
          total: totalProducts,
          elapsed: formatElapsedTime(startTime),
          remaining: estimateRemaining(startTime, processedCount, totalProducts),
          rate: calculateRate(startTime, processedCount),
          percentage: ((processedCount / totalProducts) * 100).toFixed(1),
          timing: {
            start_time: new Date(startTime).toISOString(),
            end_time: new Date().toISOString(),
            elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
          }
        });
      }

      // Warn if the loop hit the MAX_BATCHES safety limit
      if (batchCount >= MAX_BATCHES) {
        logError(new Error(`Reached maximum batch count (${MAX_BATCHES}). Process may have entered an infinite loop.`), 'Batch processing safety limit reached');
      }
    }

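    // Reference sketch of the inventory formulas interpolated into the UPDATE
    // above (illustrative and unused): classic safety stock under independent
    // demand/lead-time variability, reorder point, and the EOQ used for
    // reorder_qty. Parameter names here are this sketch's own.
    const sketchReorderMath = (dailyAvg, sdDaily, leadDays, sdLead, z, annualDemand, orderCost, unitCost, holdingRate) => {
      const safetyStock = Math.ceil(z * Math.sqrt(leadDays * sdDaily ** 2 + dailyAvg ** 2 * sdLead ** 2));
      const reorderPoint = Math.ceil(dailyAvg * leadDays) + safetyStock;
      const eoq = Math.ceil(Math.sqrt((2 * annualDemand * orderCost) / (unitCost * holdingRate)));
      return { safetyStock, reorderPoint, eoq };
    };
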
    // Calculate forecast accuracy and bias in batches
    let forecastPid = 0;
    while (true) {
      if (isCancelled) break;

      const forecastBatch = await connection.query(
        'SELECT pid FROM products WHERE pid > $1 ORDER BY pid LIMIT $2',
        [forecastPid, BATCH_SIZE]
      );

      if (forecastBatch.rows.length === 0) break;

      const forecastPidArray = forecastBatch.rows.map(row => row.pid);

      // Inline the batch PIDs as a Postgres array literal
      await connection.query(`
        WITH forecast_metrics AS (
          SELECT
            sf.pid,
            AVG(CASE
              WHEN o.quantity > 0
              THEN ABS(sf.forecast_quantity - o.quantity) / o.quantity * 100
              ELSE 100
            END) as avg_forecast_error,
            AVG(CASE
              WHEN o.quantity > 0
              THEN (sf.forecast_quantity - o.quantity) / o.quantity * 100
              ELSE 0
            END) as avg_forecast_bias,
            MAX(sf.forecast_date) as last_forecast_date
          FROM sales_forecasts sf
          JOIN orders o ON sf.pid = o.pid
            AND DATE(o.date) = sf.forecast_date
          WHERE o.canceled = false
            AND sf.forecast_date >= CURRENT_DATE - INTERVAL '90 days'
            AND sf.pid = ANY('{${forecastPidArray.join(',')}}'::BIGINT[])
          GROUP BY sf.pid
        )
        UPDATE product_metrics pm
        SET
          forecast_accuracy = GREATEST(0, 100 - LEAST(fm.avg_forecast_error, 100)),
          forecast_bias = GREATEST(-100, LEAST(fm.avg_forecast_bias, 100)),
          last_forecast_date = fm.last_forecast_date,
          last_calculated_at = NOW()
        FROM forecast_metrics fm
        WHERE pm.pid = fm.pid
      `);

      forecastPid = forecastBatch.rows[forecastBatch.rows.length - 1].pid;
    }

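    // Reference sketch of the forecast scoring above (illustrative and
    // unused): per-day absolute percent error and signed percent bias, then
    // accuracy = 100 - min(error, 100) and bias clamped to [-100, 100].
    const sketchForecastScore = (forecastQty, actualQty) => {
      const error = actualQty > 0 ? (Math.abs(forecastQty - actualQty) / actualQty) * 100 : 100;
      const bias = actualQty > 0 ? ((forecastQty - actualQty) / actualQty) * 100 : 0;
      return {
        accuracy: Math.max(0, 100 - Math.min(error, 100)),
        bias: Math.max(-100, Math.min(bias, 100))
      };
    };
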
    // Calculate product time aggregates
    if (!SKIP_PRODUCT_TIME_AGGREGATES) {
      outputProgress({
        status: 'running',
        operation: 'Starting product time aggregates calculation',
        current: processedCount || 0,
        total: totalProducts || 0,
        elapsed: formatElapsedTime(startTime),
        remaining: estimateRemaining(startTime, processedCount || 0, totalProducts || 0),
        rate: calculateRate(startTime, processedCount || 0),
        percentage: (((processedCount || 0) / (totalProducts || 1)) * 100).toFixed(1),
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
        }
      });

      // Note: The time-aggregates calculation has been moved to time-aggregates.js
      // This module will not duplicate that functionality
      processedCount = Math.floor(totalProducts * 0.6);
      outputProgress({
        status: 'running',
        operation: 'Product time aggregates calculation delegated to time-aggregates module',
        current: processedCount || 0,
        total: totalProducts || 0,
        elapsed: formatElapsedTime(startTime),
        remaining: estimateRemaining(startTime, processedCount || 0, totalProducts || 0),
        rate: calculateRate(startTime, processedCount || 0),
        percentage: (((processedCount || 0) / (totalProducts || 1)) * 100).toFixed(1),
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
        }
      });
    } else {
      processedCount = Math.floor(totalProducts * 0.6);
      outputProgress({
        status: 'running',
        operation: 'Skipping product time aggregates calculation',
        current: processedCount || 0,
        total: totalProducts || 0,
        elapsed: formatElapsedTime(startTime),
        remaining: estimateRemaining(startTime, processedCount || 0, totalProducts || 0),
        rate: calculateRate(startTime, processedCount || 0),
        percentage: (((processedCount || 0) / (totalProducts || 1)) * 100).toFixed(1),
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
        }
      });
    }

    // Calculate ABC classification
    outputProgress({
      status: 'running',
      operation: 'Starting ABC classification',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    if (isCancelled) return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0, // This module doesn't process POs
      success
    };

    const abcConfig = await connection.query('SELECT a_threshold, b_threshold FROM abc_classification_config WHERE id = 1');
    const abcThresholds = abcConfig.rows[0] || { a_threshold: 20, b_threshold: 50 };

    // Extract values and ensure they are valid numbers
    const aThreshold = parseFloat(abcThresholds.a_threshold) || 20;
    const bThreshold = parseFloat(abcThresholds.b_threshold) || 50;

    // First, create and populate the rankings table with an index
    await connection.query('DROP TABLE IF EXISTS temp_revenue_ranks');
    await connection.query(`
      CREATE TEMPORARY TABLE temp_revenue_ranks (
        pid BIGINT NOT NULL,
        total_revenue DECIMAL(10,3),
        rank_num INT,
        dense_rank_num INT,
        percentile DECIMAL(5,2),
        total_count INT,
        PRIMARY KEY (pid)
      )
    `);
    await connection.query('CREATE INDEX ON temp_revenue_ranks (rank_num)');
    await connection.query('CREATE INDEX ON temp_revenue_ranks (dense_rank_num)');
    await connection.query('CREATE INDEX ON temp_revenue_ranks (percentile)');

    // Calculate rankings with proper tie handling
    await connection.query(`
      INSERT INTO temp_revenue_ranks
      WITH revenue_data AS (
        SELECT
          pid,
          total_revenue,
          COUNT(*) OVER () as total_count,
          PERCENT_RANK() OVER (ORDER BY total_revenue DESC) * 100 as percentile,
          RANK() OVER (ORDER BY total_revenue DESC) as rank_num,
          DENSE_RANK() OVER (ORDER BY total_revenue DESC) as dense_rank_num
        FROM product_metrics
        WHERE total_revenue > 0
      )
      SELECT
        pid,
        total_revenue,
        rank_num,
        dense_rank_num,
        percentile,
        total_count
      FROM revenue_data
    `);

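    // Reference sketch of the ABC rule applied below (illustrative and
    // unused): 'A' for the top a_threshold percentile of revenue, 'B' up to
    // b_threshold, and 'C' for everything else, including unranked products.
    const sketchAbcClass = (percentile, aTh, bTh) =>
      percentile == null ? 'C' : percentile <= aTh ? 'A' : percentile <= bTh ? 'B' : 'C';
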
// Get total count for percentage calculation
|
||||
const rankingCount = await connection.query('SELECT MAX(rank_num) as total_count FROM temp_revenue_ranks');
|
||||
const totalCount = parseInt(rankingCount.rows[0].total_count) || 1;
|
||||
|
||||
// Process updates in batches
|
||||
let abcProcessedCount = 0;
|
||||
const batchSize = 5000;
|
||||
const maxPid = await connection.query('SELECT MAX(pid) as max_pid FROM products');
|
||||
const maxProductId = parseInt(maxPid.rows[0].max_pid);
|
||||
|
||||
while (abcProcessedCount < maxProductId) {
|
||||
if (isCancelled) return {
|
||||
processedProducts: processedCount,
|
||||
processedOrders,
|
||||
processedPurchaseOrders: 0,
|
||||
success
|
||||
};
|
||||
|
||||
// Get a batch of PIDs that need updating
|
||||
const pids = await connection.query(`
|
||||
SELECT pm.pid
|
||||
FROM product_metrics pm
|
||||
LEFT JOIN temp_revenue_ranks tr ON pm.pid = tr.pid
|
||||
WHERE pm.pid > $1
|
||||
AND (pm.abc_class IS NULL
|
||||
OR pm.abc_class !=
|
||||
CASE
|
||||
WHEN tr.pid IS NULL THEN 'C'
|
||||
WHEN tr.percentile <= ${aThreshold} THEN 'A'
|
||||
WHEN tr.percentile <= ${bThreshold} THEN 'B'
|
||||
ELSE 'C'
|
||||
END)
|
||||
ORDER BY pm.pid
|
||||
LIMIT $2
|
||||
`, [abcProcessedCount, batchSize]);
|
||||
|
||||
if (pids.rows.length === 0) break;
|
||||
|
||||
const pidValues = pids.rows.map(row => row.pid);
|
||||
|
||||
await connection.query(`
|
||||
UPDATE product_metrics pm
|
||||
SET abc_class =
|
||||
CASE
|
||||
WHEN tr.pid IS NULL THEN 'C'
|
||||
WHEN tr.percentile <= ${aThreshold} THEN 'A'
|
||||
WHEN tr.percentile <= ${bThreshold} THEN 'B'
|
||||
ELSE 'C'
|
||||
END,
|
||||
last_calculated_at = NOW()
|
||||
FROM (SELECT pid, percentile FROM temp_revenue_ranks) tr
|
||||
WHERE pm.pid = tr.pid AND pm.pid = ANY($1::BIGINT[])
|
||||
OR (pm.pid = ANY($1::BIGINT[]) AND tr.pid IS NULL)
|
||||
`, [pidValues]);
|
||||
|
||||
// Now update turnover rate with proper handling of zero inventory periods
|
||||
await connection.query(`
|
||||
UPDATE product_metrics pm
|
||||
SET
|
||||
turnover_rate = CASE
|
||||
WHEN sales.avg_nonzero_stock > 0 AND sales.active_days > 0
|
||||
THEN LEAST(
|
||||
(sales.total_sold / sales.avg_nonzero_stock) * (365.0 / sales.active_days),
|
||||
999.99
|
||||
)
|
||||
ELSE 0
|
||||
END,
|
||||
last_calculated_at = NOW()
|
||||
FROM (
|
||||
SELECT
|
||||
o.pid,
|
||||
SUM(o.quantity) as total_sold,
|
||||
COUNT(DISTINCT DATE(o.date)) as active_days,
|
||||
AVG(CASE
|
||||
WHEN p.stock_quantity > 0 THEN p.stock_quantity
|
||||
ELSE NULL
|
||||
END) as avg_nonzero_stock
|
||||
FROM orders o
|
||||
JOIN products p ON o.pid = p.pid
|
||||
WHERE o.canceled = false
|
||||
AND o.date >= CURRENT_DATE - INTERVAL '90 days'
|
||||
AND o.pid = ANY($1::BIGINT[])
|
||||
GROUP BY o.pid
|
||||
) sales
|
||||
WHERE pm.pid = sales.pid
|
||||
`, [pidValues]);
|
||||
|
||||
abcProcessedCount = pids.rows[pids.rows.length - 1].pid;
|
||||
|
||||
// Calculate progress proportionally to total products
|
||||
processedCount = Math.floor(totalProducts * (0.60 + (abcProcessedCount / maxProductId) * 0.2));
|
||||
|
||||
outputProgress({
|
||||
status: 'running',
|
||||
operation: 'ABC classification progress',
|
||||
current: processedCount,
|
||||
total: totalProducts,
|
||||
elapsed: formatElapsedTime(startTime),
|
||||
remaining: estimateRemaining(startTime, processedCount, totalProducts),
|
||||
rate: calculateRate(startTime, processedCount),
|
||||
percentage: ((processedCount / totalProducts) * 100).toFixed(1),
|
||||
timing: {
|
||||
start_time: new Date(startTime).toISOString(),
|
||||
end_time: new Date().toISOString(),
|
||||
elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// If we get here, everything completed successfully
|
||||
success = true;
|
||||
|
||||
// Update calculate_status
|
||||
await connection.query(`
|
||||
INSERT INTO calculate_status (module_name, last_calculation_timestamp)
|
||||
VALUES ('product_metrics', NOW())
|
||||
ON CONFLICT (module_name) DO UPDATE
|
||||
SET last_calculation_timestamp = NOW()
|
||||
`);
|
||||
|
||||
return {
|
||||
processedProducts: processedCount || 0,
|
||||
processedOrders: processedOrders || 0,
|
||||
processedPurchaseOrders: 0, // This module doesn't process POs
|
||||
success
|
||||
};
|
||||
|
||||
} catch (error) {
|
||||
success = false;
|
||||
logError(error, 'Error calculating product metrics');
|
||||
throw error;
|
||||
} finally {
|
||||
// Always clean up temporary tables, even if an error occurred
|
||||
if (connection) {
|
||||
try {
|
||||
await connection.query('DROP TABLE IF EXISTS temp_sales_metrics');
|
||||
await connection.query('DROP TABLE IF EXISTS temp_purchase_metrics');
|
||||
} catch (err) {
|
||||
console.error('Error cleaning up temporary tables:', err);
|
||||
}
|
||||
|
||||
// Make sure to release the connection
|
||||
connection.release();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function calculateStockStatus(stock, config, daily_sales_avg, weekly_sales_avg, monthly_sales_avg) {
  if (stock <= 0) {
    return 'Out of Stock';
  }

  // Use the most appropriate sales average based on data quality:
  // fall back from daily to weekly to monthly figures
  let sales_avg = daily_sales_avg;
  if (sales_avg === 0) {
    sales_avg = weekly_sales_avg / 7;
  }
  if (sales_avg === 0) {
    sales_avg = monthly_sales_avg / 30;
  }

  if (sales_avg === 0) {
    return stock <= config.low_stock_threshold ? 'Low Stock' : 'In Stock';
  }

  const days_of_stock = stock / sales_avg;

  if (days_of_stock <= config.critical_days) {
    return 'Critical';
  } else if (days_of_stock <= config.reorder_days) {
    return 'Reorder';
  } else if (days_of_stock > config.overstock_days) {
    return 'Overstocked';
  }

  return 'Healthy';
}
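
// Illustrative call (threshold values here are hypothetical, not ones shipped
// with the app): with stock = 40, daily_sales_avg = 5 and config =
// { critical_days: 3, reorder_days: 7, overstock_days: 90, low_stock_threshold: 10 },
// days_of_stock = 40 / 5 = 8, which clears the 3-day critical and 7-day reorder
// cutoffs and sits under 90, so the function returns 'Healthy'.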

// Note: calculateReorderQuantities function has been removed as its logic has been
// incorporated into the main SQL query with configurable parameters

module.exports = calculateProductMetrics;
@@ -1,440 +0,0 @@
const { outputProgress, formatElapsedTime, estimateRemaining, calculateRate, logError } = require('./utils/progress');
const { getConnection } = require('./utils/db');

async function calculateSalesForecasts(startTime, totalProducts, processedCount = 0, isCancelled = false) {
  const connection = await getConnection();
  let success = false;
  let processedOrders = 0;

  try {
    if (isCancelled) {
      outputProgress({
        status: 'cancelled',
        operation: 'Sales forecasts calculation cancelled',
        current: processedCount,
        total: totalProducts,
        elapsed: formatElapsedTime(startTime),
        remaining: null,
        rate: calculateRate(startTime, processedCount),
        percentage: ((processedCount / totalProducts) * 100).toFixed(1),
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
        }
      });
      return {
        processedProducts: processedCount,
        processedOrders: 0,
        processedPurchaseOrders: 0,
        success
      };
    }

    // Get order count that will be processed
    const orderCount = await connection.query(`
      SELECT COUNT(*) as count
      FROM orders o
      WHERE o.canceled = false
        AND o.date >= CURRENT_DATE - INTERVAL '90 days'
    `);
    processedOrders = parseInt(orderCount.rows[0].count);

    outputProgress({
      status: 'running',
      operation: 'Starting sales forecasts calculation',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    // First, create a temporary table for forecast dates
    await connection.query(`
      CREATE TEMPORARY TABLE IF NOT EXISTS temp_forecast_dates (
        forecast_date DATE,
        day_of_week INT,
        month INT,
        PRIMARY KEY (forecast_date)
      )
    `);

    await connection.query(`
      INSERT INTO temp_forecast_dates
      SELECT
        CURRENT_DATE + (n || ' days')::INTERVAL as forecast_date,
        EXTRACT(DOW FROM CURRENT_DATE + (n || ' days')::INTERVAL) + 1 as day_of_week,
        EXTRACT(MONTH FROM CURRENT_DATE + (n || ' days')::INTERVAL) as month
      FROM (
        -- Digit tables generate n = 0..39; ORDER BY n LIMIT 31 keeps n = 0..30,
        -- i.e. today plus the next 30 days (a b table capped at 2 would only
        -- yield 30 rows despite the LIMIT 31)
        SELECT a.n + b.n * 10 as n
        FROM
          (SELECT 0 as n UNION SELECT 1 UNION SELECT 2 UNION SELECT 3 UNION SELECT 4 UNION
           SELECT 5 UNION SELECT 6 UNION SELECT 7 UNION SELECT 8 UNION SELECT 9) a,
          (SELECT 0 as n UNION SELECT 1 UNION SELECT 2 UNION SELECT 3) b
        ORDER BY n
        LIMIT 31
      ) numbers
    `);
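
    // The digits subquery above is portable SQL; on PostgreSQL the same 31
    // offsets could be produced more directly with a sketch like
    //   SELECT generate_series(0, 30) AS n
    // in place of the cross-joined digit tables.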

    processedCount = Math.floor(totalProducts * 0.92);
    outputProgress({
      status: 'running',
      operation: 'Forecast dates prepared, calculating daily sales stats',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    if (isCancelled) return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0,
      success
    };

    // Create temporary table for daily sales stats
    await connection.query(`
      CREATE TEMPORARY TABLE temp_daily_sales AS
      SELECT
        o.pid,
        EXTRACT(DOW FROM o.date) + 1 as day_of_week,
        SUM(o.quantity) as daily_quantity,
        SUM(o.price * o.quantity) as daily_revenue,
        COUNT(DISTINCT DATE(o.date)) as day_count
      FROM orders o
      WHERE o.canceled = false
        AND o.date >= CURRENT_DATE - INTERVAL '90 days'
      GROUP BY o.pid, EXTRACT(DOW FROM o.date) + 1
    `);

    processedCount = Math.floor(totalProducts * 0.94);
    outputProgress({
      status: 'running',
      operation: 'Daily sales stats calculated, preparing product stats',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    if (isCancelled) return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0,
      success
    };

    // Create temporary table for product stats
    await connection.query(`
      CREATE TEMPORARY TABLE temp_product_stats AS
      SELECT
        pid,
        AVG(daily_revenue) as overall_avg_revenue,
        SUM(day_count) as total_days
      FROM temp_daily_sales
      GROUP BY pid
    `);

    processedCount = Math.floor(totalProducts * 0.96);
    outputProgress({
      status: 'running',
      operation: 'Product stats prepared, calculating product-level forecasts',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    if (isCancelled) return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0,
      success
    };

    // Calculate product-level forecasts
    await connection.query(`
      INSERT INTO sales_forecasts (
        pid,
        forecast_date,
        forecast_quantity,
        confidence_level,
        created_at
      )
      WITH daily_stats AS (
        SELECT
          ds.pid,
          AVG(ds.daily_quantity) as avg_daily_qty,
          STDDEV(ds.daily_quantity) as std_daily_qty,
          COUNT(DISTINCT ds.day_count) as data_points,
          SUM(ds.day_count) as total_days,
          AVG(ds.daily_revenue) as avg_daily_revenue,
          STDDEV(ds.daily_revenue) as std_daily_revenue,
          MIN(ds.daily_quantity) as min_daily_qty,
          MAX(ds.daily_quantity) as max_daily_qty,
          -- Coefficient of variation (stddev / mean) as a demand-stability
          -- measure, computed without window functions such as LAG
          COALESCE(
            STDDEV(ds.daily_quantity) / NULLIF(AVG(ds.daily_quantity), 0),
            0
          ) as daily_variance_ratio
        FROM temp_daily_sales ds
        GROUP BY ds.pid
        HAVING AVG(ds.daily_quantity) > 0
      )
      SELECT
        ds.pid,
        fd.forecast_date,
        GREATEST(0,
          ROUND(
            ds.avg_daily_qty *
            (1 + COALESCE(sf.seasonality_factor, 0))
          )
        ) as forecast_quantity,
        CASE
          WHEN ds.total_days >= 60 AND ds.daily_variance_ratio < 0.5 THEN 90
          WHEN ds.total_days >= 60 THEN 85
          WHEN ds.total_days >= 30 AND ds.daily_variance_ratio < 0.5 THEN 80
          WHEN ds.total_days >= 30 THEN 75
          WHEN ds.total_days >= 14 AND ds.daily_variance_ratio < 0.5 THEN 70
          WHEN ds.total_days >= 14 THEN 65
          ELSE 60
        END as confidence_level,
        NOW() as created_at
      FROM daily_stats ds
      JOIN temp_product_stats ps ON ds.pid = ps.pid
      CROSS JOIN temp_forecast_dates fd
      LEFT JOIN sales_seasonality sf ON fd.month = sf.month
      GROUP BY ds.pid, fd.forecast_date, ps.overall_avg_revenue, sf.seasonality_factor,
        ds.avg_daily_qty, ds.std_daily_qty, ds.total_days, ds.daily_variance_ratio
      ON CONFLICT (pid, forecast_date) DO UPDATE
      SET
        forecast_quantity = EXCLUDED.forecast_quantity,
        confidence_level = EXCLUDED.confidence_level,
        created_at = NOW()
    `);
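
    // Worked example of the forecast formula: avg_daily_qty = 4 with a
    // hypothetical seasonality_factor of 0.25 yields ROUND(4 * 1.25) = 5
    // units/day; with total_days = 45 and daily_variance_ratio = 0.3 the CASE
    // above assigns a confidence_level of 80 (at least 30 days of history and
    // a variation ratio under 0.5).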

    processedCount = Math.floor(totalProducts * 0.98);
    outputProgress({
      status: 'running',
      operation: 'Product forecasts calculated, preparing category stats',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    if (isCancelled) return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0,
      success
    };

    // Create temporary table for category stats
    await connection.query(`
      CREATE TEMPORARY TABLE temp_category_sales AS
      SELECT
        pc.cat_id,
        EXTRACT(DOW FROM o.date) + 1 as day_of_week,
        SUM(o.quantity) as daily_quantity,
        SUM(o.price * o.quantity) as daily_revenue,
        COUNT(DISTINCT DATE(o.date)) as day_count
      FROM orders o
      JOIN product_categories pc ON o.pid = pc.pid
      WHERE o.canceled = false
        AND o.date >= CURRENT_DATE - INTERVAL '90 days'
      GROUP BY pc.cat_id, EXTRACT(DOW FROM o.date) + 1
    `);

    await connection.query(`
      CREATE TEMPORARY TABLE temp_category_stats AS
      SELECT
        cat_id,
        AVG(daily_revenue) as overall_avg_revenue,
        SUM(day_count) as total_days
      FROM temp_category_sales
      GROUP BY cat_id
    `);

    processedCount = Math.floor(totalProducts * 0.99);
    outputProgress({
      status: 'running',
      operation: 'Category stats prepared, calculating category-level forecasts',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    if (isCancelled) return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0,
      success
    };

    // Calculate category-level forecasts
    await connection.query(`
      INSERT INTO category_forecasts (
        category_id,
        forecast_date,
        forecast_units,
        forecast_revenue,
        confidence_level,
        created_at
      )
      SELECT
        cs.cat_id::bigint as category_id,
        fd.forecast_date,
        GREATEST(0,
          ROUND(AVG(cs.daily_quantity) *
            (1 + COALESCE(sf.seasonality_factor, 0)))
        ) as forecast_units,
        GREATEST(0,
          COALESCE(
            CASE
              WHEN SUM(cs.day_count) >= 4 THEN AVG(cs.daily_revenue)
              ELSE ct.overall_avg_revenue
            END *
            (1 + COALESCE(sf.seasonality_factor, 0)),
            0
          )
        ) as forecast_revenue,
        CASE
          WHEN ct.total_days >= 60 THEN 90
          WHEN ct.total_days >= 30 THEN 80
          WHEN ct.total_days >= 14 THEN 70
          ELSE 60
        END as confidence_level,
        NOW() as created_at
      FROM temp_category_sales cs
      JOIN temp_category_stats ct ON cs.cat_id = ct.cat_id
      CROSS JOIN temp_forecast_dates fd
      LEFT JOIN sales_seasonality sf ON fd.month = sf.month
      GROUP BY
        cs.cat_id,
        fd.forecast_date,
        ct.overall_avg_revenue,
        ct.total_days,
        sf.seasonality_factor,
        sf.month
      HAVING AVG(cs.daily_quantity) > 0
      ON CONFLICT (category_id, forecast_date) DO UPDATE
      SET
        forecast_units = EXCLUDED.forecast_units,
        forecast_revenue = EXCLUDED.forecast_revenue,
        confidence_level = EXCLUDED.confidence_level,
        created_at = NOW()
    `);

    // Clean up temporary tables
    await connection.query(`
      DROP TABLE IF EXISTS temp_forecast_dates;
      DROP TABLE IF EXISTS temp_daily_sales;
      DROP TABLE IF EXISTS temp_product_stats;
      DROP TABLE IF EXISTS temp_category_sales;
      DROP TABLE IF EXISTS temp_category_stats;
    `);

    processedCount = Math.floor(totalProducts * 1.0);
    outputProgress({
      status: 'running',
      operation: 'Category forecasts calculated and temporary tables cleaned up',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    // If we get here, everything completed successfully
    success = true;

    // Update calculate_status
    await connection.query(`
      INSERT INTO calculate_status (module_name, last_calculation_timestamp)
      VALUES ('sales_forecasts', NOW())
      ON CONFLICT (module_name) DO UPDATE
      SET last_calculation_timestamp = NOW()
    `);

    return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0,
      success
    };

  } catch (error) {
    success = false;
    logError(error, 'Error calculating sales forecasts');
    throw error;
  } finally {
    if (connection) {
      try {
        // Ensure temporary tables are cleaned up
        await connection.query(`
          DROP TABLE IF EXISTS temp_forecast_dates;
          DROP TABLE IF EXISTS temp_daily_sales;
          DROP TABLE IF EXISTS temp_product_stats;
          DROP TABLE IF EXISTS temp_category_sales;
          DROP TABLE IF EXISTS temp_category_stats;
        `);
      } catch (err) {
        console.error('Error cleaning up temporary tables:', err);
      }
      connection.release();
    }
  }
}

module.exports = calculateSalesForecasts;
@@ -1,344 +0,0 @@
const { outputProgress, formatElapsedTime, estimateRemaining, calculateRate, logError } = require('./utils/progress');
const { getConnection } = require('./utils/db');

async function calculateTimeAggregates(startTime, totalProducts, processedCount = 0, isCancelled = false) {
  const connection = await getConnection();
  let success = false;
  let processedOrders = 0;

  try {
    if (isCancelled) {
      outputProgress({
        status: 'cancelled',
        operation: 'Time aggregates calculation cancelled',
        current: processedCount,
        total: totalProducts,
        elapsed: formatElapsedTime(startTime),
        remaining: null,
        rate: calculateRate(startTime, processedCount),
        percentage: ((processedCount / totalProducts) * 100).toFixed(1),
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
        }
      });
      return {
        processedProducts: processedCount,
        processedOrders: 0,
        processedPurchaseOrders: 0,
        success
      };
    }

    // Get order count that will be processed
    const orderCount = await connection.query(`
      SELECT COUNT(*) as count
      FROM orders o
      WHERE o.canceled = false
    `);
    processedOrders = parseInt(orderCount.rows[0].count);

    outputProgress({
      status: 'running',
      operation: 'Starting time aggregates calculation',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    // Create a temporary table for end-of-month inventory values
    await connection.query(`
      CREATE TEMPORARY TABLE IF NOT EXISTS temp_monthly_inventory AS
      WITH months AS (
        -- Generate all year/month combinations for the last 12 months
        SELECT
          EXTRACT(YEAR FROM month_date)::INTEGER as year,
          EXTRACT(MONTH FROM month_date)::INTEGER as month,
          month_date as start_date,
          (month_date + INTERVAL '1 month'::interval - INTERVAL '1 day'::interval)::DATE as end_date
        FROM (
          SELECT generate_series(
            DATE_TRUNC('month', CURRENT_DATE - INTERVAL '12 months'::interval)::DATE,
            DATE_TRUNC('month', CURRENT_DATE)::DATE,
            INTERVAL '1 month'::interval
          ) as month_date
        ) dates
      ),
      monthly_inventory_calc AS (
        SELECT
          p.pid,
          m.year,
          m.month,
          m.end_date,
          p.stock_quantity as current_quantity,
          -- Units sold after the month's end date (the join below only
          -- matches orders with o.date > end_date)
          COALESCE(SUM(
            CASE
              WHEN o.date > m.end_date THEN o.quantity
              ELSE 0
            END
          ), 0) as sold_after_end_date,
          -- Units received after the month's end date
          COALESCE(SUM(
            CASE
              WHEN po.received_date > m.end_date THEN po.received
              ELSE 0
            END
          ), 0) as received_after_end_date,
          p.cost_price
        FROM
          products p
        CROSS JOIN
          months m
        LEFT JOIN
          orders o ON p.pid = o.pid
          AND o.canceled = false
          AND o.date > m.end_date
          AND o.date <= CURRENT_DATE
        LEFT JOIN
          purchase_orders po ON p.pid = po.pid
          AND po.received_date IS NOT NULL
          AND po.received_date > m.end_date
          AND po.received_date <= CURRENT_DATE
        GROUP BY
          p.pid, m.year, m.month, m.end_date, p.stock_quantity, p.cost_price
      )
      SELECT
        pid,
        year,
        month,
        -- Back-calculate from today's stock: end-of-month quantity =
        -- current quantity + units sold since then - units received since then
        GREATEST(0, current_quantity + sold_after_end_date - received_after_end_date) as end_of_month_quantity,
        -- End of month inventory value
        GREATEST(0, current_quantity + sold_after_end_date - received_after_end_date) * cost_price as end_of_month_value,
        cost_price
      FROM
        monthly_inventory_calc
    `);
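
    // Worked example of the back-calculation: a product with 100 units on hand
    // today that sold 30 units and received 20 units since the end of March
    // must have ended March with GREATEST(0, 100 + 30 - 20) = 110 units;
    // multiplying by cost_price turns that into the end-of-month value.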

    processedCount = Math.floor(totalProducts * 0.40);
    outputProgress({
      status: 'running',
      operation: 'Monthly inventory values calculated, processing time aggregates',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    // Initial insert of time-based aggregates
    await connection.query(`
      INSERT INTO product_time_aggregates (
        pid,
        year,
        month,
        total_quantity_sold,
        total_revenue,
        total_cost,
        order_count,
        stock_received,
        stock_ordered,
        avg_price,
        profit_margin,
        inventory_value,
        gmroi
      )
      WITH monthly_sales AS (
        SELECT
          o.pid,
          EXTRACT(YEAR FROM o.date::timestamp with time zone)::INTEGER as year,
          EXTRACT(MONTH FROM o.date::timestamp with time zone)::INTEGER as month,
          SUM(o.quantity) as total_quantity_sold,
          SUM((o.price - COALESCE(o.discount, 0)) * o.quantity) as total_revenue,
          SUM(COALESCE(o.costeach, 0) * o.quantity) as total_cost,
          COUNT(DISTINCT o.order_number) as order_count,
          AVG(o.price - COALESCE(o.discount, 0)) as avg_price,
          CASE
            WHEN SUM((o.price - COALESCE(o.discount, 0)) * o.quantity) > 0
            THEN ((SUM((o.price - COALESCE(o.discount, 0)) * o.quantity) - SUM(COALESCE(o.costeach, 0) * o.quantity))
              / SUM((o.price - COALESCE(o.discount, 0)) * o.quantity)) * 100
            ELSE 0
          END as profit_margin,
          COUNT(DISTINCT DATE(o.date)) as active_days
        FROM orders o
        JOIN products p ON o.pid = p.pid
        WHERE o.canceled = false
        GROUP BY o.pid, EXTRACT(YEAR FROM o.date::timestamp with time zone), EXTRACT(MONTH FROM o.date::timestamp with time zone)
      ),
      monthly_stock AS (
        SELECT
          pid,
          EXTRACT(YEAR FROM date::timestamp with time zone)::INTEGER as year,
          EXTRACT(MONTH FROM date::timestamp with time zone)::INTEGER as month,
          SUM(received) as stock_received,
          SUM(ordered) as stock_ordered
        FROM purchase_orders
        GROUP BY pid, EXTRACT(YEAR FROM date::timestamp with time zone), EXTRACT(MONTH FROM date::timestamp with time zone)
      )
      SELECT
        COALESCE(s.pid, ms.pid, mi.pid) as pid,
        COALESCE(s.year, ms.year, mi.year) as year,
        COALESCE(s.month, ms.month, mi.month) as month,
        COALESCE(s.total_quantity_sold, 0)::INTEGER as total_quantity_sold,
        COALESCE(s.total_revenue, 0)::DECIMAL(10,3) as total_revenue,
        COALESCE(s.total_cost, 0)::DECIMAL(10,3) as total_cost,
        COALESCE(s.order_count, 0)::INTEGER as order_count,
        COALESCE(ms.stock_received, 0)::INTEGER as stock_received,
        COALESCE(ms.stock_ordered, 0)::INTEGER as stock_ordered,
        COALESCE(s.avg_price, 0)::DECIMAL(10,3) as avg_price,
        COALESCE(s.profit_margin, 0)::DECIMAL(10,3) as profit_margin,
        COALESCE(mi.end_of_month_value, 0)::DECIMAL(10,3) as inventory_value,
        CASE
          WHEN COALESCE(mi.end_of_month_value, 0) > 0
          THEN (COALESCE(s.total_revenue, 0) - COALESCE(s.total_cost, 0))
            / NULLIF(COALESCE(mi.end_of_month_value, 0), 0)
          ELSE 0
        END::DECIMAL(10,3) as gmroi
      FROM (
        SELECT * FROM monthly_sales s
        UNION ALL
        SELECT
          pid,
          year,
          month,
          0 as total_quantity_sold,
          0 as total_revenue,
          0 as total_cost,
          0 as order_count,
          NULL as avg_price,
          0 as profit_margin,
          0 as active_days
        FROM monthly_stock ms
        WHERE NOT EXISTS (
          SELECT 1 FROM monthly_sales s2
          WHERE s2.pid = ms.pid
            AND s2.year = ms.year
            AND s2.month = ms.month
        )
        UNION ALL
        SELECT
          pid,
          year,
          month,
          0 as total_quantity_sold,
          0 as total_revenue,
          0 as total_cost,
          0 as order_count,
          NULL as avg_price,
          0 as profit_margin,
          0 as active_days
        FROM temp_monthly_inventory mi
        WHERE NOT EXISTS (
          SELECT 1 FROM monthly_sales s3
          WHERE s3.pid = mi.pid
            AND s3.year = mi.year
            AND s3.month = mi.month
        )
        AND NOT EXISTS (
          SELECT 1 FROM monthly_stock ms3
          WHERE ms3.pid = mi.pid
            AND ms3.year = mi.year
            AND ms3.month = mi.month
        )
      ) s
      LEFT JOIN monthly_stock ms
        ON s.pid = ms.pid
        AND s.year = ms.year
        AND s.month = ms.month
      LEFT JOIN temp_monthly_inventory mi
        ON s.pid = mi.pid
        AND s.year = mi.year
        AND s.month = mi.month
      ON CONFLICT (pid, year, month) DO UPDATE
      SET
        total_quantity_sold = EXCLUDED.total_quantity_sold,
        total_revenue = EXCLUDED.total_revenue,
        total_cost = EXCLUDED.total_cost,
        order_count = EXCLUDED.order_count,
        stock_received = EXCLUDED.stock_received,
        stock_ordered = EXCLUDED.stock_ordered,
        avg_price = EXCLUDED.avg_price,
        profit_margin = EXCLUDED.profit_margin,
        inventory_value = EXCLUDED.inventory_value,
        gmroi = EXCLUDED.gmroi
    `);
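
    // GMROI here is gross margin divided by end-of-month inventory value, so a
    // month with $5,000 revenue, $3,000 cost and $4,000 of inventory scores
    // (5000 - 3000) / 4000 = 0.5, i.e. fifty cents of gross margin per dollar
    // of inventory carried.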

    processedCount = Math.floor(totalProducts * 0.60);
    outputProgress({
      status: 'running',
      operation: 'Base time aggregates calculated',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    if (isCancelled) return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0,
      success
    };

    // Clean up temporary tables
    await connection.query('DROP TABLE IF EXISTS temp_monthly_inventory');

    // If we get here, everything completed successfully
    success = true;

    // Update calculate_status
    await connection.query(`
      INSERT INTO calculate_status (module_name, last_calculation_timestamp)
      VALUES ('time_aggregates', NOW())
      ON CONFLICT (module_name) DO UPDATE
      SET last_calculation_timestamp = NOW()
    `);

    return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0,
      success
    };

  } catch (error) {
    success = false;
    logError(error, 'Error calculating time aggregates');
    throw error;
  } finally {
    if (connection) {
      try {
        // Ensure temporary tables are cleaned up
        await connection.query('DROP TABLE IF EXISTS temp_monthly_inventory');
      } catch (err) {
        console.error('Error cleaning up temporary tables:', err);
      }
      connection.release();
    }
  }
}

module.exports = calculateTimeAggregates;
@@ -1,39 +0,0 @@
const { Pool } = require('pg');
const path = require('path');
require('dotenv').config({ path: path.resolve(__dirname, '../../..', '.env') });

// Database configuration
const dbConfig = {
  host: process.env.DB_HOST,
  user: process.env.DB_USER,
  password: process.env.DB_PASSWORD,
  database: process.env.DB_NAME,
  port: process.env.DB_PORT || 5432,
  ssl: process.env.DB_SSL === 'true',
  // Add performance optimizations
  max: 10, // connection pool max size
  idleTimeoutMillis: 30000,
  connectionTimeoutMillis: 60000
};

// Create a single pool instance to be reused
const pool = new Pool(dbConfig);

// Add event handlers for pool
pool.on('error', (err, client) => {
  console.error('Unexpected error on idle client', err);
});

async function getConnection() {
  return await pool.connect();
}

async function closePool() {
  await pool.end();
}

module.exports = {
  dbConfig,
  getConnection,
  closePool
};
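
// Typical usage sketch for this helper (variable names are illustrative):
// acquire a client, run queries, and always release in a finally block so
// the pool's 10 slots are never leaked:
//
//   const { getConnection } = require('./utils/db');
//   const connection = await getConnection();
//   try {
//     const res = await connection.query('SELECT 1');
//   } finally {
//     connection.release();
//   }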
@@ -1,158 +0,0 @@
const fs = require('fs');
const path = require('path');

// Helper function to format elapsed time
function formatElapsedTime(elapsed) {
  // If elapsed is a timestamp, convert to elapsed milliseconds
  if (elapsed instanceof Date || elapsed > 1000000000000) {
    elapsed = Date.now() - elapsed;
  } else {
    // If elapsed is in seconds, convert to milliseconds
    elapsed = elapsed * 1000;
  }

  const seconds = Math.floor(elapsed / 1000);
  const minutes = Math.floor(seconds / 60);
  const hours = Math.floor(minutes / 60);

  if (hours > 0) {
    return `${hours}h ${minutes % 60}m`;
  } else if (minutes > 0) {
    return `${minutes}m ${seconds % 60}s`;
  } else {
    return `${seconds}s`;
  }
}
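
// Example: formatElapsedTime(3725) treats 3725 as seconds and returns '1h 2m',
// while formatElapsedTime(Date.now() - 90000) treats the argument as a start
// timestamp (it exceeds the 1000000000000 cutoff) and returns '1m 30s'.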

// Helper function to estimate remaining time
function estimateRemaining(startTime, current, total) {
  if (current === 0) return null;
  const elapsed = Date.now() - startTime;
  const rate = current / elapsed;
  const remaining = (total - current) / rate;

  const minutes = Math.floor(remaining / 60000);
  const seconds = Math.floor((remaining % 60000) / 1000);

  if (minutes > 0) {
    return `${minutes}m ${seconds}s`;
  } else {
    return `${seconds}s`;
  }
}

// Helper function to calculate rate
function calculateRate(startTime, current) {
  const elapsed = (Date.now() - startTime) / 1000; // Convert to seconds
  return elapsed > 0 ? Math.round(current / elapsed) : 0;
}

// Set up logging
const LOG_DIR = path.join(__dirname, '../../../logs');
const ERROR_LOG = path.join(LOG_DIR, 'import-errors.log');
const IMPORT_LOG = path.join(LOG_DIR, 'import.log');
const STATUS_FILE = path.join(LOG_DIR, 'metrics-status.json');

// Ensure log directory exists
if (!fs.existsSync(LOG_DIR)) {
  fs.mkdirSync(LOG_DIR, { recursive: true });
}

// Helper function to log errors
function logError(error, context = '') {
  const timestamp = new Date().toISOString();
  const errorMessage = `[${timestamp}] ${context}\nError: ${error.message}\nStack: ${error.stack}\n\n`;

  // Log to error file
  fs.appendFileSync(ERROR_LOG, errorMessage);

  // Also log to console
  console.error(`\n${context}\nError: ${error.message}`);
}

// Helper function to log import progress
function logImport(message) {
  const timestamp = new Date().toISOString();
  const logMessage = `[${timestamp}] ${message}\n`;
  fs.appendFileSync(IMPORT_LOG, logMessage);
}

// Helper function to output progress
function outputProgress(data) {
  // Save progress to file for resumption
  saveProgress(data);
  // Format as SSE event
  const event = {
    progress: data
  };
  // Always send to stdout for frontend
  process.stdout.write(JSON.stringify(event) + '\n');

  // Log significant events to disk
  const isSignificant =
    // Operation starts
    (data.operation && !data.current) ||
    // Operation completions and errors
    data.status === 'complete' ||
    data.status === 'error' ||
    // Major phase changes
    data.operation?.includes('Starting ABC classification') ||
    data.operation?.includes('Starting time-based aggregates') ||
    data.operation?.includes('Starting vendor metrics');

  if (isSignificant) {
    logImport(`${data.operation || 'Operation'}${data.message ? ': ' + data.message : ''}${data.error ? ' Error: ' + data.error : ''}${data.status ? ' Status: ' + data.status : ''}`);
  }
}
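
// Each call therefore emits one JSON line on stdout that the server can relay
// as a server-sent event, shaped like (field values illustrative):
//   {"progress":{"status":"running","operation":"Starting vendor metrics calculation","current":800,"total":1000}}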

function saveProgress(progress) {
  try {
    fs.writeFileSync(STATUS_FILE, JSON.stringify({
      ...progress,
      timestamp: Date.now()
    }));
  } catch (err) {
    console.error('Failed to save progress:', err);
  }
}

function clearProgress() {
  try {
    if (fs.existsSync(STATUS_FILE)) {
      fs.unlinkSync(STATUS_FILE);
    }
  } catch (err) {
    console.error('Failed to clear progress:', err);
  }
}

function getProgress() {
  try {
    if (fs.existsSync(STATUS_FILE)) {
      const progress = JSON.parse(fs.readFileSync(STATUS_FILE, 'utf8'));
      // Check if the progress is still valid (less than 1 hour old)
      if (progress.timestamp && Date.now() - progress.timestamp < 3600000) {
        return progress;
      } else {
        // Clear old progress
        clearProgress();
      }
    }
  } catch (err) {
    console.error('Failed to read progress:', err);
    clearProgress();
  }
  return null;
}

module.exports = {
  formatElapsedTime,
  estimateRemaining,
  calculateRate,
  logError,
  logImport,
  outputProgress,
  saveProgress,
  clearProgress,
  getProgress
};
@@ -1,378 +0,0 @@
const { outputProgress, formatElapsedTime, estimateRemaining, calculateRate, logError } = require('./utils/progress');
const { getConnection } = require('./utils/db');

async function calculateVendorMetrics(startTime, totalProducts, processedCount = 0, isCancelled = false) {
  const connection = await getConnection();
  let success = false;
  let processedOrders = 0;
  let processedPurchaseOrders = 0;

  try {
    if (isCancelled) {
      outputProgress({
        status: 'cancelled',
        operation: 'Vendor metrics calculation cancelled',
        current: processedCount,
        total: totalProducts,
        elapsed: formatElapsedTime(startTime),
        remaining: null,
        rate: calculateRate(startTime, processedCount),
        percentage: ((processedCount / totalProducts) * 100).toFixed(1),
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
        }
      });
      return {
        processedProducts: processedCount,
        processedOrders,
        processedPurchaseOrders,
        success
      };
    }

    // Get counts of records that will be processed
    const [orderCountResult, poCountResult] = await Promise.all([
      connection.query(`
        SELECT COUNT(*) as count
        FROM orders o
        WHERE o.canceled = false
      `),
      connection.query(`
        SELECT COUNT(*) as count
        FROM purchase_orders po
        WHERE po.status != 0
      `)
    ]);
    processedOrders = parseInt(orderCountResult.rows[0].count);
    processedPurchaseOrders = parseInt(poCountResult.rows[0].count);

    outputProgress({
      status: 'running',
      operation: 'Starting vendor metrics calculation',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    // First ensure all vendors exist in vendor_details
    await connection.query(`
      INSERT INTO vendor_details (vendor, status, created_at, updated_at)
      SELECT DISTINCT
        vendor,
        'active' as status,
        NOW() as created_at,
        NOW() as updated_at
      FROM products
      WHERE vendor IS NOT NULL
      ON CONFLICT (vendor) DO NOTHING
    `);

    processedCount = Math.floor(totalProducts * 0.8);
    outputProgress({
      status: 'running',
      operation: 'Vendor details updated, calculating metrics',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    if (isCancelled) return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders,
      success
    };

    // Now calculate vendor metrics
    await connection.query(`
      INSERT INTO vendor_metrics (
        vendor,
        total_revenue,
        total_orders,
        total_late_orders,
        avg_lead_time_days,
        on_time_delivery_rate,
        order_fill_rate,
        avg_order_value,
        active_products,
        total_products,
        total_purchase_value,
        avg_margin_percent,
        status,
        last_calculated_at
      )
      WITH vendor_sales AS (
        SELECT
          p.vendor,
          SUM(o.quantity * o.price) as total_revenue,
          COUNT(DISTINCT o.id) as total_orders,
          COUNT(DISTINCT p.pid) as active_products,
          SUM(o.quantity * (o.price - p.cost_price)) as total_margin
        FROM products p
        JOIN orders o ON p.pid = o.pid
        WHERE o.canceled = false
          AND o.date >= CURRENT_DATE - INTERVAL '12 months'
        GROUP BY p.vendor
      ),
      vendor_po AS (
        SELECT
          p.vendor,
          COUNT(DISTINCT CASE WHEN po.receiving_status = 40 THEN po.id END) as received_orders,
          COUNT(DISTINCT po.id) as total_orders,
          AVG(CASE
            WHEN po.receiving_status = 40
              AND po.received_date IS NOT NULL
              AND po.date IS NOT NULL
            THEN EXTRACT(EPOCH FROM (po.received_date::timestamp with time zone - po.date::timestamp with time zone)) / 86400.0
            ELSE NULL
          END) as avg_lead_time_days,
          SUM(po.ordered * po.po_cost_price) as total_purchase_value
        FROM products p
        JOIN purchase_orders po ON p.pid = po.pid
        WHERE po.date >= CURRENT_DATE - INTERVAL '12 months'
        GROUP BY p.vendor
      ),
      vendor_products AS (
        SELECT
          vendor,
          COUNT(DISTINCT pid) as total_products
        FROM products
        GROUP BY vendor
      )
      SELECT
        vs.vendor,
        COALESCE(vs.total_revenue, 0) as total_revenue,
        COALESCE(vp.total_orders, 0) as total_orders,
        COALESCE(vp.total_orders - vp.received_orders, 0) as total_late_orders,
        COALESCE(vp.avg_lead_time_days, 0) as avg_lead_time_days,
        CASE
          WHEN vp.total_orders > 0
          -- cast to numeric so the ratio isn't truncated by integer division
          THEN (vp.received_orders::numeric / vp.total_orders) * 100
          ELSE 0
        END as on_time_delivery_rate,
        CASE
          WHEN vp.total_orders > 0
          THEN (vp.received_orders::numeric / vp.total_orders) * 100
          ELSE 0
        END as order_fill_rate,
        CASE
          WHEN vs.total_orders > 0
          THEN vs.total_revenue / vs.total_orders
          ELSE 0
        END as avg_order_value,
        COALESCE(vs.active_products, 0) as active_products,
        COALESCE(vpr.total_products, 0) as total_products,
        COALESCE(vp.total_purchase_value, 0) as total_purchase_value,
        CASE
          WHEN vs.total_revenue > 0
          THEN (vs.total_margin / vs.total_revenue) * 100
          ELSE 0
        END as avg_margin_percent,
        'active' as status,
        NOW() as last_calculated_at
      FROM vendor_sales vs
      LEFT JOIN vendor_po vp ON vs.vendor = vp.vendor
      LEFT JOIN vendor_products vpr ON vs.vendor = vpr.vendor
      WHERE vs.vendor IS NOT NULL
      ON CONFLICT (vendor) DO UPDATE
      SET
        total_revenue = EXCLUDED.total_revenue,
        total_orders = EXCLUDED.total_orders,
        total_late_orders = EXCLUDED.total_late_orders,
        avg_lead_time_days = EXCLUDED.avg_lead_time_days,
        on_time_delivery_rate = EXCLUDED.on_time_delivery_rate,
        order_fill_rate = EXCLUDED.order_fill_rate,
        avg_order_value = EXCLUDED.avg_order_value,
        active_products = EXCLUDED.active_products,
        total_products = EXCLUDED.total_products,
        total_purchase_value = EXCLUDED.total_purchase_value,
        avg_margin_percent = EXCLUDED.avg_margin_percent,
        status = EXCLUDED.status,
        last_calculated_at = EXCLUDED.last_calculated_at
    `);
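
    // Lead time above converts an interval to fractional days via
    // EXTRACT(EPOCH FROM ...) / 86400: a PO placed 2025-01-01 00:00 and
    // received 2025-01-02 12:00 spans 129600 seconds, i.e. 129600 / 86400
    // = 1.5 days.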

    processedCount = Math.floor(totalProducts * 0.9);
    outputProgress({
      status: 'running',
      operation: 'Vendor metrics calculated, updating time-based metrics',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    if (isCancelled) return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders,
      success
    };

    // Calculate time-based metrics
    await connection.query(`
      INSERT INTO vendor_time_metrics (
        vendor,
        year,
        month,
        total_orders,
        late_orders,
        avg_lead_time_days,
        total_purchase_value,
        total_revenue,
        avg_margin_percent
      )
      WITH monthly_orders AS (
        SELECT
          p.vendor,
          EXTRACT(YEAR FROM o.date::timestamp with time zone) as year,
          EXTRACT(MONTH FROM o.date::timestamp with time zone) as month,
          COUNT(DISTINCT o.id) as total_orders,
          SUM(o.quantity * o.price) as total_revenue,
          SUM(o.quantity * (o.price - p.cost_price)) as total_margin
        FROM products p
        JOIN orders o ON p.pid = o.pid
        WHERE o.canceled = false
          AND o.date >= CURRENT_DATE - INTERVAL '12 months'
          AND p.vendor IS NOT NULL
        GROUP BY p.vendor, EXTRACT(YEAR FROM o.date::timestamp with time zone), EXTRACT(MONTH FROM o.date::timestamp with time zone)
      ),
      monthly_po AS (
        SELECT
          p.vendor,
          EXTRACT(YEAR FROM po.date::timestamp with time zone) as year,
          EXTRACT(MONTH FROM po.date::timestamp with time zone) as month,
          COUNT(DISTINCT po.id) as total_po,
          COUNT(DISTINCT CASE
            WHEN po.receiving_status = 40 AND po.received_date > po.expected_date
            THEN po.id
          END) as late_orders,
          AVG(CASE
            WHEN po.receiving_status = 40
              AND po.received_date IS NOT NULL
              AND po.date IS NOT NULL
            THEN EXTRACT(EPOCH FROM (po.received_date::timestamp with time zone - po.date::timestamp with time zone)) / 86400.0
            ELSE NULL
          END) as avg_lead_time_days,
          SUM(po.ordered * po.po_cost_price) as total_purchase_value
        FROM products p
        JOIN purchase_orders po ON p.pid = po.pid
        WHERE po.date >= CURRENT_DATE - INTERVAL '12 months'
          AND p.vendor IS NOT NULL
        GROUP BY p.vendor, EXTRACT(YEAR FROM po.date::timestamp with time zone), EXTRACT(MONTH FROM po.date::timestamp with time zone)
      )
      SELECT
        mo.vendor,
        mo.year,
        mo.month,
        COALESCE(mp.total_po, 0) as total_orders,
        COALESCE(mp.late_orders, 0) as late_orders,
        COALESCE(mp.avg_lead_time_days, 0) as avg_lead_time_days,
        COALESCE(mp.total_purchase_value, 0) as total_purchase_value,
        mo.total_revenue,
        CASE
          WHEN mo.total_revenue > 0
          THEN (mo.total_margin / mo.total_revenue) * 100
          ELSE 0
        END as avg_margin_percent
      FROM monthly_orders mo
      LEFT JOIN monthly_po mp ON mo.vendor = mp.vendor
        AND mo.year = mp.year
        AND mo.month = mp.month
      UNION
      SELECT
        mp.vendor,
        mp.year,
        mp.month,
        mp.total_po as total_orders,
        mp.late_orders,
        mp.avg_lead_time_days,
        mp.total_purchase_value,
        0 as total_revenue,
        0 as avg_margin_percent
      FROM monthly_po mp
      LEFT JOIN monthly_orders mo ON mp.vendor = mo.vendor
        AND mp.year = mo.year
        AND mp.month = mo.month
      WHERE mo.vendor IS NULL
      ON CONFLICT (vendor, year, month) DO UPDATE
      SET
        total_orders = EXCLUDED.total_orders,
        late_orders = EXCLUDED.late_orders,
        avg_lead_time_days = EXCLUDED.avg_lead_time_days,
        total_purchase_value = EXCLUDED.total_purchase_value,
        total_revenue = EXCLUDED.total_revenue,
        avg_margin_percent = EXCLUDED.avg_margin_percent
    `);

    processedCount = Math.floor(totalProducts * 0.95);
    outputProgress({
      status: 'running',
      operation: 'Time-based vendor metrics calculated',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    // If we get here, everything completed successfully
    success = true;

    // Update calculate_status
    await connection.query(`
      INSERT INTO calculate_status (module_name, last_calculation_timestamp)
      VALUES ('vendor_metrics', NOW())
      ON CONFLICT (module_name) DO UPDATE
      SET last_calculation_timestamp = NOW()
    `);

    return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders,
      success
    };

  } catch (error) {
    success = false;
    logError(error, 'Error calculating vendor metrics');
    throw error;
  } finally {
    if (connection) {
      connection.release();
    }
  }
}

module.exports = calculateVendorMetrics;
428
inventory-server/scripts/psql-csv-import.sh
Executable file
@@ -0,0 +1,428 @@
#!/bin/bash

# Simple script to import CSV to PostgreSQL using psql
# Usage: ./psql-csv-import.sh <csv-file> <table-name> [start-batch]

# Exit on error
set -e

# Get arguments
CSV_FILE=$1
TABLE_NAME=$2
BATCH_SIZE=500000 # Process 500,000 rows at a time
START_BATCH=${3:-1} # Optional third parameter to start from a specific batch

if [ -z "$CSV_FILE" ] || [ -z "$TABLE_NAME" ]; then
  echo "Usage: ./psql-csv-import.sh <csv-file> <table-name> [start-batch]"
  exit 1
fi

# Check if file exists (only needed for batch 1)
if [ "$START_BATCH" -eq 1 ] && [ ! -f "$CSV_FILE" ]; then
  echo "Error: CSV file '$CSV_FILE' not found"
  exit 1
fi

# Load environment variables
if [ -f "../.env" ]; then
  source "../.env"
else
  echo "Warning: .env file not found, using default connection parameters"
fi

# Set default connection parameters if not from .env
DB_HOST=${DB_HOST:-localhost}
DB_PORT=${DB_PORT:-5432}
DB_NAME=${DB_NAME:-inventory_db}
DB_USER=${DB_USER:-postgres}
export PGPASSWORD=${DB_PASSWORD:-} # Export password for psql

# Common psql parameters
PSQL_OPTS="-h $DB_HOST -p $DB_PORT -U $DB_USER -d $DB_NAME"

# Function to clean up database state
cleanup_and_optimize() {
  echo "Cleaning up and optimizing database state..."

  # Analyze the target table to update statistics
  psql $PSQL_OPTS -c "ANALYZE $TABLE_NAME;"

  # Perform vacuum to reclaim space and update stats
  psql $PSQL_OPTS -c "VACUUM $TABLE_NAME;"

  # Terminate any other backends connected to this database
  psql $PSQL_OPTS -c "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = current_database() AND pid <> pg_backend_pid();"

  # Discard this session's cached state (plans, temp tables, session GUCs)
  psql $PSQL_OPTS -c "DISCARD ALL;"

  echo "Optimization complete."
}
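
# Rationale for cleanup_and_optimize: ANALYZE refreshes planner statistics
# after a large batch of inserts, VACUUM reclaims dead tuples, and DISCARD ALL
# resets the session's cached state. Running these between resumed batches
# keeps query plans and disk usage from degrading over a multi-million-row
# import.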

# Show connection info
echo "Importing $CSV_FILE into $TABLE_NAME"
echo "Database: $DB_NAME on $DB_HOST:$DB_PORT with batch size: $BATCH_SIZE starting at batch $START_BATCH"

# Start timer
START_TIME=$(date +%s)

# Create progress tracking file
PROGRESS_FILE="/tmp/import_progress_${TABLE_NAME}.txt"
touch "$PROGRESS_FILE"
echo "Starting import at $(date), batch $START_BATCH" >> "$PROGRESS_FILE"

# If we're resuming, run cleanup first
if [ "$START_BATCH" -gt 1 ]; then
  cleanup_and_optimize
fi

# For imported_product_stat_history, use optimized approach with hardcoded column names
if [ "$TABLE_NAME" = "imported_product_stat_history" ]; then
  echo "Using optimized import for $TABLE_NAME"

  # Only drop constraints/indexes and create staging table for batch 1
  if [ "$START_BATCH" -eq 1 ]; then
    # Extract CSV header
    CSV_HEADER=$(head -n 1 "$CSV_FILE")
    echo "CSV header: $CSV_HEADER"

    # Step 1: Drop constraints and indexes
    echo "Dropping constraints and indexes..."
    psql $PSQL_OPTS -c "
      DO \$\$
      DECLARE
        constraint_name TEXT;
      BEGIN
        -- Drop primary key constraint if exists
        SELECT conname INTO constraint_name
        FROM pg_constraint
        WHERE conrelid = '$TABLE_NAME'::regclass AND contype = 'p';

        IF FOUND THEN
          EXECUTE 'ALTER TABLE $TABLE_NAME DROP CONSTRAINT IF EXISTS ' || constraint_name;
          RAISE NOTICE 'Dropped primary key constraint: %', constraint_name;
        END IF;
      END \$\$;
    "

    # Drop all indexes on the table
    psql $PSQL_OPTS -c "
      DO \$\$
      DECLARE
        index_name TEXT;
        index_record RECORD;
      BEGIN
        FOR index_record IN
          SELECT indexname
          FROM pg_indexes
          WHERE tablename = '$TABLE_NAME'
        LOOP
          EXECUTE 'DROP INDEX IF EXISTS ' || index_record.indexname;
          RAISE NOTICE 'Dropped index: %', index_record.indexname;
        END LOOP;
      END \$\$;
    "

    # Step 2: Set maintenance_work_mem and disable triggers
    echo "Setting maintenance_work_mem and disabling triggers..."
    psql $PSQL_OPTS -c "
      SET maintenance_work_mem = '1GB';
      ALTER TABLE $TABLE_NAME DISABLE TRIGGER ALL;
    "

    # Step 3: Create staging table
    echo "Creating staging table..."
    psql $PSQL_OPTS -c "
      DROP TABLE IF EXISTS staging_import;
      CREATE UNLOGGED TABLE staging_import (
        pid TEXT,
        date TEXT,
        score TEXT,
        score2 TEXT,
        qty_in_baskets TEXT,
        qty_sold TEXT,
        notifies_set TEXT,
        visibility_score TEXT,
        health_score TEXT,
        sold_view_score TEXT
      );

      -- Create an index on staging_import to improve OFFSET performance
      CREATE INDEX ON staging_import (pid);
    "

    # Step 4: Import CSV into staging table
    echo "Importing CSV into staging table..."
    psql $PSQL_OPTS -c "\copy staging_import FROM '$CSV_FILE' WITH CSV HEADER DELIMITER ','"
  else
    echo "Resuming import from batch $START_BATCH - skipping table creation and CSV import"

    # Check if staging table exists
    STAGING_EXISTS=$(psql $PSQL_OPTS -t -c "SELECT EXISTS(SELECT 1 FROM pg_tables WHERE tablename='staging_import');" | tr -d '[:space:]')

    if [ "$STAGING_EXISTS" != "t" ]; then
      echo "Error: Staging table 'staging_import' does not exist. Run without batch parameter first."
      exit 1
    fi

    # Ensure triggers are disabled
    psql $PSQL_OPTS -c "ALTER TABLE $TABLE_NAME DISABLE TRIGGER ALL;"

    # Raise memory limits (psql settings last only for a single session, so
    # the per-batch INSERT below also sets work_mem itself)
    psql $PSQL_OPTS -c "
      SET work_mem = '256MB';
      SET maintenance_work_mem = '1GB';
    "
  fi

  # Step 5: Get total row count
  TOTAL_ROWS=$(psql $PSQL_OPTS -t -c "SELECT COUNT(*) FROM staging_import;" | tr -d '[:space:]')
  echo "Total rows to import: $TOTAL_ROWS"

  # Calculate starting point
  PROCESSED=$(( ($START_BATCH - 1) * $BATCH_SIZE ))
  if [ $PROCESSED -ge $TOTAL_ROWS ]; then
    echo "Error: Start batch $START_BATCH is beyond the available rows ($TOTAL_ROWS)"
    exit 1
  fi

  # Step 6: Process in batches with shell loop
  BATCH_NUM=$(( $START_BATCH - 1 ))

  # We'll process batches in chunks of 10 before cleaning up
  CHUNKS_SINCE_CLEANUP=0

  while [ $PROCESSED -lt $TOTAL_ROWS ]; do
    BATCH_NUM=$(( $BATCH_NUM + 1 ))
    BATCH_START=$(date +%s)
    MAX_ROWS=$(( $PROCESSED + $BATCH_SIZE ))
    if [ $MAX_ROWS -gt $TOTAL_ROWS ]; then
      MAX_ROWS=$TOTAL_ROWS
    fi

    echo "Processing batch $BATCH_NUM (rows $PROCESSED to $MAX_ROWS)..."

    # Insert batch with type casts; work_mem is set inside the same psql call
    # because each invocation is its own session and a SET issued separately
    # would not carry over
    psql $PSQL_OPTS -c "
      SET work_mem = '256MB';
      INSERT INTO $TABLE_NAME (
        pid, date, score, score2, qty_in_baskets, qty_sold,
        notifies_set, visibility_score, health_score, sold_view_score
      )
      SELECT
        pid::bigint,
        date::date,
        score::numeric,
        score2::numeric,
        qty_in_baskets::smallint,
        qty_sold::smallint,
        notifies_set::smallint,
        visibility_score::numeric,
        health_score::varchar,
        sold_view_score::numeric
      FROM staging_import
      LIMIT $BATCH_SIZE
      OFFSET $PROCESSED;
    "

        # Update progress
        BATCH_END=$(date +%s)
        BATCH_ELAPSED=$(( $BATCH_END - $BATCH_START ))
        PROGRESS_PCT=$(echo "scale=2; $MAX_ROWS * 100 / $TOTAL_ROWS" | bc)

        echo "Batch $BATCH_NUM committed in ${BATCH_ELAPSED}s, $MAX_ROWS of $TOTAL_ROWS rows processed ($PROGRESS_PCT%)" | tee -a "$PROGRESS_FILE"

        # Increment counter
        PROCESSED=$(( $PROCESSED + $BATCH_SIZE ))
        CHUNKS_SINCE_CLEANUP=$(( $CHUNKS_SINCE_CLEANUP + 1 ))

        # Check current row count every 10 batches
        if [ $(( $BATCH_NUM % 10 )) -eq 0 ]; then
            CURRENT_COUNT=$(psql $PSQL_OPTS -t -c "SELECT COUNT(*) FROM $TABLE_NAME;" | tr -d '[:space:]')
            echo "Current row count in $TABLE_NAME: $CURRENT_COUNT" | tee -a "$PROGRESS_FILE"

            # Every 10 batches, run an intermediate cleanup
            if [ $CHUNKS_SINCE_CLEANUP -ge 10 ]; then
                echo "Running intermediate cleanup and optimization..."
                psql $PSQL_OPTS -c "VACUUM $TABLE_NAME;"
                CHUNKS_SINCE_CLEANUP=0
            fi
        fi

        # Optional - write a checkpoint file to know where to restart
        echo "$BATCH_NUM" > "/tmp/import_last_batch_${TABLE_NAME}.txt"
    done

    # Only recreate indexes if we've completed the import
    if [ $PROCESSED -ge $TOTAL_ROWS ]; then
        # Step 7: Re-enable triggers and recreate primary key
        echo "Re-enabling triggers and recreating primary key..."
        psql $PSQL_OPTS -c "
            ALTER TABLE $TABLE_NAME ENABLE TRIGGER ALL;
            ALTER TABLE $TABLE_NAME ADD PRIMARY KEY (pid, date);
        "

        # Step 8: Clean up and get final count
        echo "Cleaning up and getting final count..."
        psql $PSQL_OPTS -c "
            DROP TABLE staging_import;
            VACUUM ANALYZE $TABLE_NAME;
            SELECT COUNT(*) AS \"Total rows in $TABLE_NAME\" FROM $TABLE_NAME;
        "
    else
        echo "Import interrupted at batch $BATCH_NUM. To resume, run:"
        echo "./psql-csv-import.sh $CSV_FILE $TABLE_NAME $BATCH_NUM"
    fi

else
    # Generic approach for other tables
    if [ "$START_BATCH" -eq 1 ]; then
        # Extract CSV header
        CSV_HEADER=$(head -n 1 "$CSV_FILE")
        echo "CSV header: $CSV_HEADER"

        # Quote the header names for SQL and build a TEXT column list for the temp table
        CSV_COLUMNS=$(echo "$CSV_HEADER" | tr ',' '\n' | sed 's/^/"/;s/$/"/' | tr '\n' ',' | sed 's/,$//')
        TEMP_COLUMNS=$(echo "$CSV_HEADER" | tr ',' '\n' | sed 's/$/ TEXT/' | tr '\n' ',' | sed 's/,$//')

        echo "Importing columns: $CSV_COLUMNS"

        # Step 1: Set maintenance_work_mem and disable triggers
        echo "Setting maintenance_work_mem and disabling triggers..."
        psql $PSQL_OPTS -c "
            SET maintenance_work_mem = '1GB';
            ALTER TABLE $TABLE_NAME DISABLE TRIGGER ALL;
        "

        # Step 2: Create temp table
        echo "Creating temporary table..."
        psql $PSQL_OPTS -c "
            DROP TABLE IF EXISTS temp_import;
            CREATE UNLOGGED TABLE temp_import ($TEMP_COLUMNS);

            -- NOTE: ((1)) creates an expression index on the constant 1, not on
            -- the first column, so it does not speed up the LIMIT/OFFSET scans below
            CREATE INDEX ON temp_import ((1));
        "

        # Step 3: Import CSV into temp table
        echo "Importing CSV into temporary table..."
        psql $PSQL_OPTS -c "\copy temp_import FROM '$CSV_FILE' WITH CSV HEADER DELIMITER ','"
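        # \copy streams the file through the psql client, so the CSV only needs
        # to be readable by this shell user; server-side COPY would instead
        # require the file to be accessible to the postgres server process.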
    else
        echo "Resuming import from batch $START_BATCH - skipping table creation and CSV import"

        # Check if temp table exists
        TEMP_EXISTS=$(psql $PSQL_OPTS -t -c "SELECT EXISTS(SELECT 1 FROM pg_tables WHERE tablename='temp_import');" | tr -d '[:space:]')

        if [ "$TEMP_EXISTS" != "t" ]; then
            echo "Error: Temporary table 'temp_import' does not exist. Run without batch parameter first."
            exit 1
        fi

        # Ensure triggers are disabled
        psql $PSQL_OPTS -c "ALTER TABLE $TABLE_NAME DISABLE TRIGGER ALL;"

        # Optimize PostgreSQL for better performance
        psql $PSQL_OPTS -c "
            -- Increase work mem for this session
            SET work_mem = '256MB';
            SET maintenance_work_mem = '1GB';
        "

        # Hard-code columns since we know them
        CSV_COLUMNS='"pid","date","score","score2","qty_in_baskets","qty_sold","notifies_set","visibility_score","health_score","sold_view_score"'

        echo "Using standard columns: $CSV_COLUMNS"
    fi

    # Step 4: Get total row count
    TOTAL_ROWS=$(psql $PSQL_OPTS -t -c "SELECT COUNT(*) FROM temp_import;" | tr -d '[:space:]')
    echo "Total rows to import: $TOTAL_ROWS"

    # Calculate starting point
    PROCESSED=$(( ($START_BATCH - 1) * $BATCH_SIZE ))
    if [ $PROCESSED -ge $TOTAL_ROWS ]; then
        echo "Error: Start batch $START_BATCH is beyond the available rows ($TOTAL_ROWS)"
        exit 1
    fi

    # Step 5: Process in batches with shell loop
    BATCH_NUM=$(( $START_BATCH - 1 ))

    # We'll process batches in chunks of 10 before cleaning up
    CHUNKS_SINCE_CLEANUP=0

    while [ $PROCESSED -lt $TOTAL_ROWS ]; do
        BATCH_NUM=$(( $BATCH_NUM + 1 ))
        BATCH_START=$(date +%s)
        MAX_ROWS=$(( $PROCESSED + $BATCH_SIZE ))
        if [ $MAX_ROWS -gt $TOTAL_ROWS ]; then
            MAX_ROWS=$TOTAL_ROWS
        fi

        echo "Processing batch $BATCH_NUM (rows $PROCESSED to $MAX_ROWS)..."

        # Optimize query buffer for this batch
        psql $PSQL_OPTS -c "SET work_mem = '256MB';"

        # Insert batch
        psql $PSQL_OPTS -c "
            INSERT INTO $TABLE_NAME ($CSV_COLUMNS)
            SELECT $CSV_COLUMNS
            FROM temp_import
            LIMIT $BATCH_SIZE
            OFFSET $PROCESSED;
        "

        # Update progress
        BATCH_END=$(date +%s)
        BATCH_ELAPSED=$(( $BATCH_END - $BATCH_START ))
        PROGRESS_PCT=$(echo "scale=2; $MAX_ROWS * 100 / $TOTAL_ROWS" | bc)

        echo "Batch $BATCH_NUM committed in ${BATCH_ELAPSED}s, $MAX_ROWS of $TOTAL_ROWS rows processed ($PROGRESS_PCT%)" | tee -a "$PROGRESS_FILE"

        # Increment counter
        PROCESSED=$(( $PROCESSED + $BATCH_SIZE ))
        CHUNKS_SINCE_CLEANUP=$(( $CHUNKS_SINCE_CLEANUP + 1 ))

        # Check current row count every 10 batches
        if [ $(( $BATCH_NUM % 10 )) -eq 0 ]; then
            CURRENT_COUNT=$(psql $PSQL_OPTS -t -c "SELECT COUNT(*) FROM $TABLE_NAME;" | tr -d '[:space:]')
            echo "Current row count in $TABLE_NAME: $CURRENT_COUNT" | tee -a "$PROGRESS_FILE"

            # Every 10 batches, run an intermediate cleanup
            if [ $CHUNKS_SINCE_CLEANUP -ge 10 ]; then
                echo "Running intermediate cleanup and optimization..."
                psql $PSQL_OPTS -c "VACUUM $TABLE_NAME;"
                CHUNKS_SINCE_CLEANUP=0
            fi
        fi

        # Optional - write a checkpoint file to know where to restart
        echo "$BATCH_NUM" > "/tmp/import_last_batch_${TABLE_NAME}.txt"
    done

    # Only clean up if we've completed the import
    if [ $PROCESSED -ge $TOTAL_ROWS ]; then
        # Step 6: Re-enable triggers and clean up
        echo "Re-enabling triggers and cleaning up..."
        psql $PSQL_OPTS -c "
            ALTER TABLE $TABLE_NAME ENABLE TRIGGER ALL;
            DROP TABLE temp_import;
            VACUUM ANALYZE $TABLE_NAME;
            SELECT COUNT(*) AS \"Total rows in $TABLE_NAME\" FROM $TABLE_NAME;
        "
    else
        echo "Import interrupted at batch $BATCH_NUM. To resume, run:"
        echo "./psql-csv-import.sh $CSV_FILE $TABLE_NAME $BATCH_NUM"
    fi
fi

# Calculate elapsed time
END_TIME=$(date +%s)
ELAPSED=$((END_TIME - START_TIME))

echo "Import completed successfully in ${ELAPSED}s ($(($ELAPSED / 60)) minutes)"
echo "Progress log saved to $PROGRESS_FILE"
@@ -1,378 +0,0 @@
const { Client } = require('pg');
const path = require('path');
const fs = require('fs');
require('dotenv').config({ path: path.resolve(__dirname, '../.env') });

const dbConfig = {
  host: process.env.DB_HOST,
  user: process.env.DB_USER,
  password: process.env.DB_PASSWORD,
  database: process.env.DB_NAME,
  port: process.env.DB_PORT || 5432
};

function outputProgress(data) {
  if (!data.status) {
    data = {
      status: 'running',
      ...data
    };
  }
  console.log(JSON.stringify(data));
}

// Explicitly define all metrics-related tables in dependency order
const METRICS_TABLES = [
  'brand_metrics',
  'brand_time_metrics',
  'category_forecasts',
  'category_metrics',
  'category_sales_metrics',
  'category_time_metrics',
  'product_metrics',
  'product_time_aggregates',
  'sales_forecasts',
  'temp_purchase_metrics',
  'temp_sales_metrics',
  'vendor_metrics',
  'vendor_time_metrics',
  'vendor_details'
];

// Tables to always protect from being dropped
const PROTECTED_TABLES = [
  'users',
  'permissions',
  'user_permissions',
  'calculate_history',
  'import_history',
  'ai_prompts',
  'ai_validation_performance',
  'templates',
  'reusable_images'
];

// Split SQL into individual statements
function splitSQLStatements(sql) {
  sql = sql.replace(/\r\n/g, '\n');
  let statements = [];
  let currentStatement = '';
  let inString = false;
  let stringChar = '';

  for (let i = 0; i < sql.length; i++) {
    const char = sql[i];
    const nextChar = sql[i + 1] || '';

    if ((char === "'" || char === '"') && sql[i - 1] !== '\\') {
      if (!inString) {
        inString = true;
        stringChar = char;
      } else if (char === stringChar) {
        inString = false;
      }
    }

    if (!inString && char === '-' && nextChar === '-') {
      while (i < sql.length && sql[i] !== '\n') i++;
      continue;
    }

    if (!inString && char === '/' && nextChar === '*') {
      i += 2;
      while (i < sql.length && (sql[i] !== '*' || sql[i + 1] !== '/')) i++;
      i++;
      continue;
    }

    if (!inString && char === ';') {
      if (currentStatement.trim()) {
        statements.push(currentStatement.trim());
      }
      currentStatement = '';
    } else {
      currentStatement += char;
    }
  }

  if (currentStatement.trim()) {
    statements.push(currentStatement.trim());
  }

  return statements;
}
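
// NOTE: the splitter above understands quoted strings and SQL comments, but
// not PostgreSQL dollar-quoted bodies ($$ ... $$), so function or DO blocks in
// the schema file would be split at the semicolons inside them.
// e.g. splitSQLStatements("CREATE TABLE a(i int); -- trailing comment")
//      => ["CREATE TABLE a(i int)"]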

async function resetMetrics() {
  let client;
  try {
    outputProgress({
      operation: 'Starting metrics reset',
      message: 'Connecting to database...'
    });

    client = new Client(dbConfig);
    await client.connect();

    // Explicitly begin a transaction
    await client.query('BEGIN');

    // First verify current state
    const initialTables = await client.query(`
      SELECT tablename as name
      FROM pg_tables
      WHERE schemaname = 'public'
      AND tablename = ANY($1)
      AND tablename NOT IN (SELECT unnest($2::text[]))
    `, [METRICS_TABLES, PROTECTED_TABLES]);

    outputProgress({
      operation: 'Initial state',
      message: `Found ${initialTables.rows.length} existing metrics tables: ${initialTables.rows.map(t => t.name).join(', ')}`
    });

    // Disable foreign key checks at the start
    await client.query('SET session_replication_role = \'replica\'');
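    // 'replica' makes this session skip ordinary triggers, including the ones
    // that enforce foreign keys (the same mode used while applying replicated
    // changes); it typically requires superuser-level privileges, and the role
    // is restored to 'origin' once the reset finishes.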

    // Drop all metrics tables in reverse order to handle dependencies
    outputProgress({
      operation: 'Dropping metrics tables',
      message: 'Removing existing metrics tables...'
    });

    for (const table of [...METRICS_TABLES].reverse()) {
      // Skip protected tables
      if (PROTECTED_TABLES.includes(table)) {
        outputProgress({
          operation: 'Protected table',
          message: `Skipping protected table: ${table}`
        });
        continue;
      }

      try {
        // CASCADE also removes dependent objects such as views and foreign keys
        await client.query(`DROP TABLE IF EXISTS "${table}" CASCADE`);

        // Verify the table was actually dropped
        const checkDrop = await client.query(`
          SELECT COUNT(*) as count
          FROM pg_tables
          WHERE schemaname = 'public'
          AND tablename = $1
        `, [table]);

        if (parseInt(checkDrop.rows[0].count) > 0) {
          throw new Error(`Failed to drop table ${table} - table still exists`);
        }

        outputProgress({
          operation: 'Table dropped',
          message: `Successfully dropped table: ${table}`
        });

        // Commit after each table drop to ensure locks are released
        await client.query('COMMIT');
        // Start a new transaction for the next table
        await client.query('BEGIN');
        // Re-disable foreign key constraints for the new transaction
        await client.query('SET session_replication_role = \'replica\'');
      } catch (err) {
        outputProgress({
          status: 'error',
          operation: 'Drop table error',
          message: `Error dropping table ${table}: ${err.message}`
        });
        await client.query('ROLLBACK');
        // Re-start transaction for next table
        await client.query('BEGIN');
        await client.query('SET session_replication_role = \'replica\'');
      }
    }

    // Verify all tables were dropped
    const afterDrop = await client.query(`
      SELECT tablename as name
      FROM pg_tables
      WHERE schemaname = 'public'
      AND tablename = ANY($1)
    `, [METRICS_TABLES]);

    if (afterDrop.rows.length > 0) {
      throw new Error(`Failed to drop all tables. Remaining tables: ${afterDrop.rows.map(t => t.name).join(', ')}`);
    }

    // Make sure we have a fresh transaction here
    await client.query('COMMIT');
    await client.query('BEGIN');
    await client.query('SET session_replication_role = \'replica\'');

    // Read metrics schema
    outputProgress({
      operation: 'Reading schema',
      message: 'Loading metrics schema file...'
    });

    const schemaPath = path.resolve(__dirname, '../db/metrics-schema.sql');
    if (!fs.existsSync(schemaPath)) {
      throw new Error(`Schema file not found at: ${schemaPath}`);
    }

    const schemaSQL = fs.readFileSync(schemaPath, 'utf8');
    const statements = splitSQLStatements(schemaSQL);

    outputProgress({
      operation: 'Schema loaded',
      message: `Found ${statements.length} SQL statements to execute`
    });

    // Execute schema statements
    for (let i = 0; i < statements.length; i++) {
      const stmt = statements[i];
      try {
        const result = await client.query(stmt);

        // If this is a CREATE TABLE statement, verify the table was created
        if (stmt.trim().toLowerCase().startsWith('create table')) {
          const tableName = stmt.match(/create\s+table\s+(?:if\s+not\s+exists\s+)?["]?(\w+)["]?/i)?.[1];
          if (tableName) {
            const checkCreate = await client.query(`
              SELECT tablename as name
              FROM pg_tables
              WHERE schemaname = 'public'
              AND tablename = $1
            `, [tableName]);

            if (checkCreate.rows.length === 0) {
              throw new Error(`Failed to create table ${tableName} - table does not exist after CREATE statement`);
            }

            outputProgress({
              operation: 'Table created',
              message: `Successfully created table: ${tableName}`
            });
          }
        }

        outputProgress({
          operation: 'SQL Progress',
          message: {
            statement: i + 1,
            total: statements.length,
            preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : ''),
            rowCount: result.rowCount
          }
        });

        // Commit every 10 statements to avoid long-running transactions
        if (i > 0 && i % 10 === 0) {
          await client.query('COMMIT');
          await client.query('BEGIN');
          await client.query('SET session_replication_role = \'replica\'');
        }
      } catch (sqlError) {
        outputProgress({
          status: 'error',
          operation: 'SQL Error',
          message: {
            error: sqlError.message,
            statement: stmt,
            statementNumber: i + 1
          }
        });
        await client.query('ROLLBACK');
        throw sqlError;
      }
    }

    // Final commit for any pending statements
    await client.query('COMMIT');

    // Start new transaction for final checks
    await client.query('BEGIN');

    // Re-enable foreign key checks after all tables are created
    await client.query('SET session_replication_role = \'origin\'');

    // Verify metrics tables were created
    outputProgress({
      operation: 'Verifying metrics tables',
      message: 'Checking all metrics tables were created...'
    });

    const metricsTablesResult = await client.query(`
      SELECT tablename as name
      FROM pg_tables
      WHERE schemaname = 'public'
      AND tablename = ANY($1)
    `, [METRICS_TABLES]);

    outputProgress({
      operation: 'Tables found',
      message: `Found ${metricsTablesResult.rows.length} tables: ${metricsTablesResult.rows.map(t => t.name).join(', ')}`
    });

    const existingMetricsTables = metricsTablesResult.rows.map(t => t.name);
    const missingMetricsTables = METRICS_TABLES.filter(t => !existingMetricsTables.includes(t));

    if (missingMetricsTables.length > 0) {
      // Do one final check of the actual tables
      const finalCheck = await client.query(`
        SELECT tablename as name
        FROM pg_tables
        WHERE schemaname = 'public'
      `);
      outputProgress({
        operation: 'Final table check',
        message: `All database tables: ${finalCheck.rows.map(t => t.name).join(', ')}`
      });
      await client.query('ROLLBACK');
      throw new Error(`Failed to create metrics tables: ${missingMetricsTables.join(', ')}`);
    }

    // Commit final transaction
    await client.query('COMMIT');

    outputProgress({
      status: 'complete',
      operation: 'Reset complete',
      message: 'All metrics tables have been reset successfully'
    });
  } catch (error) {
    outputProgress({
      status: 'error',
      operation: 'Reset failed',
      message: error.message,
      stack: error.stack
    });

    if (client) {
      try {
        await client.query('ROLLBACK');
      } catch (rollbackError) {
        console.error('Error during rollback:', rollbackError);
      }
      // Make sure to re-enable foreign key checks even if there's an error
      await client.query('SET session_replication_role = \'origin\'').catch(() => {});
    }
    throw error;
  } finally {
    if (client) {
      // One final attempt to ensure foreign key checks are enabled
      await client.query('SET session_replication_role = \'origin\'').catch(() => {});
      await client.end();
    }
  }
}

// Export if required as a module
if (typeof module !== 'undefined' && module.exports) {
  module.exports = resetMetrics;
}

// Run if called from command line
if (require.main === module) {
  resetMetrics().catch(error => {
    console.error('Error:', error);
    process.exit(1);
  });
}
@@ -1,337 +0,0 @@
/**
 * This script updates the costeach values for existing orders from the original MySQL database
 * without needing to run the full import process.
 */
const dotenv = require("dotenv");
const path = require("path");
const fs = require("fs");
const { setupConnections, closeConnections } = require('./import/utils');
const { outputProgress, formatElapsedTime } = require('./metrics/utils/progress');

dotenv.config({ path: path.join(__dirname, "../.env") });

// SSH configuration
const sshConfig = {
  ssh: {
    host: process.env.PROD_SSH_HOST,
    port: process.env.PROD_SSH_PORT || 22,
    username: process.env.PROD_SSH_USER,
    privateKey: process.env.PROD_SSH_KEY_PATH
      ? fs.readFileSync(process.env.PROD_SSH_KEY_PATH)
      : undefined,
    compress: true, // Enable SSH compression
  },
  prodDbConfig: {
    // MySQL config for production
    host: process.env.PROD_DB_HOST || "localhost",
    user: process.env.PROD_DB_USER,
    password: process.env.PROD_DB_PASSWORD,
    database: process.env.PROD_DB_NAME,
    port: process.env.PROD_DB_PORT || 3306,
    timezone: 'Z',
  },
  localDbConfig: {
    // PostgreSQL config for local
    host: process.env.DB_HOST,
    user: process.env.DB_USER,
    password: process.env.DB_PASSWORD,
    database: process.env.DB_NAME,
    port: process.env.DB_PORT || 5432,
    ssl: process.env.DB_SSL === 'true',
    connectionTimeoutMillis: 60000,
    idleTimeoutMillis: 30000,
    max: 10 // connection pool max size
  }
};

async function updateOrderCosts() {
  const startTime = Date.now();
  let connections;
  let updatedCount = 0;
  let errorCount = 0;

  try {
    outputProgress({
      status: "running",
      operation: "Order costs update",
      message: "Initializing SSH tunnel..."
    });

    connections = await setupConnections(sshConfig);
    const { prodConnection, localConnection } = connections;

    // 1. Get all orders from local database that need cost updates
    outputProgress({
      status: "running",
      operation: "Order costs update",
      message: "Getting orders from local database..."
    });

    const [orders] = await localConnection.query(`
      SELECT DISTINCT order_number, pid
      FROM orders
      WHERE costeach = 0 OR costeach IS NULL
      ORDER BY order_number
    `);

    if (!orders || !orders.rows || orders.rows.length === 0) {
      console.log("No orders found that need cost updates");
      return { updatedCount: 0, errorCount: 0 };
    }

    const totalOrders = orders.rows.length;
    console.log(`Found ${totalOrders} orders that need cost updates`);

    // Process in batches of 500 orders
    const BATCH_SIZE = 500;
    for (let i = 0; i < orders.rows.length; i += BATCH_SIZE) {
      try {
        // Start transaction for this batch
        await localConnection.beginTransaction();

        const batch = orders.rows.slice(i, i + BATCH_SIZE);

        const orderNumbers = [...new Set(batch.map(o => o.order_number))];

        // 2. Fetch costs from production database for these orders
        outputProgress({
          status: "running",
          operation: "Order costs update",
          message: `Fetching costs for orders ${i + 1} to ${Math.min(i + BATCH_SIZE, totalOrders)} of ${totalOrders}`,
          current: i,
          total: totalOrders,
          elapsed: formatElapsedTime((Date.now() - startTime) / 1000)
        });

        const [costs] = await prodConnection.query(`
          SELECT
            oc.orderid as order_number,
            oc.pid,
            oc.costeach
          FROM order_costs oc
          INNER JOIN (
            SELECT
              orderid,
              pid,
              MAX(id) as max_id
            FROM order_costs
            WHERE orderid IN (?)
            AND pending = 0
            GROUP BY orderid, pid
          ) latest ON oc.orderid = latest.orderid AND oc.pid = latest.pid AND oc.id = latest.max_id
        `, [orderNumbers]);

        // Create a map of costs for easy lookup
        const costMap = {};
        if (costs && costs.length) {
          costs.forEach(c => {
            costMap[`${c.order_number}-${c.pid}`] = c.costeach || 0;
          });
        }

        // 3. Update costs in local database by batches
        // Using a more efficient update approach with a temporary table

        // Create a temporary table for each batch
        await localConnection.query(`
          DROP TABLE IF EXISTS temp_order_costs;
          CREATE TEMP TABLE temp_order_costs (
            order_number VARCHAR(50) NOT NULL,
            pid BIGINT NOT NULL,
            costeach DECIMAL(10,3) NOT NULL,
            PRIMARY KEY (order_number, pid)
          );
        `);
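        // CREATE TEMP TABLE is session-local in PostgreSQL: it lives in a
        // schema private to this connection and vanishes when the connection
        // closes, so the DROP above only matters because the same connection
        // is reused across batches.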

        // Insert cost data into the temporary table
        const costEntries = [];
        for (const order of batch) {
          const key = `${order.order_number}-${order.pid}`;
          if (key in costMap) {
            costEntries.push({
              order_number: order.order_number,
              pid: order.pid,
              costeach: costMap[key]
            });
          }
        }

        // Insert in sub-batches of 50
        const DB_BATCH_SIZE = 50;
        for (let j = 0; j < costEntries.length; j += DB_BATCH_SIZE) {
          const subBatch = costEntries.slice(j, j + DB_BATCH_SIZE);
          if (subBatch.length === 0) continue;

          const placeholders = subBatch.map((_, idx) =>
            `($${idx * 3 + 1}, $${idx * 3 + 2}, $${idx * 3 + 3})`
          ).join(',');

          const values = subBatch.flatMap(item => [
            item.order_number,
            item.pid,
            item.costeach
          ]);

          await localConnection.query(`
            INSERT INTO temp_order_costs (order_number, pid, costeach)
            VALUES ${placeholders}
          `, values);
        }

        // Perform bulk update from the temporary table
        const [updateResult] = await localConnection.query(`
          UPDATE orders o
          SET costeach = t.costeach
          FROM temp_order_costs t
          WHERE o.order_number = t.order_number AND o.pid = t.pid
          RETURNING o.id
        `);
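        // UPDATE ... FROM is PostgreSQL's join-update form: each row in
        // "orders" is matched against temp_order_costs on (order_number, pid)
        // in a single statement, and RETURNING gives an exact count of rows
        // touched in this batch instead of issuing one UPDATE per order.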

        const batchUpdated = updateResult.rowCount || 0;
        updatedCount += batchUpdated;

        // Commit transaction for this batch
        await localConnection.commit();

        outputProgress({
          status: "running",
          operation: "Order costs update",
          message: `Updated ${updatedCount} orders with costs from production (batch: ${batchUpdated})`,
          current: i + batch.length,
          total: totalOrders,
          elapsed: formatElapsedTime((Date.now() - startTime) / 1000)
        });
      } catch (error) {
        // If a batch fails, roll back that batch's transaction and continue
        try {
          await localConnection.rollback();
        } catch (rollbackError) {
          console.error("Error during batch rollback:", rollbackError);
        }

        console.error(`Error processing batch ${i}-${i + BATCH_SIZE}:`, error);
        errorCount++;
      }
    }

    // 4. For orders with no matching costs, set a default based on price
    outputProgress({
      status: "running",
      operation: "Order costs update",
      message: "Setting default costs for remaining orders..."
    });

    // Process remaining updates in bulk batches
    const DEFAULT_BATCH_SIZE = 10000;
    let totalDefaultUpdated = 0;

    try {
      // Start with a count query to determine how many records need the default update
      const [countResult] = await localConnection.query(`
        SELECT COUNT(*) as count FROM orders
        WHERE (costeach = 0 OR costeach IS NULL)
      `);

      const totalToUpdate = parseInt(countResult.rows[0]?.count || 0);

      if (totalToUpdate > 0) {
        console.log(`Applying default cost to ${totalToUpdate} orders`);

        // Apply the default in batches with separate transactions
        for (let i = 0; i < totalToUpdate; i += DEFAULT_BATCH_SIZE) {
          try {
            await localConnection.beginTransaction();

            const [defaultUpdates] = await localConnection.query(`
              WITH orders_to_update AS (
                SELECT id FROM orders
                WHERE (costeach = 0 OR costeach IS NULL)
                LIMIT ${DEFAULT_BATCH_SIZE}
              )
              UPDATE orders o
              SET costeach = price * 0.5
              FROM orders_to_update otu
              WHERE o.id = otu.id
              RETURNING o.id
            `);
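            // Each pass grabs up to DEFAULT_BATCH_SIZE rows that still have no
            // cost and sets costeach = price * 0.5; updated rows then fall out
            // of the WHERE filter, so successive passes see a shrinking set.
            // Rows with price = 0 keep costeach = 0 and are re-selected, but
            // the loop still terminates because it is bounded by the initial count.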

            const batchDefaultUpdated = defaultUpdates.rowCount || 0;
            totalDefaultUpdated += batchDefaultUpdated;

            await localConnection.commit();

            outputProgress({
              status: "running",
              operation: "Order costs update",
              message: `Applied default costs to ${totalDefaultUpdated} of ${totalToUpdate} orders`,
              current: totalDefaultUpdated,
              total: totalToUpdate,
              elapsed: formatElapsedTime((Date.now() - startTime) / 1000)
            });
          } catch (error) {
            try {
              await localConnection.rollback();
            } catch (rollbackError) {
              console.error("Error during default update rollback:", rollbackError);
            }

            console.error(`Error applying default costs batch ${i}-${i + DEFAULT_BATCH_SIZE}:`, error);
            errorCount++;
          }
        }
      }
    } catch (error) {
      console.error("Error counting or updating remaining orders:", error);
      errorCount++;
    }

    updatedCount += totalDefaultUpdated;

    const endTime = Date.now();
    const totalSeconds = (endTime - startTime) / 1000;

    outputProgress({
      status: "complete",
      operation: "Order costs update",
      message: `Updated ${updatedCount} orders (${totalDefaultUpdated} with default values) in ${formatElapsedTime(totalSeconds)}`,
      elapsed: formatElapsedTime(totalSeconds)
    });

    return {
      status: "complete",
      updatedCount,
      errorCount
    };
  } catch (error) {
    console.error("Error during order costs update:", error);

    return {
      status: "error",
      error: error.message,
      updatedCount,
      errorCount
    };
  } finally {
    if (connections) {
      await closeConnections(connections).catch(err => {
        console.error("Error closing connections:", err);
      });
    }
  }
}

// Run the script only if this is the main module
if (require.main === module) {
  updateOrderCosts().then((results) => {
    console.log('Cost update completed:', results);
    // Force exit after a small delay to ensure all logs are written
    setTimeout(() => process.exit(0), 500);
  }).catch((error) => {
    console.error("Unhandled error:", error);
    // Force exit with error code after a small delay
    setTimeout(() => process.exit(1), 500);
  });
}

// Export the function for use in other scripts
module.exports = updateOrderCosts;