Add new vendors, brands, categories tables and calculate scripts
@@ -0,0 +1,426 @@
const path = require('path');
const fs = require('fs');
const progress = require('../utils/progress'); // Assuming progress utils are here
const { getConnection, closePool } = require('../utils/db'); // Assuming db utils are here
const os = require('os'); // For detecting number of CPU cores
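// Shapes assumed for the two local utils, inferred from their call sites below
// (a sketch of the expected interface, not their actual source):
//   progress.outputProgress(data)                    // persist/print a progress state object
//   progress.clearProgress()                         // clear the progress state on success
//   progress.formatElapsedTime(startMs)              // elapsed-time string since startMs
//   progress.estimateRemaining(startMs, done, total) // rough ETA string
//   progress.calculateRate(startMs, done)            // items processed per second
//   getConnection(opts)                              // Promise resolving to a pooled pg client (.query/.release)
//   closePool()                                      // Promise that drains the connection pool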

// --- Configuration ---
const BATCH_SIZE_DAYS = 1; // Process 1 day per database function call
const SQL_FUNCTION_FILE = path.resolve(__dirname, 'backfill_historical_snapshots.sql');
const LOG_PROGRESS_INTERVAL_MS = 5000; // Update console progress roughly every 5 seconds
const HISTORY_TYPE = 'backfill_snapshots'; // Identifier for history table
const MAX_WORKERS = Math.max(1, Math.floor(os.cpus().length / 2)); // Use half of available CPU cores
const USE_PARALLEL = false; // Set to true to enable parallel processing
const PG_STATEMENT_TIMEOUT_MS = 1800000; // 30 minutes max per query
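// Note: MAX_WORKERS, USE_PARALLEL and LOG_PROGRESS_INTERVAL_MS are declared for
// a future parallel/refresh mode and are not referenced elsewhere in this script yet.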

// --- Cancellation Handling ---
let isCancelled = false;
let runningQueryPromise = null; // To potentially track the active query

function requestCancellation() {
  if (!isCancelled) {
    isCancelled = true;
    console.warn('\nCancellation requested. Finishing current batch then stopping...');
    // Note: We are NOT forcefully cancelling the backend query anymore.
  }
}

process.on('SIGINT', requestCancellation);  // Handle Ctrl+C
process.on('SIGTERM', requestCancellation); // Handle termination signals

// --- Main Backfill Function ---
async function backfillSnapshots(cmdStartDate, cmdEndDate, cmdStartBatch = 1) {
  let connection;
  const overallStartTime = Date.now();
  let calculateHistoryId = null;
  let processedDaysTotal = 0; // Total days processed across all batches executed in this run
  let currentBatchNum = cmdStartBatch > 0 ? cmdStartBatch : 1;
  let totalBatches = 0;
  let totalDays = 0;

  console.log(`Starting snapshot backfill process...`);
  console.log(`SQL Function definition file: ${SQL_FUNCTION_FILE}`);
  if (!fs.existsSync(SQL_FUNCTION_FILE)) {
    console.error(`FATAL: SQL file not found at ${SQL_FUNCTION_FILE}`);
    process.exit(1); // Exit early if the file doesn't exist
  }

  try {
    // Set up a connection with performance-related settings
    connection = await getConnection({
      application_name: 'backfill_snapshots',
      statement_timeout: PG_STATEMENT_TIMEOUT_MS, // 30 min timeout per statement
      // These parameters may need to be configured in your database:
      // work_mem: '1GB',
      // maintenance_work_mem: '2GB',
      // temp_buffers: '1GB',
    });

    console.log('Database connection acquired.');

    // --- Ensure Function Exists ---
    console.log('Ensuring database function is up-to-date...');
    try {
      const sqlFunctionDef = fs.readFileSync(SQL_FUNCTION_FILE, 'utf8');
      if (!sqlFunctionDef.includes('CREATE OR REPLACE FUNCTION backfill_daily_snapshots_range_final')) {
        throw new Error(`SQL file ${SQL_FUNCTION_FILE} does not seem to contain the function definition.`);
      }
      await connection.query(sqlFunctionDef); // Execute the whole file
      console.log('Database function `backfill_daily_snapshots_range_final` created/updated.');

      // Analyze tables for better query planning
      await connection.query(`
        ANALYZE public.products;
        ANALYZE public.imported_daily_inventory;
        ANALYZE public.imported_product_stat_history;
        ANALYZE public.daily_product_snapshots;
        ANALYZE public.imported_product_current_prices;
      `).catch(err => {
        // Non-fatal if analyze fails
        console.warn('Failed to analyze tables (non-fatal):', err.message);
      });

    } catch (err) {
      console.error(`Error processing SQL function file ${SQL_FUNCTION_FILE}:`, err);
      throw new Error(`Failed to create or replace DB function: ${err.message}`);
    }

    // --- Prepare History Record ---
    console.log('Preparing calculation history record...');
    // Ensure the history table exists (optional; could be done elsewhere)
    await connection.query(`
      CREATE TABLE IF NOT EXISTS public.calculate_history (
        id SERIAL PRIMARY KEY,
        start_time TIMESTAMPTZ NOT NULL DEFAULT NOW(),
        end_time TIMESTAMPTZ,
        duration_seconds INTEGER,
        status VARCHAR(20) NOT NULL, -- e.g., 'running', 'completed', 'failed', 'cancelled'
        error_message TEXT,
        additional_info JSONB -- Store type, file, batch info, etc.
      );
    `);
    // Mark previous runs of this type as failed if they were left 'running'
    await connection.query(`
      UPDATE public.calculate_history
      SET status = 'failed', error_message = 'Interrupted by new run.'
      WHERE status = 'running' AND additional_info->>'type' = $1;
    `, [HISTORY_TYPE]);

    // Create the new history record
    const historyResult = await connection.query(`
      INSERT INTO public.calculate_history (start_time, status, additional_info)
      VALUES (NOW(), 'running', jsonb_build_object('type', $1::text, 'sql_file', $2::text, 'start_batch', $3::integer))
      RETURNING id;
    `, [HISTORY_TYPE, path.basename(SQL_FUNCTION_FILE), cmdStartBatch]);
    calculateHistoryId = historyResult.rows[0].id;
    console.log(`Calculation history record created with ID: ${calculateHistoryId}`);
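
    // A run can be monitored from SQL while the script executes, e.g. with a
    // query like this (illustrative; it matches the jsonb shape written above
    // and the per-batch checkpoint recorded below):
    //   SELECT id, status, additional_info->>'last_completed_batch' AS last_batch
    //   FROM public.calculate_history
    //   WHERE additional_info->>'type' = 'backfill_snapshots'
    //   ORDER BY id DESC LIMIT 1;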

    // --- Determine Date Range ---
    console.log('Determining date range...');
    let effectiveStartDate, effectiveEndDate;

    // Use command-line dates if provided, otherwise query the DB
    if (cmdStartDate) {
      effectiveStartDate = cmdStartDate;
    } else {
      const minDateResult = await connection.query(`
        SELECT LEAST(
          COALESCE((SELECT MIN(date) FROM public.imported_daily_inventory WHERE date > '1970-01-01'), CURRENT_DATE),
          COALESCE((SELECT MIN(date) FROM public.imported_product_stat_history WHERE date > '1970-01-01'), CURRENT_DATE)
        )::date as min_date;
      `);
      effectiveStartDate = minDateResult.rows[0]?.min_date || new Date().toISOString().split('T')[0]; // Fallback
      console.log(`Auto-detected start date: ${effectiveStartDate}`);
    }

    if (cmdEndDate) {
      effectiveEndDate = cmdEndDate;
    } else {
      const maxDateResult = await connection.query(`
        SELECT GREATEST(
          COALESCE((SELECT MAX(date) FROM public.imported_daily_inventory WHERE date < CURRENT_DATE), '1970-01-01'::date),
          COALESCE((SELECT MAX(date) FROM public.imported_product_stat_history WHERE date < CURRENT_DATE), '1970-01-01'::date)
        )::date as max_date;
      `);
      // Ensure the end date is not today or in the future
      effectiveEndDate = maxDateResult.rows[0]?.max_date || new Date(Date.now() - 86400000).toISOString().split('T')[0]; // Default yesterday
      if (new Date(effectiveEndDate) >= new Date(new Date().toISOString().split('T')[0])) {
        effectiveEndDate = new Date(Date.now() - 86400000).toISOString().split('T')[0]; // Set to yesterday if >= today
      }
      console.log(`Auto-detected end date: ${effectiveEndDate}`);
    }

    // Validate dates
    const dStart = new Date(effectiveStartDate);
    const dEnd = new Date(effectiveEndDate);
    if (isNaN(dStart.getTime()) || isNaN(dEnd.getTime()) || dStart > dEnd) {
      throw new Error(`Invalid date range: Start "${effectiveStartDate}", End "${effectiveEndDate}"`);
    }

    // --- Batch Processing ---
    totalDays = Math.ceil((dEnd - dStart) / (1000 * 60 * 60 * 24)) + 1; // Inclusive
    totalBatches = Math.ceil(totalDays / BATCH_SIZE_DAYS);

    console.log(`Target Date Range: ${effectiveStartDate} to ${effectiveEndDate} (${totalDays} days)`);
    console.log(`Total Batches: ${totalBatches} (Batch Size: ${BATCH_SIZE_DAYS} days)`);
    console.log(`Starting from Batch: ${currentBatchNum}`);

    // Initial progress update
    progress.outputProgress({
      status: 'running',
      operation: 'Starting Batch Processing',
      currentBatch: currentBatchNum,
      totalBatches: totalBatches,
      totalDays: totalDays,
      elapsed: '0s',
      remaining: 'Calculating...',
      rate: 0,
      historyId: calculateHistoryId // Include history ID in the object
    });

    while (currentBatchNum <= totalBatches && !isCancelled) {
      const batchOffset = (currentBatchNum - 1) * BATCH_SIZE_DAYS;
      const batchStartDate = new Date(dStart);
      batchStartDate.setDate(dStart.getDate() + batchOffset);

      const batchEndDate = new Date(batchStartDate);
      batchEndDate.setDate(batchStartDate.getDate() + BATCH_SIZE_DAYS - 1);

      // Clamp the batch end date to the overall effective end date
      if (batchEndDate > dEnd) {
        batchEndDate.setTime(dEnd.getTime());
      }

      const batchStartDateStr = batchStartDate.toISOString().split('T')[0];
      const batchEndDateStr = batchEndDate.toISOString().split('T')[0];
      const batchStartTime = Date.now();

      console.log(`\n--- Processing Batch ${currentBatchNum} / ${totalBatches} ---`);
      console.log(`  Dates: ${batchStartDateStr} to ${batchEndDateStr}`);

      // Execute the function for the batch
      try {
        progress.outputProgress({
          status: 'running',
          operation: `Executing DB function for batch ${currentBatchNum}...`,
          currentBatch: currentBatchNum,
          totalBatches: totalBatches,
          totalDays: totalDays,
          elapsed: progress.formatElapsedTime(overallStartTime),
          remaining: 'Executing...',
          rate: 0,
          historyId: calculateHistoryId
        });

        // Performance: enable parallel plan options. Plain SET is used rather
        // than SET LOCAL, which would expire with this statement's implicit
        // transaction and never reach the function call below.
        await connection.query('SET enable_parallel_append = on; SET enable_parallel_hash = on; SET max_parallel_workers_per_gather = 4;');

        // Store the promise in case we need to try and cancel (though not implemented forcefully)
        runningQueryPromise = connection.query(
          `SELECT backfill_daily_snapshots_range_final($1::date, $2::date);`,
          [batchStartDateStr, batchEndDateStr]
        );
        await runningQueryPromise; // Wait for the function call to complete
        runningQueryPromise = null; // Clear the promise

        const batchDurationMs = Date.now() - batchStartTime;
        const daysInThisBatch = Math.ceil((batchEndDate - batchStartDate) / (1000 * 60 * 60 * 24)) + 1;
        processedDaysTotal += daysInThisBatch;

        console.log(`  Batch ${currentBatchNum} completed in ${progress.formatElapsedTime(batchStartTime)}.`);

        // --- Update Progress & History ---
        const overallElapsedSec = Math.round((Date.now() - overallStartTime) / 1000);
        progress.outputProgress({
          status: 'running',
          operation: `Completed batch ${currentBatchNum}`,
          currentBatch: currentBatchNum,
          totalBatches: totalBatches,
          totalDays: totalDays,
          processedDays: processedDaysTotal,
          elapsed: progress.formatElapsedTime(overallStartTime),
          remaining: progress.estimateRemaining(overallStartTime, processedDaysTotal, totalDays),
          rate: progress.calculateRate(overallStartTime, processedDaysTotal),
          batchDuration: progress.formatElapsedTime(batchStartTime),
          historyId: calculateHistoryId
        });

        // Save checkpoint in history
        await connection.query(`
          UPDATE public.calculate_history
          SET additional_info = jsonb_set(additional_info, '{last_completed_batch}', $1::jsonb)
                                || jsonb_build_object('last_processed_date', $2::text)
          WHERE id = $3::integer;
        `, [JSON.stringify(currentBatchNum), batchEndDateStr, calculateHistoryId]);
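
        // Because this checkpoint records the last completed batch, an
        // interrupted run can be resumed by reading it back and passing the
        // next batch number to --start-batch (hypothetical values shown):
        //   SELECT additional_info->>'last_completed_batch'
        //   FROM public.calculate_history WHERE id = <history id>;
        //   node <this script> --start-date ... --end-date ... --start-batch <n+1>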

      } catch (batchError) {
        console.error(`\n--- ERROR in Batch ${currentBatchNum} (${batchStartDateStr} to ${batchEndDateStr}) ---`);
        console.error('  Database Error:', batchError.message);
        console.error('  DB Error Code:', batchError.code);
        // Log the detailed error to history and re-throw to stop the process
        await connection.query(`
          UPDATE public.calculate_history
          SET status = 'failed',
              end_time = NOW(),
              duration_seconds = $1::integer,
              error_message = $2::text,
              additional_info = additional_info || jsonb_build_object('failed_batch', $3::integer, 'failed_date_range', $4::text)
          WHERE id = $5::integer;
        `, [
          Math.round((Date.now() - overallStartTime) / 1000),
          `Batch ${currentBatchNum} failed: ${batchError.message} (Code: ${batchError.code || 'N/A'})`,
          currentBatchNum,
          `${batchStartDateStr} to ${batchEndDateStr}`,
          calculateHistoryId
        ]);
        throw batchError; // Stop execution
      }

      currentBatchNum++;
      // Optional delay between batches
      // await new Promise(resolve => setTimeout(resolve, 500));

    } // End while loop

    // --- Final Outcome ---
    const finalStatus = isCancelled ? 'cancelled' : 'completed';
    const finalMessage = isCancelled ? `Calculation stopped after completing batch ${currentBatchNum - 1}.` : 'Historical snapshots backfill completed successfully.';
    const finalDurationSec = Math.round((Date.now() - overallStartTime) / 1000);

    console.log(`\n--- Backfill ${finalStatus.toUpperCase()} ---`);
    console.log(finalMessage);
    console.log(`Total duration: ${progress.formatElapsedTime(overallStartTime)}`);

    // Update the history record (the status column is VARCHAR, so no enum cast)
    await connection.query(`
      UPDATE public.calculate_history SET status = $1::varchar, end_time = NOW(), duration_seconds = $2::integer, error_message = $3
      WHERE id = $4::integer;
    `, [finalStatus, finalDurationSec, (isCancelled ? 'User cancelled' : null), calculateHistoryId]);

    if (!isCancelled) {
      progress.clearProgress(); // Clear progress state only on successful completion
    } else {
      progress.outputProgress({ // Final cancelled status update
        status: 'cancelled',
        operation: finalMessage,
        currentBatch: currentBatchNum - 1,
        totalBatches: totalBatches,
        totalDays: totalDays,
        processedDays: processedDaysTotal,
        elapsed: progress.formatElapsedTime(overallStartTime),
        remaining: 'Cancelled',
        rate: 0,
        historyId: calculateHistoryId
      });
    }

    return { success: true, status: finalStatus, message: finalMessage, duration: finalDurationSec };

  } catch (error) {
    console.error('\n--- Backfill encountered an unrecoverable error ---');
    console.error(error.message);
    const finalDurationSec = Math.round((Date.now() - overallStartTime) / 1000);

    // Update history if possible
    if (connection && calculateHistoryId) {
      try {
        await connection.query(`
          UPDATE public.calculate_history
          SET status = $1::varchar, end_time = NOW(), duration_seconds = $2::integer, error_message = $3::text
          WHERE id = $4::integer;
        `, [
          isCancelled ? 'cancelled' : 'failed',
          finalDurationSec,
          error.message,
          calculateHistoryId
        ]);
      } catch (histError) {
        console.error("Failed to update history record with error state:", histError);
      }
    } else {
      console.error("Could not update history record (no ID or connection).");
    }

    // Use the initialized value, or a default if the loop never started
    const batchNumForError = currentBatchNum > cmdStartBatch ? currentBatchNum - 1 : cmdStartBatch - 1;

    try {
      // Build the progress data object (outputProgress takes a single object,
      // not a separate historyId argument)
      const progressData = {
        status: 'failed',
        operation: 'Backfill failed',
        message: error.message,
        currentBatch: batchNumForError,
        totalBatches: totalBatches,
        totalDays: totalDays,
        processedDays: processedDaysTotal,
        elapsed: progress.formatElapsedTime(overallStartTime),
        remaining: 'Failed',
        rate: 0,
        historyId: calculateHistoryId
      };

      progress.outputProgress(progressData);
    } catch (progressError) {
      console.error('Failed to report progress:', progressError);
    }

    return { success: false, status: 'failed', error: error.message, duration: finalDurationSec };

  } finally {
    if (connection) {
      console.log('Releasing database connection.');
      connection.release();
    }
    // Close the pool only if this script is meant to be standalone.
    // If part of a larger app, the app should manage pool closure
    // (here it is closed in the .finally() handler below).
    // console.log('Closing database pool.');
    // await closePool();
  }
}

// --- Script Execution ---

// Parse command-line arguments
const args = process.argv.slice(2);
let cmdStartDateArg, cmdEndDateArg, cmdStartBatchArg = 1; // Default start batch is 1

for (let i = 0; i < args.length; i++) {
  if (args[i] === '--start-date' && args[i+1]) cmdStartDateArg = args[++i];
  else if (args[i] === '--end-date' && args[i+1]) cmdEndDateArg = args[++i];
  else if (args[i] === '--start-batch' && args[i+1]) cmdStartBatchArg = parseInt(args[++i], 10);
}

if (isNaN(cmdStartBatchArg) || cmdStartBatchArg < 1) {
  console.warn(`Invalid --start-batch value. Defaulting to 1.`);
  cmdStartBatchArg = 1;
}

// Run the backfill process
backfillSnapshots(cmdStartDateArg, cmdEndDateArg, cmdStartBatchArg)
  .then(result => {
    if (result.success) {
      console.log(`\n✅ ${result.message} (Duration: ${result.duration}s)`);
      process.exitCode = 0; // Success
    } else {
      console.error(`\n❌ Backfill failed: ${result.error || 'Unknown error'} (Duration: ${result.duration}s)`);
      process.exitCode = 1; // Failure
    }
  })
  .catch(err => {
    console.error('\n❌ Unexpected error during backfill execution:', err);
    process.exitCode = 1; // Failure
  })
  .finally(async () => {
    // Ensure the pool is closed when run standalone
    console.log('Backfill script finished. Closing pool.');
    await closePool(); // Make sure closePool exists and works in your db utils
    process.exit(process.exitCode); // Exit with the appropriate code
  });
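
A typical invocation of the script above (its file name is not shown in this diff, so backfill_snapshots.js is only a placeholder) looks like:

    node backfill_snapshots.js --start-date 2023-01-01 --end-date 2023-12-31
    node backfill_snapshots.js --start-batch 42   # resume a run that stopped after batch 41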
@@ -0,0 +1,161 @@
-- Description: Backfills the daily_product_snapshots table using imported historical unit data
--              (daily inventory/stats) and historical price data (current prices table).
--              - Uses imported daily sales/receipt UNIT counts for accuracy.
--              - ESTIMATES historical stock levels using a forward calculation.
--              - APPROXIMATES historical REVENUE using looked-up historical base prices.
--              - APPROXIMATES historical COGS, PROFIT, and STOCK VALUE using CURRENT product costs/prices.
-- Run ONCE after importing historical data and before initial product_metrics population.
-- Dependencies: Core import tables (products), imported history tables (imported_daily_inventory,
--               imported_product_stat_history, imported_product_current_prices),
--               daily_product_snapshots table must exist.
-- Frequency: Run ONCE.

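-- The forward stock calculation below carries the estimate from one day to the next:
--   eod_stock(d) = eod_stock(d - 1) + units_received(d) - units_sold(d)
-- e.g. 10 units at the end of Monday, with 5 received and 3 sold on Tuesday,
-- gives an estimated 12 units at the end of Tuesday.
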
CREATE OR REPLACE FUNCTION backfill_daily_snapshots_range_final(
    _start_date DATE,
    _end_date DATE
)
RETURNS VOID AS $$
DECLARE
    _current_processing_date DATE := _start_date;
    _batch_start_time TIMESTAMPTZ;
    _row_count INTEGER;
BEGIN
    RAISE NOTICE 'Starting FINAL historical snapshot backfill from % to %.', _start_date, _end_date;
    RAISE NOTICE 'Using historical units and historical prices (for revenue approximation).';
    RAISE NOTICE 'WARNING: Historical COGS, Profit, and Stock Value use CURRENT product costs/prices.';

    -- Ensure the end date is not in the future
    IF _end_date >= CURRENT_DATE THEN
        _end_date := CURRENT_DATE - INTERVAL '1 day';
        RAISE NOTICE 'Adjusted end date to % to avoid conflict with the hourly script.', _end_date;
    END IF;

    -- Performance: create a temporary table with product info to avoid repeated lookups
    CREATE TEMP TABLE IF NOT EXISTS temp_product_info AS
    SELECT
        pid,
        sku,
        COALESCE(landing_cost_price, cost_price, 0.00) as effective_cost_price,
        COALESCE(price, 0.00) as current_price,
        COALESCE(regular_price, 0.00) as current_regular_price
    FROM public.products;

    -- Performance: index the temporary table
    CREATE INDEX IF NOT EXISTS temp_product_info_pid_idx ON temp_product_info(pid);

    ANALYZE temp_product_info;

    RAISE NOTICE 'Created temporary product info table with % products', (SELECT COUNT(*) FROM temp_product_info);

    WHILE _current_processing_date <= _end_date LOOP
        _batch_start_time := clock_timestamp();
        RAISE NOTICE 'Processing date: %', _current_processing_date;

        -- Get daily transaction unit info from imported history
        WITH DailyHistoryUnits AS (
            SELECT
                pids.pid,
                -- Prioritize daily_inventory, fall back to product_stat_history for sold qty
                COALESCE(di.amountsold, ps.qty_sold, 0)::integer as units_sold_today,
                COALESCE(di.qtyreceived, 0)::integer as units_received_today
            FROM
                (SELECT DISTINCT pid FROM temp_product_info) pids -- Ensure all products are considered
                LEFT JOIN public.imported_daily_inventory di
                    ON pids.pid = di.pid AND di.date = _current_processing_date
                LEFT JOIN public.imported_product_stat_history ps
                    ON pids.pid = ps.pid AND ps.date = _current_processing_date
            -- No WHERE clause here: snapshots are created even for days with 0 activity,
            -- allowing stock carry-over. The main query filters products below.
        ),
        HistoricalPrice AS (
            -- Find the base price (qty_buy = 1) active on the processing date
            SELECT DISTINCT ON (pid)
                pid,
                price_each
            FROM public.imported_product_current_prices
            WHERE
                qty_buy = 1
                -- TIMESTAMPTZ comparison logic:
                AND date_active <= (_current_processing_date + interval '1 day' - interval '1 second') -- Active sometime on or before the end of the processing day
                AND (date_deactive IS NULL OR date_deactive > _current_processing_date) -- Not deactivated before the start of the processing day
                -- Assuming the 'active' flag isn't needed if the dates are correct; add 'AND active != 0' if necessary
            ORDER BY
                pid, date_active DESC -- Take the most recently activated price
        ),
        PreviousStock AS (
            -- Get the estimated stock from the PREVIOUS day's snapshot
            SELECT pid, eod_stock_quantity
            FROM public.daily_product_snapshots
            WHERE snapshot_date = _current_processing_date - INTERVAL '1 day'
        )
        -- Insert into the daily snapshots table
        INSERT INTO public.daily_product_snapshots (
            snapshot_date, pid, sku,
            eod_stock_quantity, eod_stock_cost, eod_stock_retail, eod_stock_gross, stockout_flag,
            units_sold, units_returned,
            gross_revenue, discounts, returns_revenue,
            net_revenue, cogs, gross_regular_revenue, profit,
            units_received, cost_received,
            calculation_timestamp
        )
        SELECT
            _current_processing_date AS snapshot_date,
            p.pid,
            p.sku,
            -- Estimated EOD stock (using historical daily units);
            -- COALESCE handles potential NULLs from the joins
            COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0) AS estimated_eod_stock,
            -- Valued stock (estimated stock at CURRENT prices/costs - APPROXIMATION)
            GREATEST(0, COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0)) * p.effective_cost_price AS eod_stock_cost,
            GREATEST(0, COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0)) * p.current_price AS eod_stock_retail, -- Stock retail uses current price
            GREATEST(0, COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0)) * p.current_regular_price AS eod_stock_gross, -- Stock gross uses current regular price
            -- Stockout flag (based on estimated stock)
            (COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0)) <= 0 AS stockout_flag,

            -- Today's unit aggregates from history
            COALESCE(dh.units_sold_today, 0) as units_sold,
            0 AS units_returned, -- Placeholder: returns cannot be determined from the daily summary

            -- Monetary values using the looked-up historical price and CURRENT cost/regular price
            COALESCE(dh.units_sold_today, 0) * COALESCE(hp.price_each, p.current_price) AS gross_revenue, -- Approx revenue
            0 AS discounts, -- Placeholder
            0 AS returns_revenue, -- Placeholder
            COALESCE(dh.units_sold_today, 0) * COALESCE(hp.price_each, p.current_price) AS net_revenue, -- Approx net revenue
            COALESCE(dh.units_sold_today, 0) * p.effective_cost_price AS cogs, -- Approx COGS (uses CURRENT cost)
            COALESCE(dh.units_sold_today, 0) * p.current_regular_price AS gross_regular_revenue, -- Approx gross regular revenue
            -- Approx profit
            (COALESCE(dh.units_sold_today, 0) * COALESCE(hp.price_each, p.current_price)) - (COALESCE(dh.units_sold_today, 0) * p.effective_cost_price) AS profit,

            COALESCE(dh.units_received_today, 0) as units_received,
            -- Estimate received cost using the CURRENT product cost
            COALESCE(dh.units_received_today, 0) * p.effective_cost_price AS cost_received, -- Approx

            clock_timestamp() -- Timestamp of this specific calculation
        FROM temp_product_info p -- Use the temp table for better performance
        LEFT JOIN PreviousStock ps ON p.pid = ps.pid
        LEFT JOIN DailyHistoryUnits dh ON p.pid = dh.pid -- Join today's historical activity
        LEFT JOIN HistoricalPrice hp ON p.pid = hp.pid -- Join the looked-up historical price
        -- Optimization: only process products with activity or previous stock
        WHERE (dh.units_sold_today > 0 OR dh.units_received_today > 0 OR COALESCE(ps.eod_stock_quantity, 0) > 0)

        ON CONFLICT (snapshot_date, pid) DO NOTHING; -- Avoid errors when rerunning parts, but prefer clean runs

        GET DIAGNOSTICS _row_count = ROW_COUNT;
        RAISE NOTICE 'Processed %: Inserted/Skipped % rows. Duration: %',
            _current_processing_date,
            _row_count,
            clock_timestamp() - _batch_start_time;

        _current_processing_date := _current_processing_date + INTERVAL '1 day';

    END LOOP;

    -- Clean up temporary tables
    DROP TABLE IF EXISTS temp_product_info;

    RAISE NOTICE 'Finished FINAL historical snapshot backfill.';
END;
$$ LANGUAGE plpgsql;

-- Example usage:
-- SELECT backfill_daily_snapshots_range_final('2023-01-01'::date, '2023-12-31'::date);
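
After a backfill run, per-day coverage can be sanity-checked with a query along these lines (an illustrative check, not part of the commit):

    SELECT snapshot_date, COUNT(*) AS products_snapshotted
    FROM public.daily_product_snapshots
    GROUP BY snapshot_date
    ORDER BY snapshot_date;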
@@ -0,0 +1,396 @@
const path = require('path');
const fs = require('fs');
const os = require('os'); // For detecting CPU cores

// Get the base directory (the directory containing the inventory-server folder)
const baseDir = path.resolve(__dirname, '../../..');

// Load environment variables from the inventory-server directory
require('dotenv').config({ path: path.resolve(__dirname, '../..', '.env') });

// Configure statement timeout (30 minutes)
const PG_STATEMENT_TIMEOUT_MS = 1800000;

// Error handler for uncaught exceptions
process.on('uncaughtException', (error) => {
  console.error('Uncaught Exception:', error);
  process.exit(1);
});

// Error handler for unhandled promise rejections
process.on('unhandledRejection', (reason, promise) => {
  console.error('Unhandled Rejection at:', promise, 'reason:', reason);
  process.exit(1);
});

// Load progress module
const progress = require('../utils/progress');

// Store progress functions in global scope to ensure availability
global.formatElapsedTime = progress.formatElapsedTime;
global.estimateRemaining = progress.estimateRemaining;
global.calculateRate = progress.calculateRate;
global.outputProgress = progress.outputProgress;
global.clearProgress = progress.clearProgress;
global.getProgress = progress.getProgress;
global.logError = progress.logError;

// Load database module
const { getConnection, closePool } = require('../utils/db');

// Cancellation handling
let isCancelled = false;
let runningQueryPromise = null;

function cancelCalculation() {
  if (!isCancelled) {
    isCancelled = true;
    console.log('Calculation has been cancelled by user');

    // Keep a reference to the query promise we are trying to cancel
    const queryToCancel = runningQueryPromise;
    if (queryToCancel) {
      console.log('Attempting to cancel the running query...');
    }

    // Ask Postgres to cancel any of this app's queries that have been running
    // for more than 5 seconds (pg_cancel_backend cancels the query; it does
    // not terminate the backend connection)
    try {
      const connection = getConnection();
      connection.then(async (conn) => {
        try {
          // Identify and cancel long-running queries from our application
          await conn.query(`
            SELECT pg_cancel_backend(pid)
            FROM pg_stat_activity
            WHERE query_start < now() - interval '5 seconds'
              AND application_name = 'populate_metrics'
              AND query NOT LIKE '%pg_cancel_backend%'
          `);

          // Release connection
          conn.release();
        } catch (err) {
          console.error('Error during forced cancellation:', err);
          conn.release();
        }
      }).catch(err => {
        console.error('Could not get connection for cancellation:', err);
      });
    } catch (err) {
      console.error('Failed to cancel running queries:', err);
    }
  }

  return {
    success: true,
    message: 'Calculation has been cancelled'
  };
}
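
// Cancellation is driven entirely by signals: SIGINT (Ctrl+C) or SIGTERM
// (e.g. `kill -TERM <pid>`) flips isCancelled and asks Postgres to cancel this
// app's statements via pg_cancel_backend, which aborts the running query
// without terminating the backend connection itself.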

// Handle signals for cancellation
process.on('SIGTERM', cancelCalculation);
process.on('SIGINT', cancelCalculation);

async function populateInitialMetrics() {
  let connection;
  const startTime = Date.now();
  let calculateHistoryId;

  try {
    connection = await getConnection({
      // Performance-related settings
      application_name: 'populate_metrics',
      statement_timeout: PG_STATEMENT_TIMEOUT_MS, // 30 min timeout per statement
    });

    // Ensure the calculate_status table exists and has the correct structure
    await connection.query(`
      CREATE TABLE IF NOT EXISTS calculate_status (
        module_name TEXT PRIMARY KEY,
        last_calculation_timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
      )
    `);

    // Clean up any previously running calculations
    await connection.query(`
      UPDATE calculate_history
      SET
        status = 'cancelled',
        end_time = NOW(),
        duration_seconds = EXTRACT(EPOCH FROM (NOW() - start_time))::INTEGER,
        error_message = 'Previous calculation was not completed properly'
      WHERE status = 'running' AND additional_info->>'type' = 'populate_initial_metrics'
    `);

    // Create a history record for this calculation
    const historyResult = await connection.query(`
      INSERT INTO calculate_history (
        start_time,
        status,
        additional_info
      ) VALUES (
        NOW(),
        'running',
        jsonb_build_object(
          'type', 'populate_initial_metrics',
          'sql_file', 'populate_initial_product_metrics.sql'
        )
      ) RETURNING id
    `);
    calculateHistoryId = historyResult.rows[0].id;

    // Initialize progress
    global.outputProgress({
      status: 'running',
      operation: 'Starting initial product metrics population',
      current: 0,
      total: 100,
      elapsed: '0s',
      remaining: 'Calculating... (this may take a while)',
      rate: 0,
      percentage: '0',
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      },
      historyId: calculateHistoryId
    });

    // Prepare the database - analyze tables
    global.outputProgress({
      status: 'running',
      operation: 'Analyzing database tables for better query performance',
      current: 2,
      total: 100,
      elapsed: global.formatElapsedTime(startTime),
      remaining: 'Analyzing...',
      rate: 0,
      percentage: '2',
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      },
      historyId: calculateHistoryId
    });

    // Enable better query planning and parallel operations
    await connection.query(`
      -- Analyze tables for better query planning
      ANALYZE public.products;
      ANALYZE public.purchase_orders;
      ANALYZE public.daily_product_snapshots;
      ANALYZE public.orders;

      -- Enable parallel operations for the rest of the session
      -- (plain SET rather than SET LOCAL: SET LOCAL would expire with this
      -- statement's implicit transaction and never reach the main query below)
      SET enable_parallel_append = on;
      SET enable_parallel_hash = on;
      SET max_parallel_workers_per_gather = 4;

      -- Larger work memory for complex sorts/joins
      SET work_mem = '128MB';
    `).catch(err => {
      // Non-fatal if analyze fails
      console.warn('Failed to analyze tables (non-fatal):', err.message);
    });

    // Execute the SQL query
    global.outputProgress({
      status: 'running',
      operation: 'Executing initial metrics SQL query',
      current: 5,
      total: 100,
      elapsed: global.formatElapsedTime(startTime),
      remaining: 'Calculating... (this could take several hours with 150M+ records)',
      rate: 0,
      percentage: '5',
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      },
      historyId: calculateHistoryId
    });

    // Read the SQL file
    const sqlFilePath = path.resolve(__dirname, 'populate_initial_product_metrics.sql');
    console.log('Base directory:', baseDir);
    console.log('Script directory:', __dirname);
    console.log('SQL file path:', sqlFilePath);
    console.log('Current working directory:', process.cwd());

    if (!fs.existsSync(sqlFilePath)) {
      throw new Error(`SQL file not found at ${sqlFilePath}`);
    }

    // Read and clean up the SQL
    const sqlQuery = fs.readFileSync(sqlFilePath, 'utf8')
      .replace(/\r\n/g, '\n') // Handle Windows line endings
      .replace(/\r/g, '\n')   // Handle old Mac line endings
      .trim();                // Remove leading/trailing whitespace (important)

    // Log details again AFTER cleaning
    console.log('SQL Query length (cleaned):', sqlQuery.length);
    console.log('SQL Query structure validation:');
    console.log('- Contains DO block:', sqlQuery.includes('DO $$') || sqlQuery.includes('DO $')); // Check both dollar-quote tag styles
    console.log('- Contains BEGIN:', sqlQuery.includes('BEGIN'));
    console.log('- Contains END:', sqlQuery.includes('END $$;') || sqlQuery.includes('END $'));
    console.log('- First 50 chars:', JSON.stringify(sqlQuery.slice(0, 50)));
    console.log('- Last 100 chars (cleaned):', JSON.stringify(sqlQuery.slice(-100)));

    // Final check to ensure a clean SQL ending
    if (!sqlQuery.endsWith('END $$;')) {
      console.warn('WARNING: SQL does not end with "END $$;". This might cause issues.');
      console.log('Exact ending:', JSON.stringify(sqlQuery.slice(-20)));
    }

    // Execute the script
    console.log('Starting initial product metrics population...');

    // Track the query promise for potential cancellation
    runningQueryPromise = connection.query({
      text: sqlQuery,
      rowMode: 'array'
    });
    await runningQueryPromise;
    runningQueryPromise = null;

    // Update progress to 100%
    global.outputProgress({
      status: 'complete',
      operation: 'Initial product metrics population complete',
      current: 100,
      total: 100,
      elapsed: global.formatElapsedTime(startTime),
      remaining: '0s',
      rate: 0,
      percentage: '100',
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      },
      historyId: calculateHistoryId
    });

    // Update history with completion
    await connection.query(`
      UPDATE calculate_history
      SET
        end_time = NOW(),
        duration_seconds = $1,
        status = 'completed'
      WHERE id = $2
    `, [Math.round((Date.now() - startTime) / 1000), calculateHistoryId]);

    // Clear the progress file on successful completion
    global.clearProgress();

    return {
      success: true,
      message: 'Initial product metrics population completed successfully',
      duration: Math.round((Date.now() - startTime) / 1000)
    };
  } catch (error) {
    const endTime = Date.now();
    const totalElapsedSeconds = Math.round((endTime - startTime) / 1000);

    // Enhanced error logging
    console.error('Error details:', {
      message: error.message,
      code: error.code,
      hint: error.hint,
      position: error.position,
      detail: error.detail,
      where: error.where ? error.where.substring(0, 500) + '...' : undefined, // Truncate to avoid huge logs
      severity: error.severity,
      file: error.file,
      line: error.line,
      routine: error.routine
    });

    // Update history with the error
    if (connection && calculateHistoryId) {
      await connection.query(`
        UPDATE calculate_history
        SET
          end_time = NOW(),
          duration_seconds = $1,
          status = $2,
          error_message = $3
        WHERE id = $4
      `, [
        totalElapsedSeconds,
        isCancelled ? 'cancelled' : 'failed',
        error.message,
        calculateHistoryId
      ]);
    }

    if (isCancelled) {
      global.outputProgress({
        status: 'cancelled',
        operation: 'Calculation cancelled',
        current: 50,
        total: 100,
        elapsed: global.formatElapsedTime(startTime),
        remaining: null,
        rate: 0,
        percentage: '50',
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: totalElapsedSeconds
        },
        historyId: calculateHistoryId
      });
    } else {
      global.outputProgress({
        status: 'error',
        operation: 'Error during initial product metrics population',
        message: error.message,
        current: 0,
        total: 100,
        elapsed: global.formatElapsedTime(startTime),
        remaining: null,
        rate: 0,
        percentage: '0',
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: totalElapsedSeconds
        },
        historyId: calculateHistoryId
      });
    }

    console.error('Error during initial product metrics population:', error);
    return {
      success: false,
      error: error.message,
      duration: totalElapsedSeconds
    };
  } finally {
    if (connection) {
      connection.release();
    }
    await closePool();
  }
}

// Start the population process
populateInitialMetrics()
  .then(result => {
    if (result.success) {
      console.log(`Initial product metrics population completed successfully in ${result.duration} seconds`);
      process.exit(0);
    } else {
      console.error(`Initial product metrics population failed: ${result.error}`);
      process.exit(1);
    }
  })
  .catch(err => {
    console.error('Unexpected error:', err);
    process.exit(1);
  });
@@ -0,0 +1,430 @@
-- Description: Performs the first population OR full recalculation of the product_metrics table based on
--              historically backfilled daily_product_snapshots and current product/PO data.
--              Calculates all metrics considering the full available history up to 'yesterday'.
-- Run ONCE after backfill_historical_snapshots_final.sql completes successfully.
-- Dependencies: Core import tables (products, purchase_orders), daily_product_snapshots (historically populated),
--               configuration tables (settings_*), product_metrics table must exist.
-- Frequency: Run ONCE.
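-- Note: all metrics are computed AS OF yesterday (_calculation_date below), so
-- reruns on the same day aggregate over the same window. Behaviour is tuned via
-- the settings_global keys referenced in the CTEs, e.g. 'abc_calculation_basis',
-- 'abc_revenue_threshold_a', 'abc_revenue_threshold_b', 'default_lead_time_days',
-- 'default_days_of_stock' and 'default_safety_stock_units'.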
DO $$
DECLARE
    _module_name VARCHAR := 'product_metrics_population'; -- Generic name
    _start_time TIMESTAMPTZ := clock_timestamp();
    -- Calculate metrics AS OF the end of the last fully completed day
    _calculation_date DATE := CURRENT_DATE - INTERVAL '1 day';
BEGIN
    RAISE NOTICE 'Running % module. Calculating AS OF: %. Start Time: %', _module_name, _calculation_date, _start_time;

    -- Optional: consider TRUNCATE for a completely fresh start;
    -- otherwise ON CONFLICT will update existing rows if this is rerun.
    -- TRUNCATE TABLE public.product_metrics;
    RAISE NOTICE 'Populating product_metrics table. This may take some time...';

    -- CTEs to gather the necessary information AS OF _calculation_date
    WITH CurrentInfo AS (
        -- Fetches current product details, including costs/prices used for forecasting & fallbacks
        SELECT
            p.pid, p.sku, p.title, p.brand, p.vendor, COALESCE(p.image_175, p.image) as image_url,
            p.visible as is_visible, p.replenishable,
            COALESCE(p.price, 0.00) as current_price, COALESCE(p.regular_price, 0.00) as current_regular_price,
            COALESCE(p.cost_price, 0.00) as current_cost_price,
            COALESCE(p.landing_cost_price, p.cost_price, 0.00) as current_effective_cost, -- Use landing cost if available, else cost
            p.stock_quantity as current_stock, -- Use actual current stock as the forecast base
            p.created_at, p.first_received, p.date_last_sold,
            p.moq,
            p.uom
        FROM public.products p
    ),
    OnOrderInfo AS (
        -- Calculates current on-order quantities and costs
        SELECT
            pid,
            COALESCE(SUM(ordered - received), 0) AS on_order_qty,
            COALESCE(SUM((ordered - received) * cost_price), 0.00) AS on_order_cost,
            MIN(expected_date) AS earliest_expected_date
        FROM public.purchase_orders
        -- Use the most common statuses representing active, unfulfilled POs
        WHERE status IN ('open', 'partially_received', 'ordered', 'preordered', 'receiving_started', 'electronically_sent', 'electronically_ready_send')
          AND (ordered - received) > 0
        GROUP BY pid
    ),
    HistoricalDates AS (
        -- Determines key historical dates from orders and PO history (receiving_history)
        SELECT
            p.pid,
            MIN(o.date)::date AS date_first_sold,
            MAX(o.date)::date AS max_order_date, -- Used as fallback for date_last_sold
            MIN(rh.first_receipt_date) AS date_first_received_calc,
            MAX(rh.last_receipt_date) AS date_last_received_calc
        FROM public.products p
        LEFT JOIN public.orders o ON p.pid = o.pid AND o.quantity > 0 AND o.status NOT IN ('canceled', 'returned')
        LEFT JOIN (
            SELECT
                po.pid,
                MIN((rh.item->>'received_at')::date) as first_receipt_date,
                MAX((rh.item->>'received_at')::date) as last_receipt_date
            FROM public.purchase_orders po
            CROSS JOIN LATERAL jsonb_array_elements(po.receiving_history) AS rh(item)
            WHERE jsonb_typeof(po.receiving_history) = 'array' AND jsonb_array_length(po.receiving_history) > 0
            GROUP BY po.pid
        ) rh ON p.pid = rh.pid
        GROUP BY p.pid
    ),
    SnapshotAggregates AS (
        -- Aggregates metrics from historical snapshots up to _calculation_date
        SELECT
            pid,
            -- Rolling periods relative to _calculation_date
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '6 days' AND _calculation_date THEN units_sold ELSE 0 END) AS sales_7d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '6 days' AND _calculation_date THEN net_revenue ELSE 0 END) AS revenue_7d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '13 days' AND _calculation_date THEN units_sold ELSE 0 END) AS sales_14d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '13 days' AND _calculation_date THEN net_revenue ELSE 0 END) AS revenue_14d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN units_sold ELSE 0 END) AS sales_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN net_revenue ELSE 0 END) AS revenue_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN cogs ELSE 0 END) AS cogs_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN profit ELSE 0 END) AS profit_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN units_returned ELSE 0 END) AS returns_units_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN returns_revenue ELSE 0 END) AS returns_revenue_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN discounts ELSE 0 END) AS discounts_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN gross_revenue ELSE 0 END) AS gross_revenue_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN gross_regular_revenue ELSE 0 END) AS gross_regular_revenue_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date AND stockout_flag THEN 1 ELSE 0 END) AS stockout_days_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '364 days' AND _calculation_date THEN units_sold ELSE 0 END) AS sales_365d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '364 days' AND _calculation_date THEN net_revenue ELSE 0 END) AS revenue_365d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN units_received ELSE 0 END) AS received_qty_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN cost_received ELSE 0 END) AS received_cost_30d,

            -- Averages over the last 30 days ending _calculation_date
            AVG(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN eod_stock_quantity END) AS avg_stock_units_30d,
            AVG(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN eod_stock_cost END) AS avg_stock_cost_30d,
            AVG(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN eod_stock_retail END) AS avg_stock_retail_30d,
            AVG(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN eod_stock_gross END) AS avg_stock_gross_30d,

            -- Lifetime (sum over ALL available snapshots up to the calculation date)
            SUM(units_sold) AS lifetime_sales,
            SUM(net_revenue) AS lifetime_revenue,

            -- Yesterday (sales for the specific _calculation_date)
            SUM(CASE WHEN snapshot_date = _calculation_date THEN units_sold ELSE 0 END) as yesterday_sales

        FROM public.daily_product_snapshots
        WHERE snapshot_date <= _calculation_date -- Only use data up to the calculation point
        GROUP BY pid
    ),
    FirstPeriodMetrics AS (
        -- Calculates sales/revenue for the first X days after the first sale date
        -- (uses the HistoricalDates CTE to get the first sale date)
        SELECT
            pid, date_first_sold,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '6 days' THEN units_sold ELSE 0 END) AS first_7_days_sales,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '6 days' THEN net_revenue ELSE 0 END) AS first_7_days_revenue,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '29 days' THEN units_sold ELSE 0 END) AS first_30_days_sales,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '29 days' THEN net_revenue ELSE 0 END) AS first_30_days_revenue,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '59 days' THEN units_sold ELSE 0 END) AS first_60_days_sales,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '59 days' THEN net_revenue ELSE 0 END) AS first_60_days_revenue,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '89 days' THEN units_sold ELSE 0 END) AS first_90_days_sales,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '89 days' THEN net_revenue ELSE 0 END) AS first_90_days_revenue
        FROM public.daily_product_snapshots ds
        JOIN HistoricalDates hd USING(pid)
        WHERE date_first_sold IS NOT NULL
          AND snapshot_date >= date_first_sold -- Only consider snapshots after the first sale
          AND snapshot_date <= _calculation_date -- Only up to the overall calculation date
        GROUP BY pid, date_first_sold
    ),
    Settings AS (
        -- Fetches effective configuration settings (Product > Vendor > Global)
        SELECT
            p.pid,
            COALESCE(sp.lead_time_days, sv.default_lead_time_days, (SELECT setting_value FROM settings_global WHERE setting_key = 'default_lead_time_days')::int, 14) AS effective_lead_time,
            COALESCE(sp.days_of_stock, sv.default_days_of_stock, (SELECT setting_value FROM settings_global WHERE setting_key = 'default_days_of_stock')::int, 30) AS effective_days_of_stock,
            COALESCE(sp.safety_stock, (SELECT setting_value::int FROM settings_global WHERE setting_key = 'default_safety_stock_units'), 0) AS effective_safety_stock,
            COALESCE(sp.exclude_from_forecast, FALSE) AS exclude_forecast
        FROM public.products p
        LEFT JOIN public.settings_product sp ON p.pid = sp.pid
        LEFT JOIN public.settings_vendor sv ON p.vendor = sv.vendor
    ),
    AvgLeadTime AS (
        -- Calculates the average lead time from historical POs
        SELECT
            pid,
            AVG(GREATEST(1,
                CASE
                    WHEN last_received_date IS NOT NULL AND date IS NOT NULL
                    THEN (last_received_date::date - date::date)
                    ELSE 1
                END
            ))::int AS avg_lead_time_days_calc
        FROM public.purchase_orders
        WHERE status = 'received' -- Assumes 'received' marks full receipt
          AND last_received_date IS NOT NULL
          AND date IS NOT NULL
          AND last_received_date >= date
        GROUP BY pid
    ),
    RankedForABC AS (
        -- Ranks products based on the configured ABC metric (using historical data)
        SELECT
            p.pid,
            CASE COALESCE((SELECT setting_value FROM settings_global WHERE setting_key = 'abc_calculation_basis'), 'revenue_30d')
                WHEN 'sales_30d' THEN COALESCE(sa.sales_30d, 0)
                WHEN 'lifetime_revenue' THEN COALESCE(sa.lifetime_revenue, 0)::numeric
                ELSE COALESCE(sa.revenue_30d, 0) -- Default to revenue_30d
            END AS metric_value
        FROM public.products p -- Use products as the base
        JOIN SnapshotAggregates sa ON p.pid = sa.pid
        WHERE p.replenishable = TRUE -- Only rank replenishable products
          AND (CASE COALESCE((SELECT setting_value FROM settings_global WHERE setting_key = 'abc_calculation_basis'), 'revenue_30d')
                WHEN 'sales_30d' THEN COALESCE(sa.sales_30d, 0)
                WHEN 'lifetime_revenue' THEN COALESCE(sa.lifetime_revenue, 0)::numeric
                ELSE COALESCE(sa.revenue_30d, 0)
            END) > 0 -- Exclude zero-value products from the ranking
    ),
    CumulativeABC AS (
        -- Calculates cumulative metric values for ABC ranking
        SELECT
            pid, metric_value,
            SUM(metric_value) OVER (ORDER BY metric_value DESC NULLS LAST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as cumulative_metric,
            SUM(metric_value) OVER () as total_metric
        FROM RankedForABC
    ),
    FinalABC AS (
        -- Assigns A, B, or C class based on thresholds
        SELECT
            pid,
            CASE
                WHEN cumulative_metric / NULLIF(total_metric, 0) <= COALESCE((SELECT setting_value::numeric FROM settings_global WHERE setting_key = 'abc_revenue_threshold_a'), 0.8) THEN 'A'::char(1)
                WHEN cumulative_metric / NULLIF(total_metric, 0) <= COALESCE((SELECT setting_value::numeric FROM settings_global WHERE setting_key = 'abc_revenue_threshold_b'), 0.95) THEN 'B'::char(1)
                ELSE 'C'::char(1)
            END AS abc_class_calc
        FROM CumulativeABC
    )
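    -- Worked example of the ABC split with the default thresholds (0.8 / 0.95):
    -- four products contributing 50, 30, 15 and 5 to the chosen metric (total 100)
    -- have cumulative shares of 0.50, 0.80, 0.95 and 1.00, so they are classed
    -- A, A, B and C respectively.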
-- Final INSERT/UPDATE statement using all the prepared CTEs
INSERT INTO public.product_metrics (
    pid, last_calculated, sku, title, brand, vendor, image_url, is_visible, is_replenishable,
    current_price, current_regular_price, current_cost_price, current_landing_cost_price,
    current_stock, current_stock_cost, current_stock_retail, current_stock_gross,
    on_order_qty, on_order_cost, on_order_retail, earliest_expected_date,
    date_created, date_first_received, date_last_received, date_first_sold, date_last_sold, age_days,
    sales_7d, revenue_7d, sales_14d, revenue_14d, sales_30d, revenue_30d, cogs_30d, profit_30d,
    returns_units_30d, returns_revenue_30d, discounts_30d, gross_revenue_30d, gross_regular_revenue_30d,
    stockout_days_30d, sales_365d, revenue_365d,
    avg_stock_units_30d, avg_stock_cost_30d, avg_stock_retail_30d, avg_stock_gross_30d,
    received_qty_30d, received_cost_30d,
    lifetime_sales, lifetime_revenue,
    first_7_days_sales, first_7_days_revenue, first_30_days_sales, first_30_days_revenue,
    first_60_days_sales, first_60_days_revenue, first_90_days_sales, first_90_days_revenue,
    asp_30d, acp_30d, avg_ros_30d, avg_sales_per_day_30d,
    margin_30d, markup_30d, gmroi_30d, stockturn_30d, return_rate_30d, discount_rate_30d,
    stockout_rate_30d, markdown_30d, markdown_rate_30d, sell_through_30d,
    avg_lead_time_days, abc_class,
    sales_velocity_daily, config_lead_time, config_days_of_stock, config_safety_stock,
    planning_period_days, lead_time_forecast_units, days_of_stock_forecast_units,
    planning_period_forecast_units, lead_time_closing_stock, days_of_stock_closing_stock,
    replenishment_needed_raw, replenishment_units, replenishment_cost, replenishment_retail, replenishment_profit,
    to_order_units, forecast_lost_sales_units, forecast_lost_revenue,
    stock_cover_in_days, po_cover_in_days, sells_out_in_days, replenish_date,
    overstocked_units, overstocked_cost, overstocked_retail, is_old_stock,
    yesterday_sales
)
SELECT
    -- Select columns in order, joining all CTEs by pid
    ci.pid, _start_time, ci.sku, ci.title, ci.brand, ci.vendor, ci.image_url, ci.is_visible, ci.replenishable,
    ci.current_price, ci.current_regular_price, ci.current_cost_price, ci.current_effective_cost,
    ci.current_stock, (ci.current_stock * COALESCE(ci.current_effective_cost, 0.00))::numeric(12,2), (ci.current_stock * COALESCE(ci.current_price, 0.00))::numeric(12,2), (ci.current_stock * COALESCE(ci.current_regular_price, 0.00))::numeric(12,2),
    COALESCE(ooi.on_order_qty, 0), COALESCE(ooi.on_order_cost, 0.00)::numeric(12,2), (COALESCE(ooi.on_order_qty, 0) * COALESCE(ci.current_price, 0.00))::numeric(12,2), ooi.earliest_expected_date,

    -- Cast timestamps to dates before any date arithmetic to avoid type errors
    ci.created_at::date,
    COALESCE(ci.first_received::date, hd.date_first_received_calc),
    hd.date_last_received_calc,
    hd.date_first_sold,
    COALESCE(ci.date_last_sold, hd.max_order_date),
    -- Work only with dates here to avoid the timestamp + integer error
    CASE
        WHEN LEAST(ci.created_at::date, COALESCE(hd.date_first_sold, ci.created_at::date)) IS NOT NULL
        THEN (_calculation_date::date - LEAST(ci.created_at::date, COALESCE(hd.date_first_sold, ci.created_at::date)))::int
        ELSE NULL
    END,

    COALESCE(sa.sales_7d, 0), COALESCE(sa.revenue_7d, 0), COALESCE(sa.sales_14d, 0), COALESCE(sa.revenue_14d, 0), COALESCE(sa.sales_30d, 0), COALESCE(sa.revenue_30d, 0), COALESCE(sa.cogs_30d, 0), COALESCE(sa.profit_30d, 0),
    COALESCE(sa.returns_units_30d, 0), COALESCE(sa.returns_revenue_30d, 0), COALESCE(sa.discounts_30d, 0), COALESCE(sa.gross_revenue_30d, 0), COALESCE(sa.gross_regular_revenue_30d, 0),
    COALESCE(sa.stockout_days_30d, 0), COALESCE(sa.sales_365d, 0), COALESCE(sa.revenue_365d, 0),
    sa.avg_stock_units_30d, sa.avg_stock_cost_30d, sa.avg_stock_retail_30d, sa.avg_stock_gross_30d, -- Averages can be NULL if no data
    COALESCE(sa.received_qty_30d, 0), COALESCE(sa.received_cost_30d, 0),
    COALESCE(sa.lifetime_sales, 0), COALESCE(sa.lifetime_revenue, 0),
    fpm.first_7_days_sales, fpm.first_7_days_revenue, fpm.first_30_days_sales, fpm.first_30_days_revenue,
    fpm.first_60_days_sales, fpm.first_60_days_revenue, fpm.first_90_days_sales, fpm.first_90_days_revenue,

    -- Calculated KPIs (using COALESCE on inputs where appropriate)
    sa.revenue_30d / NULLIF(sa.sales_30d, 0) AS asp_30d,
    sa.cogs_30d / NULLIF(sa.sales_30d, 0) AS acp_30d,
    sa.profit_30d / NULLIF(sa.sales_30d, 0) AS avg_ros_30d,
    COALESCE(sa.sales_30d, 0) / 30.0 AS avg_sales_per_day_30d,

    -- Percentages are cast to numeric with appropriate precision
    ((sa.profit_30d / NULLIF(sa.revenue_30d, 0)) * 100)::numeric(8,2) AS margin_30d,
    ((sa.profit_30d / NULLIF(sa.cogs_30d, 0)) * 100)::numeric(8,2) AS markup_30d,
    sa.profit_30d / NULLIF(sa.avg_stock_cost_30d, 0) AS gmroi_30d,
    sa.sales_30d / NULLIF(sa.avg_stock_units_30d, 0) AS stockturn_30d,
    ((sa.returns_units_30d / NULLIF(COALESCE(sa.sales_30d, 0) + COALESCE(sa.returns_units_30d, 0), 0)) * 100)::numeric(8,2) AS return_rate_30d,
    ((sa.discounts_30d / NULLIF(sa.gross_revenue_30d, 0)) * 100)::numeric(8,2) AS discount_rate_30d,
    ((COALESCE(sa.stockout_days_30d, 0) / 30.0) * 100)::numeric(8,2) AS stockout_rate_30d,
    GREATEST(0, sa.gross_regular_revenue_30d - sa.gross_revenue_30d) AS markdown_30d, -- Ensure markdown isn't negative
    ((GREATEST(0, sa.gross_regular_revenue_30d - sa.gross_revenue_30d) / NULLIF(sa.gross_regular_revenue_30d, 0)) * 100)::numeric(8,2) AS markdown_rate_30d,
    -- Sell-through rate: sales / (end-of-period stock + sales), one definition that proxies sales / beginning stock
    ((sa.sales_30d / NULLIF(
        (SELECT eod_stock_quantity FROM daily_product_snapshots WHERE snapshot_date = _calculation_date AND pid = ci.pid LIMIT 1) + COALESCE(sa.sales_30d, 0)
    , 0)) * 100)::numeric(8,2) AS sell_through_30d,

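    -- Worked example for the ratio KPIs (illustrative numbers only): with
    -- revenue_30d = 2000, cogs_30d = 1200, profit_30d = 800 and sales_30d = 100,
    -- asp_30d = 20.00, acp_30d = 12.00, margin_30d = 40.00 (%) and markup_30d = 66.67 (%).
    -- For sell_through_30d, 100 units sold with 150 units on hand at period end
    -- gives 100 / (150 + 100) = 40.00 (%).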
    -- Use calculated periodic metrics
    alt.avg_lead_time_days_calc,
    CASE
        WHEN ci.replenishable = FALSE THEN NULL -- Non-replenishable products don't get a class
        ELSE COALESCE(fa.abc_class_calc, 'C') -- Replenishable products that didn't rank (zero contribution) default to C
    END,

    -- Forecasting intermediate values (based on historical aggregates ending _calculation_date)
    (COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) AS sales_velocity_daily, -- GREATEST clamp keeps the divisor >= 1
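    -- Example (hypothetical): 60 units sold over 30 days with 10 stockout days
    -- gives a velocity of 60 / (30 - 10) = 3.0 units/day; the GREATEST(1.0, ...)
    -- clamp means a product out of stock 29+ days still divides by at least 1 day.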
    s.effective_lead_time AS config_lead_time, s.effective_days_of_stock AS config_days_of_stock, s.effective_safety_stock AS config_safety_stock,
    (s.effective_lead_time + s.effective_days_of_stock) AS planning_period_days,
    -- Calculate raw forecast need components (using safe velocity)
    (COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time AS lead_time_forecast_units,
    (COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock AS days_of_stock_forecast_units,
    -- Planning period forecast units (sum of the rounded lead-time and days-of-stock units)
    CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
    + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock) AS planning_period_forecast_units,
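    -- Example (hypothetical): velocity 3.0/day, lead time 14 days, days of stock 30
    -- -> lead_time_forecast_units = 42, days_of_stock_forecast_units = 90,
    -- planning_period_forecast_units = CEILING(42) + CEILING(90) = 132.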
    -- Closing stock calculations (using raw forecast components for accuracy before rounding)
    (ci.current_stock + COALESCE(ooi.on_order_qty, 0) - ((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)) AS lead_time_closing_stock,
    ((ci.current_stock + COALESCE(ooi.on_order_qty, 0) - ((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)))
    - ((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock) AS days_of_stock_closing_stock,
    -- Raw replenishment needed
    (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time) -- Use rounded forecast units
    + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
    + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0) AS replenishment_needed_raw,

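    -- Continuing the hypothetical example above: with safety stock 10, current
    -- stock 50 and 20 on order, replenishment_needed_raw = (42 + 90) + 10 - 50 - 20 = 72.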
    -- Final Forecasting Metrics
    -- Replenishment Units (calculated need, before MOQ)
    CEILING(GREATEST(0,
        (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
        + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
        + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
    ))::int AS replenishment_units,
    -- Replenishment Cost/Retail/Profit (based on replenishment_units)
    (CEILING(GREATEST(0,
        (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
        + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
        + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
    ))::int) * COALESCE(ci.current_effective_cost, 0.00)::numeric(12,2) AS replenishment_cost,
    (CEILING(GREATEST(0,
        (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
        + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
        + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
    ))::int) * COALESCE(ci.current_price, 0.00)::numeric(12,2) AS replenishment_retail,
    (CEILING(GREATEST(0,
        (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
        + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
        + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
    ))::int) * (COALESCE(ci.current_price, 0.00) - COALESCE(ci.current_effective_cost, 0.00))::numeric(12,2) AS replenishment_profit,

    -- *** FIX: To Order Units (Apply MOQ rounding) ***
    CASE
        WHEN COALESCE(ci.moq, 0) <= 1 THEN -- Treat no/invalid MOQ or MOQ=1 as no rounding needed
            CEILING(GREATEST(0,
                (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
                + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
                + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
            ))::int
        ELSE -- Apply MOQ rounding: Round UP to nearest multiple of MOQ
            (CEILING(GREATEST(0,
                (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
                + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
                + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
            ) / NULLIF(ci.moq::numeric, 0)) * COALESCE(ci.moq, 1))::int
    END AS to_order_units,

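    -- MOQ rounding example (hypothetical): a calculated need of 72 units with
    -- ci.moq = 25 becomes CEILING(72 / 25) * 25 = 75 units to order; with MOQ
    -- absent or <= 1 the raw 72 is kept unchanged.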
    -- Forecast Lost Sales (units lost during lead time if current stock + on-order is insufficient)
    CEILING(GREATEST(0,
        ((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time) -- Demand during lead time
        - (ci.current_stock + COALESCE(ooi.on_order_qty, 0)) -- Supply available before order arrives
    ))::int AS forecast_lost_sales_units,
    -- Forecast Lost Revenue
    (CEILING(GREATEST(0,
        ((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
        - (ci.current_stock + COALESCE(ooi.on_order_qty, 0))
    ))::int) * COALESCE(ci.current_price, 0.00)::numeric(12,2) AS forecast_lost_revenue,

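    -- Example (hypothetical): lead-time demand of 42 units against 30 on hand plus
    -- 5 on order forecasts 42 - 35 = 7 lost units, or 7 * current_price in lost revenue.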
    -- Stock Cover etc (using safe velocity)
    ci.current_stock / NULLIF((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)), 0) AS stock_cover_in_days,
    COALESCE(ooi.on_order_qty, 0) / NULLIF((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)), 0) AS po_cover_in_days,
    (ci.current_stock + COALESCE(ooi.on_order_qty, 0)) / NULLIF((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)), 0) AS sells_out_in_days,
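    -- Example (hypothetical): 50 units on hand at a velocity of 2.5 units/day gives
    -- stock_cover_in_days = 20; a zero velocity yields NULL rather than a
    -- division-by-zero error, courtesy of the NULLIF guards.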
    -- Replenish Date (Project forward from 'today', which is _calculation_date + 1 day)
    CASE
        WHEN (COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) > 0 -- Check for positive velocity
        THEN
            _calculation_date + INTERVAL '1 day' -- Today
            + FLOOR(GREATEST(0, ci.current_stock - s.effective_safety_stock) -- Stock above safety
                / (COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) -- divided by velocity
            )::integer * INTERVAL '1 day' -- Gives date safety stock is hit
            - s.effective_lead_time * INTERVAL '1 day' -- Subtract lead time
        ELSE NULL -- Cannot calculate if no sales velocity
    END AS replenish_date,
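    -- Example (hypothetical dates): on _calculation_date 2024-06-30 with 50 units
    -- on hand, safety stock 10, velocity 2.0/day and a 14-day lead time, safety
    -- stock is hit in FLOOR(40 / 2.0) = 20 days (2024-07-21), so replenish_date
    -- works out to 2024-07-07.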
    -- Overstocked Units (Stock above safety + planning period demand)
    GREATEST(0, ci.current_stock - s.effective_safety_stock -
        (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time) -- Demand during lead time
        + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock)) -- Demand during DOS
    )::int AS overstocked_units,
    (GREATEST(0, ci.current_stock - s.effective_safety_stock -
        (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
        + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
    )::int) * COALESCE(ci.current_effective_cost, 0.00)::numeric(12,2) AS overstocked_cost,
    (GREATEST(0, ci.current_stock - s.effective_safety_stock -
        (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
        + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
    )::int) * COALESCE(ci.current_price, 0.00)::numeric(12,2) AS overstocked_retail,
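    -- Example (hypothetical): 200 units on hand, safety stock 10 and planning-period
    -- demand of 132 units leaves GREATEST(0, 200 - 10 - 132) = 58 overstocked units,
    -- valued at cost and at retail in the two expressions above.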
    -- Old Stock Flag: older than 60 days, no recent sale, no recent receipt, nothing on order
    (ci.created_at::date < (_calculation_date - INTERVAL '60 day')::date) AND
    (COALESCE(ci.date_last_sold, hd.max_order_date) IS NULL OR COALESCE(ci.date_last_sold, hd.max_order_date) < (_calculation_date - INTERVAL '60 day')::date) AND
    (hd.date_last_received_calc IS NULL OR hd.date_last_received_calc < (_calculation_date - INTERVAL '60 day')::date) AND
    COALESCE(ooi.on_order_qty, 0) = 0 AS is_old_stock,
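    -- Note: the 60-day window for is_old_stock is hard-coded in this module rather
    -- than read from settings_global like the ABC thresholds above.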
    COALESCE(sa.yesterday_sales, 0) -- Sales for _calculation_date

FROM CurrentInfo ci
LEFT JOIN OnOrderInfo ooi ON ci.pid = ooi.pid
LEFT JOIN HistoricalDates hd ON ci.pid = hd.pid
LEFT JOIN SnapshotAggregates sa ON ci.pid = sa.pid
LEFT JOIN FirstPeriodMetrics fpm ON ci.pid = fpm.pid
LEFT JOIN Settings s ON ci.pid = s.pid
LEFT JOIN AvgLeadTime alt ON ci.pid = alt.pid -- Join calculated avg lead time
LEFT JOIN FinalABC fa ON ci.pid = fa.pid -- Join calculated ABC class
WHERE s.exclude_forecast IS FALSE OR s.exclude_forecast IS NULL -- Skip products explicitly excluded from forecasting

ON CONFLICT (pid) DO UPDATE SET
    -- *** IMPORTANT: every inserted column must also be listed here so the
    -- entire row is refreshed on conflict ***
    last_calculated = EXCLUDED.last_calculated, sku = EXCLUDED.sku, title = EXCLUDED.title, brand = EXCLUDED.brand, vendor = EXCLUDED.vendor, image_url = EXCLUDED.image_url, is_visible = EXCLUDED.is_visible, is_replenishable = EXCLUDED.is_replenishable,
    current_price = EXCLUDED.current_price, current_regular_price = EXCLUDED.current_regular_price, current_cost_price = EXCLUDED.current_cost_price, current_landing_cost_price = EXCLUDED.current_landing_cost_price,
    current_stock = EXCLUDED.current_stock, current_stock_cost = EXCLUDED.current_stock_cost, current_stock_retail = EXCLUDED.current_stock_retail, current_stock_gross = EXCLUDED.current_stock_gross,
    on_order_qty = EXCLUDED.on_order_qty, on_order_cost = EXCLUDED.on_order_cost, on_order_retail = EXCLUDED.on_order_retail, earliest_expected_date = EXCLUDED.earliest_expected_date,
    date_created = EXCLUDED.date_created, date_first_received = EXCLUDED.date_first_received, date_last_received = EXCLUDED.date_last_received, date_first_sold = EXCLUDED.date_first_sold, date_last_sold = EXCLUDED.date_last_sold, age_days = EXCLUDED.age_days,
    sales_7d = EXCLUDED.sales_7d, revenue_7d = EXCLUDED.revenue_7d, sales_14d = EXCLUDED.sales_14d, revenue_14d = EXCLUDED.revenue_14d, sales_30d = EXCLUDED.sales_30d, revenue_30d = EXCLUDED.revenue_30d, cogs_30d = EXCLUDED.cogs_30d, profit_30d = EXCLUDED.profit_30d,
    returns_units_30d = EXCLUDED.returns_units_30d, returns_revenue_30d = EXCLUDED.returns_revenue_30d, discounts_30d = EXCLUDED.discounts_30d, gross_revenue_30d = EXCLUDED.gross_revenue_30d, gross_regular_revenue_30d = EXCLUDED.gross_regular_revenue_30d,
    stockout_days_30d = EXCLUDED.stockout_days_30d, sales_365d = EXCLUDED.sales_365d, revenue_365d = EXCLUDED.revenue_365d,
    avg_stock_units_30d = EXCLUDED.avg_stock_units_30d, avg_stock_cost_30d = EXCLUDED.avg_stock_cost_30d, avg_stock_retail_30d = EXCLUDED.avg_stock_retail_30d, avg_stock_gross_30d = EXCLUDED.avg_stock_gross_30d,
    received_qty_30d = EXCLUDED.received_qty_30d, received_cost_30d = EXCLUDED.received_cost_30d,
    lifetime_sales = EXCLUDED.lifetime_sales, lifetime_revenue = EXCLUDED.lifetime_revenue,
    first_7_days_sales = EXCLUDED.first_7_days_sales, first_7_days_revenue = EXCLUDED.first_7_days_revenue, first_30_days_sales = EXCLUDED.first_30_days_sales, first_30_days_revenue = EXCLUDED.first_30_days_revenue,
    first_60_days_sales = EXCLUDED.first_60_days_sales, first_60_days_revenue = EXCLUDED.first_60_days_revenue, first_90_days_sales = EXCLUDED.first_90_days_sales, first_90_days_revenue = EXCLUDED.first_90_days_revenue,
    asp_30d = EXCLUDED.asp_30d, acp_30d = EXCLUDED.acp_30d, avg_ros_30d = EXCLUDED.avg_ros_30d, avg_sales_per_day_30d = EXCLUDED.avg_sales_per_day_30d,
    margin_30d = EXCLUDED.margin_30d, markup_30d = EXCLUDED.markup_30d, gmroi_30d = EXCLUDED.gmroi_30d, stockturn_30d = EXCLUDED.stockturn_30d, return_rate_30d = EXCLUDED.return_rate_30d, discount_rate_30d = EXCLUDED.discount_rate_30d,
    stockout_rate_30d = EXCLUDED.stockout_rate_30d, markdown_30d = EXCLUDED.markdown_30d, markdown_rate_30d = EXCLUDED.markdown_rate_30d, sell_through_30d = EXCLUDED.sell_through_30d,
    avg_lead_time_days = EXCLUDED.avg_lead_time_days, abc_class = EXCLUDED.abc_class,
    sales_velocity_daily = EXCLUDED.sales_velocity_daily, config_lead_time = EXCLUDED.config_lead_time, config_days_of_stock = EXCLUDED.config_days_of_stock, config_safety_stock = EXCLUDED.config_safety_stock,
    planning_period_days = EXCLUDED.planning_period_days, lead_time_forecast_units = EXCLUDED.lead_time_forecast_units, days_of_stock_forecast_units = EXCLUDED.days_of_stock_forecast_units,
    planning_period_forecast_units = EXCLUDED.planning_period_forecast_units, lead_time_closing_stock = EXCLUDED.lead_time_closing_stock, days_of_stock_closing_stock = EXCLUDED.days_of_stock_closing_stock,
    replenishment_needed_raw = EXCLUDED.replenishment_needed_raw, replenishment_units = EXCLUDED.replenishment_units, replenishment_cost = EXCLUDED.replenishment_cost, replenishment_retail = EXCLUDED.replenishment_retail, replenishment_profit = EXCLUDED.replenishment_profit,
    to_order_units = EXCLUDED.to_order_units, -- *** Update to use EXCLUDED ***
    forecast_lost_sales_units = EXCLUDED.forecast_lost_sales_units, forecast_lost_revenue = EXCLUDED.forecast_lost_revenue,
    stock_cover_in_days = EXCLUDED.stock_cover_in_days, po_cover_in_days = EXCLUDED.po_cover_in_days, sells_out_in_days = EXCLUDED.sells_out_in_days, replenish_date = EXCLUDED.replenish_date,
    overstocked_units = EXCLUDED.overstocked_units, overstocked_cost = EXCLUDED.overstocked_cost, overstocked_retail = EXCLUDED.overstocked_retail, is_old_stock = EXCLUDED.is_old_stock,
    yesterday_sales = EXCLUDED.yesterday_sales;

RAISE NOTICE 'Finished % module. Duration: %', _module_name, clock_timestamp() - _start_time;
END $$;
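
-- Illustrative sanity check (not part of the module; assumes the table and
-- columns populated above). Run manually after a calculation pass:
-- SELECT pid, abc_class, replenishment_units, to_order_units, replenish_date
-- FROM public.product_metrics
-- ORDER BY last_calculated DESC
-- LIMIT 10;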