10 Commits

Author SHA1 Message Date
a8d3fd8033 Update import scripts through orders 2025-02-17 00:53:07 -05:00
702b956ff1 Fix main import script issue 2025-02-16 11:54:28 -05:00
9b8577f258 Update import scripts through products 2025-02-14 21:46:50 -05:00
9623681a15 Update import scripts, working through categories 2025-02-14 13:30:14 -05:00
cc22fd8c35 Update backend/frontend 2025-02-14 11:26:02 -05:00
0ef1b6100e Clean up old files 2025-02-14 09:37:05 -05:00
a519746ccb Move authentication to postgres 2025-02-14 09:10:15 -05:00
f29dd8ef8b Clean up build errors 2025-02-13 20:02:11 -05:00
f2a5c06005 Fixes for re-running reset scripts 2025-02-13 10:25:04 -05:00
fb9f959fe5 Update schemas and reset scripts 2025-02-12 16:14:25 -05:00
44 changed files with 4262 additions and 3116 deletions

View File

@@ -1,5 +1,209 @@
// ecosystem.config.js
const path = require('path');
const dotenv = require('dotenv');
// Read a .env file and return its parsed key/value map.
// Any failure (missing file, invalid contents, unexpected exception) is
// logged as a warning and an empty object is returned, so building the
// PM2 config never crashes on a bad .env.
const loadEnvFile = (envPath) => {
  try {
    console.log('Loading env from:', envPath);
    const { error, parsed } = dotenv.config({ path: envPath });
    if (error) {
      console.warn(`Warning: .env file not found or invalid at ${envPath}:`, error.message);
      return {};
    }
    const vars = parsed || {};
    console.log('Env variables loaded from', envPath, ':', Object.keys(vars));
    return vars;
  } catch (error) {
    console.warn(`Warning: Error loading .env file at ${envPath}:`, error.message);
    return {};
  }
};
// Load environment variables for each server
// Load environment variables for each server. Every server goes through
// loadEnvFile so a missing or invalid .env degrades to {} with a warning
// instead of silently differing per server.
const authEnv = loadEnvFile(path.resolve(__dirname, 'dashboard/auth-server/.env'));
const aircallEnv = loadEnvFile(path.resolve(__dirname, 'dashboard/aircall-server/.env'));
const klaviyoEnv = loadEnvFile(path.resolve(__dirname, 'dashboard/klaviyo-server/.env'));
const metaEnv = loadEnvFile(path.resolve(__dirname, 'dashboard/meta-server/.env'));
// Previously this one called dotenv.config() directly, bypassing
// loadEnvFile's error handling and logging; use the shared helper.
const googleAnalyticsEnv = loadEnvFile(path.resolve(__dirname, 'dashboard/google-server/.env'));
const typeformEnv = loadEnvFile(path.resolve(__dirname, 'dashboard/typeform-server/.env'));
const inventoryEnv = loadEnvFile(path.resolve(__dirname, 'inventory/.env'));
// Common log settings for all apps
const logSettings = {
log_rotate: true,
max_size: '10M',
retain: '10',
log_date_format: 'YYYY-MM-DD HH:mm:ss'
};
// Common app settings
const commonSettings = {
instances: 1,
exec_mode: 'fork',
autorestart: true,
watch: false,
max_memory_restart: '1G',
time: true,
...logSettings,
ignore_watch: [
'node_modules',
'logs',
'.git',
'*.log'
],
min_uptime: 5000,
max_restarts: 5,
restart_delay: 4000,
listen_timeout: 50000,
kill_timeout: 5000,
node_args: '--max-old-space-size=1536'
};
module.exports = { module.exports = {
apps: [ apps: [
{
...commonSettings,
name: 'auth-server',
script: './dashboard/auth-server/index.js',
env: {
NODE_ENV: 'production',
PORT: 3003,
...authEnv
},
error_file: 'dashboard/auth-server/logs/pm2/err.log',
out_file: 'dashboard/auth-server/logs/pm2/out.log',
log_file: 'dashboard/auth-server/logs/pm2/combined.log',
env_production: {
NODE_ENV: 'production',
PORT: 3003
},
env_development: {
NODE_ENV: 'development',
PORT: 3003
}
},
{
...commonSettings,
name: 'aircall-server',
script: './dashboard/aircall-server/server.js',
env: {
NODE_ENV: 'production',
AIRCALL_PORT: 3002,
...aircallEnv
},
error_file: 'dashboard/aircall-server/logs/pm2/err.log',
out_file: 'dashboard/aircall-server/logs/pm2/out.log',
log_file: 'dashboard/aircall-server/logs/pm2/combined.log',
env_production: {
NODE_ENV: 'production',
AIRCALL_PORT: 3002
}
},
{
...commonSettings,
name: 'klaviyo-server',
script: './dashboard/klaviyo-server/server.js',
env: {
NODE_ENV: 'production',
KLAVIYO_PORT: 3004,
...klaviyoEnv
},
error_file: 'dashboard/klaviyo-server/logs/pm2/err.log',
out_file: 'dashboard/klaviyo-server/logs/pm2/out.log',
log_file: 'dashboard/klaviyo-server/logs/pm2/combined.log',
env_production: {
NODE_ENV: 'production',
KLAVIYO_PORT: 3004
}
},
{
...commonSettings,
name: 'meta-server',
script: './dashboard/meta-server/server.js',
env: {
NODE_ENV: 'production',
PORT: 3005,
...metaEnv
},
error_file: 'dashboard/meta-server/logs/pm2/err.log',
out_file: 'dashboard/meta-server/logs/pm2/out.log',
log_file: 'dashboard/meta-server/logs/pm2/combined.log',
env_production: {
NODE_ENV: 'production',
PORT: 3005
}
},
{
name: "gorgias-server",
script: "./dashboard/gorgias-server/server.js",
env: {
NODE_ENV: "development",
PORT: 3006
},
env_production: {
NODE_ENV: "production",
PORT: 3006
},
error_file: "dashboard/logs/gorgias-server-error.log",
out_file: "dashboard/logs/gorgias-server-out.log",
log_file: "dashboard/logs/gorgias-server-combined.log",
time: true
},
{
...commonSettings,
name: 'google-server',
script: path.resolve(__dirname, 'dashboard/google-server/server.js'),
watch: false,
env: {
NODE_ENV: 'production',
GOOGLE_ANALYTICS_PORT: 3007,
...googleAnalyticsEnv
},
error_file: path.resolve(__dirname, 'dashboard/google-server/logs/pm2/err.log'),
out_file: path.resolve(__dirname, 'dashboard/google-server/logs/pm2/out.log'),
log_file: path.resolve(__dirname, 'dashboard/google-server/logs/pm2/combined.log'),
env_production: {
NODE_ENV: 'production',
GOOGLE_ANALYTICS_PORT: 3007
}
},
{
...commonSettings,
name: 'typeform-server',
script: './dashboard/typeform-server/server.js',
env: {
NODE_ENV: 'production',
TYPEFORM_PORT: 3008,
...typeformEnv
},
error_file: 'dashboard/typeform-server/logs/pm2/err.log',
out_file: 'dashboard/typeform-server/logs/pm2/out.log',
log_file: 'dashboard/typeform-server/logs/pm2/combined.log',
env_production: {
NODE_ENV: 'production',
TYPEFORM_PORT: 3008
}
},
{
...commonSettings,
name: 'inventory-server',
script: './inventory/src/server.js',
env: {
NODE_ENV: 'production',
PORT: 3010,
...inventoryEnv
},
error_file: 'inventory/logs/pm2/err.log',
out_file: 'inventory/logs/pm2/out.log',
log_file: 'inventory/logs/pm2/combined.log',
env_production: {
NODE_ENV: 'production',
PORT: 3010,
...inventoryEnv
}
},
{ {
...commonSettings, ...commonSettings,
name: 'new-auth-server', name: 'new-auth-server',
@@ -7,16 +211,12 @@ module.exports = {
env: { env: {
NODE_ENV: 'production', NODE_ENV: 'production',
AUTH_PORT: 3011, AUTH_PORT: 3011,
...inventoryEnv,
JWT_SECRET: process.env.JWT_SECRET JWT_SECRET: process.env.JWT_SECRET
}, },
error_file: 'inventory-server/auth/logs/pm2/err.log', error_file: 'inventory-server/auth/logs/pm2/err.log',
out_file: 'inventory-server/auth/logs/pm2/out.log', out_file: 'inventory-server/auth/logs/pm2/out.log',
log_file: 'inventory-server/auth/logs/pm2/combined.log', log_file: 'inventory-server/auth/logs/pm2/combined.log'
env_production: {
NODE_ENV: 'production',
AUTH_PORT: 3011,
JWT_SECRET: process.env.JWT_SECRET
}
} }
] ]
}; };

View File

@@ -0,0 +1,103 @@
require('dotenv').config({ path: '../.env' });
const bcrypt = require('bcrypt');
const { Pool } = require('pg');
const inquirer = require('inquirer');

// Connection debug output: only print outside production so DB host/user
// details are not routinely logged (the previous version logged these
// unconditionally with its own "remove in production" note).
if (process.env.NODE_ENV !== 'production') {
  console.log('Attempting to connect with:', {
    host: process.env.DB_HOST,
    user: process.env.DB_USER,
    database: process.env.DB_NAME,
    port: process.env.DB_PORT
  });
}

// Single shared pool for this one-shot CLI; closed in addUser()'s finally.
const pool = new Pool({
  host: process.env.DB_HOST,
  user: process.env.DB_USER,
  password: process.env.DB_PASSWORD,
  database: process.env.DB_NAME,
  port: process.env.DB_PORT,
});
// Interactively collect a username and a confirmed password.
// Resolves to inquirer's answers object: { username, password, confirmPassword }.
async function promptUser() {
  const usernameQuestion = {
    type: 'input',
    name: 'username',
    message: 'Enter username:',
    validate: (input) =>
      input.length >= 3 ? true : 'Username must be at least 3 characters long'
  };

  const passwordQuestion = {
    type: 'password',
    name: 'password',
    message: 'Enter password:',
    mask: '*',
    validate: (input) =>
      input.length >= 8 ? true : 'Password must be at least 8 characters long'
  };

  const confirmQuestion = {
    type: 'password',
    name: 'confirmPassword',
    message: 'Confirm password:',
    mask: '*',
    // inquirer passes the answers collected so far as the second argument.
    validate: (input, answers) =>
      input === answers.password ? true : 'Passwords do not match'
  };

  return inquirer.prompt([usernameQuestion, passwordQuestion, confirmQuestion]);
}
// Create a new user: prompt for credentials, hash the password, insert.
// Exits with a non-zero status on ANY failure so shell callers can detect
// errors (the old version only set an exit code for duplicate usernames,
// and did so via process.exit(1) before the pool was closed).
async function addUser() {
  let failed = false;
  try {
    const { username, password } = await promptUser();

    // Hash password
    const saltRounds = 10;
    const hashedPassword = await bcrypt.hash(password, saltRounds);

    // Atomic duplicate handling: rely on the UNIQUE constraint on
    // users.username via ON CONFLICT instead of a racy SELECT-then-INSERT.
    const result = await pool.query(
      'INSERT INTO users (username, password) VALUES ($1, $2) ON CONFLICT (username) DO NOTHING RETURNING id',
      [username, hashedPassword]
    );

    if (result.rows.length === 0) {
      console.error('Error: Username already exists');
      failed = true;
    } else {
      console.log(`User ${username} created successfully with id ${result.rows[0].id}`);
    }
  } catch (error) {
    console.error('Error creating user:', error);
    console.error('Error details:', error.message);
    if (error.code) {
      console.error('Error code:', error.code);
    }
    failed = true;
  } finally {
    await pool.end();
  }
  if (failed) {
    process.exit(1);
  }
}
addUser();

View File

@@ -1,41 +0,0 @@
// Legacy MySQL variant of the add-user CLI (this diff deletes it in favor
// of the Postgres/inquirer version).
const bcrypt = require('bcrypt');
const mysql = require('mysql2/promise');
// Shared readline interface used by askQuestion() below.
const readline = require('readline').createInterface({
  input: process.stdin,
  output: process.stdout,
});
// Load DB credentials from the parent directory's .env.
require('dotenv').config({ path: '../.env' });
const dbConfig = {
  host: process.env.DB_HOST,
  user: process.env.DB_USER,
  password: process.env.DB_PASSWORD,
  database: process.env.DB_NAME,
};
// Prompt for credentials, hash the password, and insert a row into users.
// Always closes the connection and the readline interface when done.
async function addUser() {
  const username = await askQuestion('Enter username: ');
  const password = await askQuestion('Enter password: ');
  const hashedPassword = await bcrypt.hash(password, 10);

  const connection = await mysql.createConnection(dbConfig);
  try {
    const insertSql = 'INSERT INTO users (username, password) VALUES (?, ?)';
    await connection.query(insertSql, [username, hashedPassword]);
    console.log(`User ${username} added successfully.`);
  } catch (error) {
    console.error('Error adding user:', error);
  } finally {
    connection.end();
    readline.close();
  }
}
// Promisified wrapper around readline.question.
function askQuestion(query) {
  return new Promise((resolve) => {
    readline.question(query, (answer) => resolve(answer));
  });
}
addUser();

File diff suppressed because it is too large Load Diff

View File

@@ -1,21 +1,19 @@
{ {
"name": "auth-server", "name": "inventory-auth-server",
"version": "1.0.0", "version": "1.0.0",
"description": "Authentication server for inventory management", "description": "Authentication server for inventory management system",
"main": "server.js", "main": "server.js",
"scripts": { "scripts": {
"start": "node server.js", "start": "node server.js"
"dev": "nodemon server.js",
"add_user": "node add_user.js"
}, },
"dependencies": { "dependencies": {
"bcrypt": "^5.1.1", "bcrypt": "^5.1.1",
"cors": "^2.8.5", "cors": "^2.8.5",
"dotenv": "^16.4.5", "dotenv": "^16.4.7",
"express": "^4.18.2", "express": "^4.18.2",
"jsonwebtoken": "^9.0.2" "inquirer": "^8.2.6",
}, "jsonwebtoken": "^9.0.2",
"devDependencies": { "morgan": "^1.10.0",
"nodemon": "^3.1.0" "pg": "^8.11.3"
} }
} }

View File

@@ -1,6 +1,6 @@
CREATE TABLE `users` ( CREATE TABLE users (
`id` INT AUTO_INCREMENT PRIMARY KEY, id SERIAL PRIMARY KEY,
`username` VARCHAR(255) NOT NULL UNIQUE, username VARCHAR(255) NOT NULL UNIQUE,
`password` VARCHAR(255) NOT NULL, password VARCHAR(255) NOT NULL,
`created_at` TIMESTAMP DEFAULT CURRENT_TIMESTAMP created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
); );

View File

@@ -1,135 +1,102 @@
require('dotenv').config({ path: '../.env' });
const express = require('express'); const express = require('express');
const cors = require('cors');
const bcrypt = require('bcrypt'); const bcrypt = require('bcrypt');
const jwt = require('jsonwebtoken'); const jwt = require('jsonwebtoken');
const cors = require('cors'); const { Pool } = require('pg');
const mysql = require('mysql2/promise'); const morgan = require('morgan');
require('dotenv').config({ path: '../.env' });
// Log startup configuration
console.log('Starting auth server with config:', {
host: process.env.DB_HOST,
user: process.env.DB_USER,
database: process.env.DB_NAME,
port: process.env.DB_PORT,
auth_port: process.env.AUTH_PORT
});
const app = express(); const app = express();
const PORT = process.env.AUTH_PORT || 3011; const port = process.env.AUTH_PORT || 3011;
// Database configuration // Database configuration
const dbConfig = { const pool = new Pool({
host: process.env.DB_HOST, host: process.env.DB_HOST,
user: process.env.DB_USER, user: process.env.DB_USER,
password: process.env.DB_PASSWORD, password: process.env.DB_PASSWORD,
database: process.env.DB_NAME, database: process.env.DB_NAME,
}; port: process.env.DB_PORT,
});
// Create a connection pool // Middleware
const pool = mysql.createPool(dbConfig);
app.use(cors({
origin: [
'https://inventory.kent.pw',
'http://localhost:5173',
'http://127.0.0.1:5173',
/^http:\/\/192\.168\.\d+\.\d+(:\d+)?$/,
/^http:\/\/10\.\d+\.\d+\.\d+(:\d+)?$/
],
methods: ['GET', 'POST', 'OPTIONS'],
allowedHeaders: ['Content-Type', 'Authorization', 'X-Requested-With'],
credentials: true,
exposedHeaders: ['set-cookie']
}));
app.use(express.json()); app.use(express.json());
app.use(morgan('combined'));
// Debug middleware to log request details app.use(cors({
app.use((req, res, next) => { origin: ['http://localhost:5173', 'https://inventory.kent.pw'],
console.log('Request details:', { credentials: true
method: req.method, }));
url: req.url,
origin: req.get('Origin'),
headers: req.headers,
body: req.body,
});
next();
});
// Registration endpoint
app.post('/register', async (req, res) => {
try {
const { username, password } = req.body;
const hashedPassword = await bcrypt.hash(password, 10);
const connection = await pool.getConnection();
await connection.query('INSERT INTO users (username, password) VALUES (?, ?)', [username, hashedPassword]);
connection.release();
res.status(201).json({ message: 'User registered successfully' });
} catch (error) {
console.error('Registration error:', error);
res.status(500).json({ error: 'Registration failed' });
}
});
// Login endpoint // Login endpoint
app.post('/login', async (req, res) => { app.post('/login', async (req, res) => {
const { username, password } = req.body;
try { try {
const { username, password } = req.body; // Get user from database
console.log(`Login attempt for user: ${username}`); const result = await pool.query(
'SELECT id, username, password FROM users WHERE username = $1',
const connection = await pool.getConnection(); [username]
const [rows] = await connection.query(
'SELECT * FROM users WHERE username = ?',
[username],
); );
connection.release();
if (rows.length === 1) { const user = result.rows[0];
const user = rows[0];
const passwordMatch = await bcrypt.compare(password, user.password);
if (passwordMatch) { // Check if user exists and password is correct
console.log(`User ${username} authenticated successfully`); if (!user || !(await bcrypt.compare(password, user.password))) {
const token = jwt.sign( return res.status(401).json({ error: 'Invalid username or password' });
{ username: user.username },
process.env.JWT_SECRET,
{ expiresIn: '1h' },
);
res.json({ token });
} else {
console.error(`Invalid password for user: ${username}`);
res.status(401).json({ error: 'Invalid credentials' });
}
} else {
console.error(`User not found: ${username}`);
res.status(401).json({ error: 'Invalid credentials' });
} }
// Generate JWT token
const token = jwt.sign(
{ userId: user.id, username: user.username },
process.env.JWT_SECRET,
{ expiresIn: '24h' }
);
res.json({ token });
} catch (error) { } catch (error) {
console.error('Login error:', error); console.error('Login error:', error);
res.status(500).json({ error: 'Login failed' }); res.status(500).json({ error: 'Internal server error' });
} }
}); });
// Protected endpoint example // Protected route to verify token
app.get('/protected', async (req, res) => { app.get('/protected', async (req, res) => {
const authHeader = req.headers.authorization; const authHeader = req.headers.authorization;
if (!authHeader) { if (!authHeader) {
return res.status(401).json({ error: 'Unauthorized' }); return res.status(401).json({ error: 'No token provided' });
} }
const token = authHeader.split(' ')[1];
try { try {
const token = authHeader.split(' ')[1];
const decoded = jwt.verify(token, process.env.JWT_SECRET); const decoded = jwt.verify(token, process.env.JWT_SECRET);
res.json({ userId: decoded.userId, username: decoded.username });
// Optionally, you can fetch the user from the database here
// to verify that the user still exists or to get more user information
const connection = await pool.getConnection();
const [rows] = await connection.query('SELECT * FROM users WHERE username = ?', [decoded.username]);
connection.release();
if (rows.length === 0) {
return res.status(401).json({ error: 'User not found' });
}
res.json({ message: 'Protected resource accessed', user: decoded });
} catch (error) { } catch (error) {
console.error('Protected endpoint error:', error); console.error('Token verification error:', error);
res.status(403).json({ error: 'Invalid token' }); res.status(401).json({ error: 'Invalid token' });
} }
}); });
app.listen(PORT, "0.0.0.0", () => { // Health check endpoint
console.log(`Auth server running on port ${PORT}`); app.get('/health', (req, res) => {
}); res.json({ status: 'healthy' });
});
// Error handling middleware
app.use((err, req, res, next) => {
console.error(err.stack);
res.status(500).json({ error: 'Something broke!' });
});
// Start server
app.listen(port, () => {
console.log(`Auth server running on port ${port}`);
});

View File

@@ -1,150 +1,154 @@
-- Configuration tables schema -- Configuration tables schema
-- Stock threshold configurations -- Stock threshold configurations
CREATE TABLE IF NOT EXISTS stock_thresholds ( CREATE TABLE stock_thresholds (
id INT NOT NULL, id INTEGER NOT NULL,
category_id BIGINT, -- NULL means default/global threshold category_id BIGINT, -- NULL means default/global threshold
vendor VARCHAR(100), -- NULL means applies to all vendors vendor VARCHAR(100), -- NULL means applies to all vendors
critical_days INT NOT NULL DEFAULT 7, critical_days INTEGER NOT NULL DEFAULT 7,
reorder_days INT NOT NULL DEFAULT 14, reorder_days INTEGER NOT NULL DEFAULT 14,
overstock_days INT NOT NULL DEFAULT 90, overstock_days INTEGER NOT NULL DEFAULT 90,
low_stock_threshold INT NOT NULL DEFAULT 5, low_stock_threshold INTEGER NOT NULL DEFAULT 5,
min_reorder_quantity INT NOT NULL DEFAULT 1, min_reorder_quantity INTEGER NOT NULL DEFAULT 1,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (id), PRIMARY KEY (id),
FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE, FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
UNIQUE KEY unique_category_vendor (category_id, vendor), UNIQUE (category_id, vendor)
INDEX idx_st_metrics (category_id, vendor)
); );
CREATE INDEX idx_st_metrics ON stock_thresholds(category_id, vendor);
-- Lead time threshold configurations -- Lead time threshold configurations
CREATE TABLE IF NOT EXISTS lead_time_thresholds ( CREATE TABLE lead_time_thresholds (
id INT NOT NULL, id INTEGER NOT NULL,
category_id BIGINT, -- NULL means default/global threshold category_id BIGINT, -- NULL means default/global threshold
vendor VARCHAR(100), -- NULL means applies to all vendors vendor VARCHAR(100), -- NULL means applies to all vendors
target_days INT NOT NULL DEFAULT 14, target_days INTEGER NOT NULL DEFAULT 14,
warning_days INT NOT NULL DEFAULT 21, warning_days INTEGER NOT NULL DEFAULT 21,
critical_days INT NOT NULL DEFAULT 30, critical_days INTEGER NOT NULL DEFAULT 30,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (id), PRIMARY KEY (id),
FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE, FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
UNIQUE KEY unique_category_vendor (category_id, vendor) UNIQUE (category_id, vendor)
); );
-- Sales velocity window configurations -- Sales velocity window configurations
CREATE TABLE IF NOT EXISTS sales_velocity_config ( CREATE TABLE sales_velocity_config (
id INT NOT NULL, id INTEGER NOT NULL,
category_id BIGINT, -- NULL means default/global threshold category_id BIGINT, -- NULL means default/global threshold
vendor VARCHAR(100), -- NULL means applies to all vendors vendor VARCHAR(100), -- NULL means applies to all vendors
daily_window_days INT NOT NULL DEFAULT 30, daily_window_days INTEGER NOT NULL DEFAULT 30,
weekly_window_days INT NOT NULL DEFAULT 7, weekly_window_days INTEGER NOT NULL DEFAULT 7,
monthly_window_days INT NOT NULL DEFAULT 90, monthly_window_days INTEGER NOT NULL DEFAULT 90,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (id), PRIMARY KEY (id),
FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE, FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
UNIQUE KEY unique_category_vendor (category_id, vendor), UNIQUE (category_id, vendor)
INDEX idx_sv_metrics (category_id, vendor)
); );
CREATE INDEX idx_sv_metrics ON sales_velocity_config(category_id, vendor);
-- ABC Classification configurations -- ABC Classification configurations
CREATE TABLE IF NOT EXISTS abc_classification_config ( CREATE TABLE abc_classification_config (
id INT NOT NULL PRIMARY KEY, id INTEGER NOT NULL PRIMARY KEY,
a_threshold DECIMAL(5,2) NOT NULL DEFAULT 20.0, a_threshold DECIMAL(5,2) NOT NULL DEFAULT 20.0,
b_threshold DECIMAL(5,2) NOT NULL DEFAULT 50.0, b_threshold DECIMAL(5,2) NOT NULL DEFAULT 50.0,
classification_period_days INT NOT NULL DEFAULT 90, classification_period_days INTEGER NOT NULL DEFAULT 90,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
); );
-- Safety stock configurations -- Safety stock configurations
CREATE TABLE IF NOT EXISTS safety_stock_config ( CREATE TABLE safety_stock_config (
id INT NOT NULL, id INTEGER NOT NULL,
category_id BIGINT, -- NULL means default/global threshold category_id BIGINT, -- NULL means default/global threshold
vendor VARCHAR(100), -- NULL means applies to all vendors vendor VARCHAR(100), -- NULL means applies to all vendors
coverage_days INT NOT NULL DEFAULT 14, coverage_days INTEGER NOT NULL DEFAULT 14,
service_level DECIMAL(5,2) NOT NULL DEFAULT 95.0, service_level DECIMAL(5,2) NOT NULL DEFAULT 95.0,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (id), PRIMARY KEY (id),
FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE, FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
UNIQUE KEY unique_category_vendor (category_id, vendor), UNIQUE (category_id, vendor)
INDEX idx_ss_metrics (category_id, vendor)
); );
CREATE INDEX idx_ss_metrics ON safety_stock_config(category_id, vendor);
-- Turnover rate configurations -- Turnover rate configurations
CREATE TABLE IF NOT EXISTS turnover_config ( CREATE TABLE turnover_config (
id INT NOT NULL, id INTEGER NOT NULL,
category_id BIGINT, -- NULL means default/global threshold category_id BIGINT, -- NULL means default/global threshold
vendor VARCHAR(100), -- NULL means applies to all vendors vendor VARCHAR(100), -- NULL means applies to all vendors
calculation_period_days INT NOT NULL DEFAULT 30, calculation_period_days INTEGER NOT NULL DEFAULT 30,
target_rate DECIMAL(10,2) NOT NULL DEFAULT 1.0, target_rate DECIMAL(10,2) NOT NULL DEFAULT 1.0,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (id), PRIMARY KEY (id),
FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE, FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
UNIQUE KEY unique_category_vendor (category_id, vendor) UNIQUE (category_id, vendor)
); );
-- Create table for sales seasonality factors -- Create table for sales seasonality factors
CREATE TABLE IF NOT EXISTS sales_seasonality ( CREATE TABLE sales_seasonality (
month INT NOT NULL, month INTEGER NOT NULL,
seasonality_factor DECIMAL(5,3) DEFAULT 0, seasonality_factor DECIMAL(5,3) DEFAULT 0,
last_updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, last_updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (month), PRIMARY KEY (month),
CHECK (month BETWEEN 1 AND 12), CONSTRAINT month_range CHECK (month BETWEEN 1 AND 12),
CHECK (seasonality_factor BETWEEN -1.0 AND 1.0) CONSTRAINT seasonality_range CHECK (seasonality_factor BETWEEN -1.0 AND 1.0)
); );
-- Insert default global thresholds if not exists -- Insert default global thresholds
INSERT INTO stock_thresholds (id, category_id, vendor, critical_days, reorder_days, overstock_days) INSERT INTO stock_thresholds (id, category_id, vendor, critical_days, reorder_days, overstock_days)
VALUES (1, NULL, NULL, 7, 14, 90) VALUES (1, NULL, NULL, 7, 14, 90)
ON DUPLICATE KEY UPDATE ON CONFLICT (id) DO UPDATE SET
critical_days = VALUES(critical_days), critical_days = EXCLUDED.critical_days,
reorder_days = VALUES(reorder_days), reorder_days = EXCLUDED.reorder_days,
overstock_days = VALUES(overstock_days); overstock_days = EXCLUDED.overstock_days;
INSERT INTO lead_time_thresholds (id, category_id, vendor, target_days, warning_days, critical_days) INSERT INTO lead_time_thresholds (id, category_id, vendor, target_days, warning_days, critical_days)
VALUES (1, NULL, NULL, 14, 21, 30) VALUES (1, NULL, NULL, 14, 21, 30)
ON DUPLICATE KEY UPDATE ON CONFLICT (id) DO UPDATE SET
target_days = VALUES(target_days), target_days = EXCLUDED.target_days,
warning_days = VALUES(warning_days), warning_days = EXCLUDED.warning_days,
critical_days = VALUES(critical_days); critical_days = EXCLUDED.critical_days;
INSERT INTO sales_velocity_config (id, category_id, vendor, daily_window_days, weekly_window_days, monthly_window_days) INSERT INTO sales_velocity_config (id, category_id, vendor, daily_window_days, weekly_window_days, monthly_window_days)
VALUES (1, NULL, NULL, 30, 7, 90) VALUES (1, NULL, NULL, 30, 7, 90)
ON DUPLICATE KEY UPDATE ON CONFLICT (id) DO UPDATE SET
daily_window_days = VALUES(daily_window_days), daily_window_days = EXCLUDED.daily_window_days,
weekly_window_days = VALUES(weekly_window_days), weekly_window_days = EXCLUDED.weekly_window_days,
monthly_window_days = VALUES(monthly_window_days); monthly_window_days = EXCLUDED.monthly_window_days;
INSERT INTO abc_classification_config (id, a_threshold, b_threshold, classification_period_days) INSERT INTO abc_classification_config (id, a_threshold, b_threshold, classification_period_days)
VALUES (1, 20.0, 50.0, 90) VALUES (1, 20.0, 50.0, 90)
ON DUPLICATE KEY UPDATE ON CONFLICT (id) DO UPDATE SET
a_threshold = VALUES(a_threshold), a_threshold = EXCLUDED.a_threshold,
b_threshold = VALUES(b_threshold), b_threshold = EXCLUDED.b_threshold,
classification_period_days = VALUES(classification_period_days); classification_period_days = EXCLUDED.classification_period_days;
INSERT INTO safety_stock_config (id, category_id, vendor, coverage_days, service_level) INSERT INTO safety_stock_config (id, category_id, vendor, coverage_days, service_level)
VALUES (1, NULL, NULL, 14, 95.0) VALUES (1, NULL, NULL, 14, 95.0)
ON DUPLICATE KEY UPDATE ON CONFLICT (id) DO UPDATE SET
coverage_days = VALUES(coverage_days), coverage_days = EXCLUDED.coverage_days,
service_level = VALUES(service_level); service_level = EXCLUDED.service_level;
INSERT INTO turnover_config (id, category_id, vendor, calculation_period_days, target_rate) INSERT INTO turnover_config (id, category_id, vendor, calculation_period_days, target_rate)
VALUES (1, NULL, NULL, 30, 1.0) VALUES (1, NULL, NULL, 30, 1.0)
ON DUPLICATE KEY UPDATE ON CONFLICT (id) DO UPDATE SET
calculation_period_days = VALUES(calculation_period_days), calculation_period_days = EXCLUDED.calculation_period_days,
target_rate = VALUES(target_rate); target_rate = EXCLUDED.target_rate;
-- Insert default seasonality factors (neutral) -- Insert default seasonality factors (neutral)
INSERT INTO sales_seasonality (month, seasonality_factor) INSERT INTO sales_seasonality (month, seasonality_factor)
VALUES VALUES
(1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), (1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0),
(7, 0), (8, 0), (9, 0), (10, 0), (11, 0), (12, 0) (7, 0), (8, 0), (9, 0), (10, 0), (11, 0), (12, 0)
ON DUPLICATE KEY UPDATE last_updated = CURRENT_TIMESTAMP; ON CONFLICT (month) DO UPDATE SET
last_updated = CURRENT_TIMESTAMP;
-- View to show thresholds with category names -- View to show thresholds with category names
CREATE OR REPLACE VIEW stock_thresholds_view AS CREATE OR REPLACE VIEW stock_thresholds_view AS
@@ -153,9 +157,9 @@ SELECT
c.name as category_name, c.name as category_name,
CASE CASE
WHEN st.category_id IS NULL AND st.vendor IS NULL THEN 'Global Default' WHEN st.category_id IS NULL AND st.vendor IS NULL THEN 'Global Default'
WHEN st.category_id IS NULL THEN CONCAT('Vendor: ', st.vendor) WHEN st.category_id IS NULL THEN 'Vendor: ' || st.vendor
WHEN st.vendor IS NULL THEN CONCAT('Category: ', c.name) WHEN st.vendor IS NULL THEN 'Category: ' || c.name
ELSE CONCAT('Category: ', c.name, ' / Vendor: ', st.vendor) ELSE 'Category: ' || c.name || ' / Vendor: ' || st.vendor
END as threshold_scope END as threshold_scope
FROM FROM
stock_thresholds st stock_thresholds st
@@ -171,59 +175,51 @@ ORDER BY
c.name, c.name,
st.vendor; st.vendor;
-- History and status tables
CREATE TABLE IF NOT EXISTS calculate_history ( CREATE TABLE IF NOT EXISTS calculate_history (
id BIGINT AUTO_INCREMENT PRIMARY KEY, id BIGSERIAL PRIMARY KEY,
start_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, start_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
end_time TIMESTAMP NULL, end_time TIMESTAMP NULL,
duration_seconds INT, duration_seconds INTEGER,
duration_minutes DECIMAL(10,2) GENERATED ALWAYS AS (duration_seconds / 60.0) STORED, duration_minutes DECIMAL(10,2) GENERATED ALWAYS AS (duration_seconds::decimal / 60.0) STORED,
total_products INT DEFAULT 0, total_products INTEGER DEFAULT 0,
total_orders INT DEFAULT 0, total_orders INTEGER DEFAULT 0,
total_purchase_orders INT DEFAULT 0, total_purchase_orders INTEGER DEFAULT 0,
processed_products INT DEFAULT 0, processed_products INTEGER DEFAULT 0,
processed_orders INT DEFAULT 0, processed_orders INTEGER DEFAULT 0,
processed_purchase_orders INT DEFAULT 0, processed_purchase_orders INTEGER DEFAULT 0,
status ENUM('running', 'completed', 'failed', 'cancelled') DEFAULT 'running', status calculation_status DEFAULT 'running',
error_message TEXT, error_message TEXT,
additional_info JSON, additional_info JSONB
INDEX idx_status_time (status, start_time)
); );
CREATE TABLE IF NOT EXISTS calculate_status ( CREATE TABLE IF NOT EXISTS calculate_status (
module_name ENUM( module_name module_name PRIMARY KEY,
'product_metrics', last_calculation_timestamp TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
'time_aggregates',
'financial_metrics',
'vendor_metrics',
'category_metrics',
'brand_metrics',
'sales_forecasts',
'abc_classification'
) PRIMARY KEY,
last_calculation_timestamp TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
INDEX idx_last_calc (last_calculation_timestamp)
); );
CREATE TABLE IF NOT EXISTS sync_status ( CREATE TABLE IF NOT EXISTS sync_status (
table_name VARCHAR(50) PRIMARY KEY, table_name VARCHAR(50) PRIMARY KEY,
last_sync_timestamp TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, last_sync_timestamp TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
last_sync_id BIGINT, last_sync_id BIGINT
INDEX idx_last_sync (last_sync_timestamp)
); );
CREATE TABLE IF NOT EXISTS import_history ( CREATE TABLE IF NOT EXISTS import_history (
id BIGINT AUTO_INCREMENT PRIMARY KEY, id BIGSERIAL PRIMARY KEY,
table_name VARCHAR(50) NOT NULL, table_name VARCHAR(50) NOT NULL,
start_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, start_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
end_time TIMESTAMP NULL, end_time TIMESTAMP NULL,
duration_seconds INT, duration_seconds INTEGER,
duration_minutes DECIMAL(10,2) GENERATED ALWAYS AS (duration_seconds / 60.0) STORED, duration_minutes DECIMAL(10,2) GENERATED ALWAYS AS (duration_seconds::decimal / 60.0) STORED,
records_added INT DEFAULT 0, records_added INTEGER DEFAULT 0,
records_updated INT DEFAULT 0, records_updated INTEGER DEFAULT 0,
is_incremental BOOLEAN DEFAULT FALSE, is_incremental BOOLEAN DEFAULT FALSE,
status ENUM('running', 'completed', 'failed', 'cancelled') DEFAULT 'running', status calculation_status DEFAULT 'running',
error_message TEXT, error_message TEXT,
additional_info JSON, additional_info JSONB
INDEX idx_table_time (table_name, start_time), );
INDEX idx_status (status)
); -- Create all indexes after tables are fully created
CREATE INDEX IF NOT EXISTS idx_last_calc ON calculate_status(last_calculation_timestamp);
CREATE INDEX IF NOT EXISTS idx_last_sync ON sync_status(last_sync_timestamp);
CREATE INDEX IF NOT EXISTS idx_table_time ON import_history(table_name, start_time);

View File

@@ -1,8 +1,8 @@
-- Disable foreign key checks -- Disable foreign key checks
SET FOREIGN_KEY_CHECKS = 0; SET session_replication_role = 'replica';
-- Temporary tables for batch metrics processing -- Temporary tables for batch metrics processing
CREATE TABLE IF NOT EXISTS temp_sales_metrics ( CREATE TABLE temp_sales_metrics (
pid BIGINT NOT NULL, pid BIGINT NOT NULL,
daily_sales_avg DECIMAL(10,3), daily_sales_avg DECIMAL(10,3),
weekly_sales_avg DECIMAL(10,3), weekly_sales_avg DECIMAL(10,3),
@@ -14,9 +14,9 @@ CREATE TABLE IF NOT EXISTS temp_sales_metrics (
PRIMARY KEY (pid) PRIMARY KEY (pid)
); );
CREATE TABLE IF NOT EXISTS temp_purchase_metrics ( CREATE TABLE temp_purchase_metrics (
pid BIGINT NOT NULL, pid BIGINT NOT NULL,
avg_lead_time_days INT, avg_lead_time_days INTEGER,
last_purchase_date DATE, last_purchase_date DATE,
first_received_date DATE, first_received_date DATE,
last_received_date DATE, last_received_date DATE,
@@ -24,7 +24,7 @@ CREATE TABLE IF NOT EXISTS temp_purchase_metrics (
); );
-- New table for product metrics -- New table for product metrics
CREATE TABLE IF NOT EXISTS product_metrics ( CREATE TABLE product_metrics (
pid BIGINT NOT NULL, pid BIGINT NOT NULL,
last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-- Sales velocity metrics -- Sales velocity metrics
@@ -32,16 +32,16 @@ CREATE TABLE IF NOT EXISTS product_metrics (
weekly_sales_avg DECIMAL(10,3), weekly_sales_avg DECIMAL(10,3),
monthly_sales_avg DECIMAL(10,3), monthly_sales_avg DECIMAL(10,3),
avg_quantity_per_order DECIMAL(10,3), avg_quantity_per_order DECIMAL(10,3),
number_of_orders INT, number_of_orders INTEGER,
first_sale_date DATE, first_sale_date DATE,
last_sale_date DATE, last_sale_date DATE,
-- Stock metrics -- Stock metrics
days_of_inventory INT, days_of_inventory INTEGER,
weeks_of_inventory INT, weeks_of_inventory INTEGER,
reorder_point INT, reorder_point INTEGER,
safety_stock INT, safety_stock INTEGER,
reorder_qty INT DEFAULT 0, reorder_qty INTEGER DEFAULT 0,
overstocked_amt INT DEFAULT 0, overstocked_amt INTEGER DEFAULT 0,
-- Financial metrics -- Financial metrics
avg_margin_percent DECIMAL(10,3), avg_margin_percent DECIMAL(10,3),
total_revenue DECIMAL(10,3), total_revenue DECIMAL(10,3),
@@ -50,7 +50,7 @@ CREATE TABLE IF NOT EXISTS product_metrics (
gross_profit DECIMAL(10,3), gross_profit DECIMAL(10,3),
gmroi DECIMAL(10,3), gmroi DECIMAL(10,3),
-- Purchase metrics -- Purchase metrics
avg_lead_time_days INT, avg_lead_time_days INTEGER,
last_purchase_date DATE, last_purchase_date DATE,
first_received_date DATE, first_received_date DATE,
last_received_date DATE, last_received_date DATE,
@@ -60,48 +60,50 @@ CREATE TABLE IF NOT EXISTS product_metrics (
-- Turnover metrics -- Turnover metrics
turnover_rate DECIMAL(12,3), turnover_rate DECIMAL(12,3),
-- Lead time metrics -- Lead time metrics
current_lead_time INT, current_lead_time INTEGER,
target_lead_time INT, target_lead_time INTEGER,
lead_time_status VARCHAR(20), lead_time_status VARCHAR(20),
-- Forecast metrics -- Forecast metrics
forecast_accuracy DECIMAL(5,2) DEFAULT NULL, forecast_accuracy DECIMAL(5,2) DEFAULT NULL,
forecast_bias DECIMAL(5,2) DEFAULT NULL, forecast_bias DECIMAL(5,2) DEFAULT NULL,
last_forecast_date DATE DEFAULT NULL, last_forecast_date DATE DEFAULT NULL,
PRIMARY KEY (pid), PRIMARY KEY (pid),
FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE, FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE
INDEX idx_metrics_revenue (total_revenue),
INDEX idx_metrics_stock_status (stock_status),
INDEX idx_metrics_lead_time (lead_time_status),
INDEX idx_metrics_turnover (turnover_rate),
INDEX idx_metrics_last_calculated (last_calculated_at),
INDEX idx_metrics_abc (abc_class),
INDEX idx_metrics_sales (daily_sales_avg, weekly_sales_avg, monthly_sales_avg),
INDEX idx_metrics_forecast (forecast_accuracy, forecast_bias)
); );
CREATE INDEX idx_metrics_revenue ON product_metrics(total_revenue);
CREATE INDEX idx_metrics_stock_status ON product_metrics(stock_status);
CREATE INDEX idx_metrics_lead_time ON product_metrics(lead_time_status);
CREATE INDEX idx_metrics_turnover ON product_metrics(turnover_rate);
CREATE INDEX idx_metrics_last_calculated ON product_metrics(last_calculated_at);
CREATE INDEX idx_metrics_abc ON product_metrics(abc_class);
CREATE INDEX idx_metrics_sales ON product_metrics(daily_sales_avg, weekly_sales_avg, monthly_sales_avg);
CREATE INDEX idx_metrics_forecast ON product_metrics(forecast_accuracy, forecast_bias);
-- New table for time-based aggregates -- New table for time-based aggregates
CREATE TABLE IF NOT EXISTS product_time_aggregates ( CREATE TABLE product_time_aggregates (
pid BIGINT NOT NULL, pid BIGINT NOT NULL,
year INT NOT NULL, year INTEGER NOT NULL,
month INT NOT NULL, month INTEGER NOT NULL,
-- Sales metrics -- Sales metrics
total_quantity_sold INT DEFAULT 0, total_quantity_sold INTEGER DEFAULT 0,
total_revenue DECIMAL(10,3) DEFAULT 0, total_revenue DECIMAL(10,3) DEFAULT 0,
total_cost DECIMAL(10,3) DEFAULT 0, total_cost DECIMAL(10,3) DEFAULT 0,
order_count INT DEFAULT 0, order_count INTEGER DEFAULT 0,
-- Stock changes -- Stock changes
stock_received INT DEFAULT 0, stock_received INTEGER DEFAULT 0,
stock_ordered INT DEFAULT 0, stock_ordered INTEGER DEFAULT 0,
-- Calculated fields -- Calculated fields
avg_price DECIMAL(10,3), avg_price DECIMAL(10,3),
profit_margin DECIMAL(10,3), profit_margin DECIMAL(10,3),
inventory_value DECIMAL(10,3), inventory_value DECIMAL(10,3),
gmroi DECIMAL(10,3), gmroi DECIMAL(10,3),
PRIMARY KEY (pid, year, month), PRIMARY KEY (pid, year, month),
FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE, FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE
INDEX idx_date (year, month)
); );
CREATE INDEX idx_date ON product_time_aggregates(year, month);
-- Create vendor_details table -- Create vendor_details table
CREATE TABLE vendor_details ( CREATE TABLE vendor_details (
vendor VARCHAR(100) PRIMARY KEY, vendor VARCHAR(100) PRIMARY KEY,
@@ -110,45 +112,47 @@ CREATE TABLE vendor_details (
phone VARCHAR(50), phone VARCHAR(50),
status VARCHAR(20) DEFAULT 'active', status VARCHAR(20) DEFAULT 'active',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
INDEX idx_status (status) );
) ENGINE=InnoDB;
CREATE INDEX idx_vendor_details_status ON vendor_details(status);
-- New table for vendor metrics -- New table for vendor metrics
CREATE TABLE IF NOT EXISTS vendor_metrics ( CREATE TABLE vendor_metrics (
vendor VARCHAR(100) NOT NULL, vendor VARCHAR(100) NOT NULL,
last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-- Performance metrics -- Performance metrics
avg_lead_time_days DECIMAL(10,3), avg_lead_time_days DECIMAL(10,3),
on_time_delivery_rate DECIMAL(5,2), on_time_delivery_rate DECIMAL(5,2),
order_fill_rate DECIMAL(5,2), order_fill_rate DECIMAL(5,2),
total_orders INT DEFAULT 0, total_orders INTEGER DEFAULT 0,
total_late_orders INT DEFAULT 0, total_late_orders INTEGER DEFAULT 0,
total_purchase_value DECIMAL(10,3) DEFAULT 0, total_purchase_value DECIMAL(10,3) DEFAULT 0,
avg_order_value DECIMAL(10,3), avg_order_value DECIMAL(10,3),
-- Product metrics -- Product metrics
active_products INT DEFAULT 0, active_products INTEGER DEFAULT 0,
total_products INT DEFAULT 0, total_products INTEGER DEFAULT 0,
-- Financial metrics -- Financial metrics
total_revenue DECIMAL(10,3) DEFAULT 0, total_revenue DECIMAL(10,3) DEFAULT 0,
avg_margin_percent DECIMAL(5,2), avg_margin_percent DECIMAL(5,2),
-- Status -- Status
status VARCHAR(20) DEFAULT 'active', status VARCHAR(20) DEFAULT 'active',
PRIMARY KEY (vendor), PRIMARY KEY (vendor),
FOREIGN KEY (vendor) REFERENCES vendor_details(vendor) ON DELETE CASCADE, FOREIGN KEY (vendor) REFERENCES vendor_details(vendor) ON DELETE CASCADE
INDEX idx_vendor_performance (on_time_delivery_rate),
INDEX idx_vendor_status (status),
INDEX idx_metrics_last_calculated (last_calculated_at),
INDEX idx_vendor_metrics_orders (total_orders, total_late_orders)
); );
CREATE INDEX idx_vendor_performance ON vendor_metrics(on_time_delivery_rate);
CREATE INDEX idx_vendor_status ON vendor_metrics(status);
CREATE INDEX idx_vendor_metrics_last_calculated ON vendor_metrics(last_calculated_at);
CREATE INDEX idx_vendor_metrics_orders ON vendor_metrics(total_orders, total_late_orders);
-- New table for category metrics -- New table for category metrics
CREATE TABLE IF NOT EXISTS category_metrics ( CREATE TABLE category_metrics (
category_id BIGINT NOT NULL, category_id BIGINT NOT NULL,
last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-- Product metrics -- Product metrics
product_count INT DEFAULT 0, product_count INTEGER DEFAULT 0,
active_products INT DEFAULT 0, active_products INTEGER DEFAULT 0,
-- Financial metrics -- Financial metrics
total_value DECIMAL(15,3) DEFAULT 0, total_value DECIMAL(15,3) DEFAULT 0,
avg_margin DECIMAL(5,2), avg_margin DECIMAL(5,2),
@@ -157,255 +161,215 @@ CREATE TABLE IF NOT EXISTS category_metrics (
-- Status -- Status
status VARCHAR(20) DEFAULT 'active', status VARCHAR(20) DEFAULT 'active',
PRIMARY KEY (category_id), PRIMARY KEY (category_id),
FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE, FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE
INDEX idx_category_status (status),
INDEX idx_category_growth (growth_rate),
INDEX idx_metrics_last_calculated (last_calculated_at),
INDEX idx_category_metrics_products (product_count, active_products)
); );
CREATE INDEX idx_category_status ON category_metrics(status);
CREATE INDEX idx_category_growth ON category_metrics(growth_rate);
CREATE INDEX idx_metrics_last_calculated_cat ON category_metrics(last_calculated_at);
CREATE INDEX idx_category_metrics_products ON category_metrics(product_count, active_products);
-- New table for vendor time-based metrics -- New table for vendor time-based metrics
CREATE TABLE IF NOT EXISTS vendor_time_metrics ( CREATE TABLE vendor_time_metrics (
vendor VARCHAR(100) NOT NULL, vendor VARCHAR(100) NOT NULL,
year INT NOT NULL, year INTEGER NOT NULL,
month INT NOT NULL, month INTEGER NOT NULL,
-- Order metrics -- Order metrics
total_orders INT DEFAULT 0, total_orders INTEGER DEFAULT 0,
late_orders INT DEFAULT 0, late_orders INTEGER DEFAULT 0,
avg_lead_time_days DECIMAL(10,3), avg_lead_time_days DECIMAL(10,3),
-- Financial metrics -- Financial metrics
total_purchase_value DECIMAL(10,3) DEFAULT 0, total_purchase_value DECIMAL(10,3) DEFAULT 0,
total_revenue DECIMAL(10,3) DEFAULT 0, total_revenue DECIMAL(10,3) DEFAULT 0,
avg_margin_percent DECIMAL(5,2), avg_margin_percent DECIMAL(5,2),
PRIMARY KEY (vendor, year, month), PRIMARY KEY (vendor, year, month),
FOREIGN KEY (vendor) REFERENCES vendor_details(vendor) ON DELETE CASCADE, FOREIGN KEY (vendor) REFERENCES vendor_details(vendor) ON DELETE CASCADE
INDEX idx_vendor_date (year, month)
); );
CREATE INDEX idx_vendor_date ON vendor_time_metrics(year, month);
-- New table for category time-based metrics -- New table for category time-based metrics
CREATE TABLE IF NOT EXISTS category_time_metrics ( CREATE TABLE category_time_metrics (
category_id BIGINT NOT NULL, category_id BIGINT NOT NULL,
year INT NOT NULL, year INTEGER NOT NULL,
month INT NOT NULL, month INTEGER NOT NULL,
-- Product metrics -- Product metrics
product_count INT DEFAULT 0, product_count INTEGER DEFAULT 0,
active_products INT DEFAULT 0, active_products INTEGER DEFAULT 0,
-- Financial metrics -- Financial metrics
total_value DECIMAL(15,3) DEFAULT 0, total_value DECIMAL(15,3) DEFAULT 0,
total_revenue DECIMAL(15,3) DEFAULT 0, total_revenue DECIMAL(15,3) DEFAULT 0,
avg_margin DECIMAL(5,2), avg_margin DECIMAL(5,2),
turnover_rate DECIMAL(12,3), turnover_rate DECIMAL(12,3),
PRIMARY KEY (category_id, year, month), PRIMARY KEY (category_id, year, month),
FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE, FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE
INDEX idx_category_date (year, month)
); );
CREATE INDEX idx_category_date ON category_time_metrics(year, month);
-- New table for category-based sales metrics -- New table for category-based sales metrics
CREATE TABLE IF NOT EXISTS category_sales_metrics ( CREATE TABLE category_sales_metrics (
category_id BIGINT NOT NULL, category_id BIGINT NOT NULL,
brand VARCHAR(100) NOT NULL, brand VARCHAR(100) NOT NULL,
period_start DATE NOT NULL, period_start DATE NOT NULL,
period_end DATE NOT NULL, period_end DATE NOT NULL,
avg_daily_sales DECIMAL(10,3) DEFAULT 0, avg_daily_sales DECIMAL(10,3) DEFAULT 0,
total_sold INT DEFAULT 0, total_sold INTEGER DEFAULT 0,
num_products INT DEFAULT 0, num_products INTEGER DEFAULT 0,
avg_price DECIMAL(10,3) DEFAULT 0, avg_price DECIMAL(10,3) DEFAULT 0,
last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (category_id, brand, period_start, period_end), PRIMARY KEY (category_id, brand, period_start, period_end),
FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE, FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE
INDEX idx_category_brand (category_id, brand),
INDEX idx_period (period_start, period_end)
); );
CREATE INDEX idx_category_brand ON category_sales_metrics(category_id, brand);
CREATE INDEX idx_period ON category_sales_metrics(period_start, period_end);
-- New table for brand metrics -- New table for brand metrics
CREATE TABLE IF NOT EXISTS brand_metrics ( CREATE TABLE brand_metrics (
brand VARCHAR(100) NOT NULL, brand VARCHAR(100) NOT NULL,
last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-- Product metrics -- Product metrics
product_count INT DEFAULT 0, product_count INTEGER DEFAULT 0,
active_products INT DEFAULT 0, active_products INTEGER DEFAULT 0,
-- Stock metrics -- Stock metrics
total_stock_units INT DEFAULT 0, total_stock_units INTEGER DEFAULT 0,
total_stock_cost DECIMAL(15,2) DEFAULT 0, total_stock_cost DECIMAL(15,2) DEFAULT 0,
total_stock_retail DECIMAL(15,2) DEFAULT 0, total_stock_retail DECIMAL(15,2) DEFAULT 0,
-- Sales metrics -- Sales metrics
total_revenue DECIMAL(15,2) DEFAULT 0, total_revenue DECIMAL(15,2) DEFAULT 0,
avg_margin DECIMAL(5,2) DEFAULT 0, avg_margin DECIMAL(5,2) DEFAULT 0,
growth_rate DECIMAL(5,2) DEFAULT 0, growth_rate DECIMAL(5,2) DEFAULT 0,
PRIMARY KEY (brand), PRIMARY KEY (brand)
INDEX idx_brand_metrics_last_calculated (last_calculated_at),
INDEX idx_brand_metrics_revenue (total_revenue),
INDEX idx_brand_metrics_growth (growth_rate)
); );
CREATE INDEX idx_brand_metrics_last_calculated ON brand_metrics(last_calculated_at);
CREATE INDEX idx_brand_metrics_revenue ON brand_metrics(total_revenue);
CREATE INDEX idx_brand_metrics_growth ON brand_metrics(growth_rate);
-- New table for brand time-based metrics -- New table for brand time-based metrics
CREATE TABLE IF NOT EXISTS brand_time_metrics ( CREATE TABLE brand_time_metrics (
brand VARCHAR(100) NOT NULL, brand VARCHAR(100) NOT NULL,
year INT NOT NULL, year INTEGER NOT NULL,
month INT NOT NULL, month INTEGER NOT NULL,
-- Product metrics -- Product metrics
product_count INT DEFAULT 0, product_count INTEGER DEFAULT 0,
active_products INT DEFAULT 0, active_products INTEGER DEFAULT 0,
-- Stock metrics -- Stock metrics
total_stock_units INT DEFAULT 0, total_stock_units INTEGER DEFAULT 0,
total_stock_cost DECIMAL(15,2) DEFAULT 0, total_stock_cost DECIMAL(15,2) DEFAULT 0,
total_stock_retail DECIMAL(15,2) DEFAULT 0, total_stock_retail DECIMAL(15,2) DEFAULT 0,
-- Sales metrics -- Sales metrics
total_revenue DECIMAL(15,2) DEFAULT 0, total_revenue DECIMAL(15,2) DEFAULT 0,
avg_margin DECIMAL(5,2) DEFAULT 0, avg_margin DECIMAL(5,2) DEFAULT 0,
PRIMARY KEY (brand, year, month), growth_rate DECIMAL(5,2) DEFAULT 0,
INDEX idx_brand_date (year, month) PRIMARY KEY (brand, year, month)
); );
CREATE INDEX idx_brand_time_date ON brand_time_metrics(year, month);
-- New table for sales forecasts -- New table for sales forecasts
CREATE TABLE IF NOT EXISTS sales_forecasts ( CREATE TABLE sales_forecasts (
pid BIGINT NOT NULL, pid BIGINT NOT NULL,
forecast_date DATE NOT NULL, forecast_date DATE NOT NULL,
forecast_units DECIMAL(10,2) DEFAULT 0, forecast_quantity INTEGER,
forecast_revenue DECIMAL(10,2) DEFAULT 0, confidence_level DECIMAL(5,2),
confidence_level DECIMAL(5,2) DEFAULT 0, created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (pid, forecast_date), PRIMARY KEY (pid, forecast_date),
FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE, FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE
INDEX idx_forecast_date (forecast_date),
INDEX idx_forecast_last_calculated (last_calculated_at)
); );
CREATE INDEX idx_forecast_date ON sales_forecasts(forecast_date);
-- New table for category forecasts -- New table for category forecasts
CREATE TABLE IF NOT EXISTS category_forecasts ( CREATE TABLE category_forecasts (
category_id BIGINT NOT NULL, category_id BIGINT NOT NULL,
forecast_date DATE NOT NULL, forecast_date DATE NOT NULL,
forecast_units DECIMAL(10,2) DEFAULT 0, forecast_revenue DECIMAL(15,2),
forecast_revenue DECIMAL(10,2) DEFAULT 0, forecast_units INTEGER,
confidence_level DECIMAL(5,2) DEFAULT 0, confidence_level DECIMAL(5,2),
last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (category_id, forecast_date), PRIMARY KEY (category_id, forecast_date),
FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE, FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE
INDEX idx_category_forecast_date (forecast_date),
INDEX idx_category_forecast_last_calculated (last_calculated_at)
); );
-- Create view for inventory health CREATE INDEX idx_cat_forecast_date ON category_forecasts(forecast_date);
-- Create views for common calculations
CREATE OR REPLACE VIEW inventory_health AS CREATE OR REPLACE VIEW inventory_health AS
WITH product_thresholds AS ( WITH stock_levels AS (
SELECT SELECT
p.pid, p.pid,
COALESCE( p.title,
-- Try category+vendor specific p.SKU,
(SELECT critical_days FROM stock_thresholds st p.stock_quantity,
JOIN product_categories pc ON st.category_id = pc.cat_id p.preorder_count,
WHERE pc.pid = p.pid pm.daily_sales_avg,
AND st.vendor = p.vendor LIMIT 1), pm.weekly_sales_avg,
-- Try category specific pm.monthly_sales_avg,
(SELECT critical_days FROM stock_thresholds st pm.reorder_point,
JOIN product_categories pc ON st.category_id = pc.cat_id pm.safety_stock,
WHERE pc.pid = p.pid pm.days_of_inventory,
AND st.vendor IS NULL LIMIT 1), pm.weeks_of_inventory,
-- Try vendor specific pm.stock_status,
(SELECT critical_days FROM stock_thresholds st pm.abc_class,
WHERE st.category_id IS NULL pm.turnover_rate,
AND st.vendor = p.vendor LIMIT 1), pm.avg_lead_time_days,
-- Fall back to default pm.current_lead_time,
(SELECT critical_days FROM stock_thresholds st pm.target_lead_time,
WHERE st.category_id IS NULL pm.lead_time_status,
AND st.vendor IS NULL LIMIT 1), p.cost_price,
7 p.price,
) as critical_days, pm.inventory_value,
COALESCE( pm.gmroi
-- Try category+vendor specific
(SELECT reorder_days FROM stock_thresholds st
JOIN product_categories pc ON st.category_id = pc.cat_id
WHERE pc.pid = p.pid
AND st.vendor = p.vendor LIMIT 1),
-- Try category specific
(SELECT reorder_days FROM stock_thresholds st
JOIN product_categories pc ON st.category_id = pc.cat_id
WHERE pc.pid = p.pid
AND st.vendor IS NULL LIMIT 1),
-- Try vendor specific
(SELECT reorder_days FROM stock_thresholds st
WHERE st.category_id IS NULL
AND st.vendor = p.vendor LIMIT 1),
-- Fall back to default
(SELECT reorder_days FROM stock_thresholds st
WHERE st.category_id IS NULL
AND st.vendor IS NULL LIMIT 1),
14
) as reorder_days,
COALESCE(
-- Try category+vendor specific
(SELECT overstock_days FROM stock_thresholds st
JOIN product_categories pc ON st.category_id = pc.cat_id
WHERE pc.pid = p.pid
AND st.vendor = p.vendor LIMIT 1),
-- Try category specific
(SELECT overstock_days FROM stock_thresholds st
JOIN product_categories pc ON st.category_id = pc.cat_id
WHERE pc.pid = p.pid
AND st.vendor IS NULL LIMIT 1),
-- Try vendor specific
(SELECT overstock_days FROM stock_thresholds st
WHERE st.category_id IS NULL
AND st.vendor = p.vendor LIMIT 1),
-- Fall back to default
(SELECT overstock_days FROM stock_thresholds st
WHERE st.category_id IS NULL
AND st.vendor IS NULL LIMIT 1),
90
) as overstock_days
FROM products p FROM products p
LEFT JOIN product_metrics pm ON p.pid = pm.pid
WHERE p.managing_stock = true AND p.visible = true
) )
SELECT SELECT
p.pid, *,
p.SKU,
p.title,
p.stock_quantity,
COALESCE(pm.daily_sales_avg, 0) as daily_sales_avg,
COALESCE(pm.days_of_inventory, 0) as days_of_inventory,
COALESCE(pm.reorder_point, 0) as reorder_point,
COALESCE(pm.safety_stock, 0) as safety_stock,
CASE CASE
WHEN pm.daily_sales_avg = 0 THEN 'New' WHEN stock_quantity <= safety_stock THEN 'Critical'
WHEN p.stock_quantity <= CEIL(pm.daily_sales_avg * pt.critical_days) THEN 'Critical' WHEN stock_quantity <= reorder_point THEN 'Low'
WHEN p.stock_quantity <= CEIL(pm.daily_sales_avg * pt.reorder_days) THEN 'Reorder' WHEN stock_quantity > (reorder_point * 3) THEN 'Excess'
WHEN p.stock_quantity > (pm.daily_sales_avg * pt.overstock_days) THEN 'Overstocked'
ELSE 'Healthy' ELSE 'Healthy'
END as stock_status END as inventory_status,
FROM CASE
products p WHEN lead_time_status = 'delayed' AND stock_status = 'low' THEN 'High'
LEFT JOIN WHEN lead_time_status = 'delayed' OR stock_status = 'low' THEN 'Medium'
product_metrics pm ON p.pid = pm.pid ELSE 'Low'
LEFT JOIN END as risk_level
product_thresholds pt ON p.pid = pt.pid FROM stock_levels;
WHERE
p.managing_stock = true;
-- Create view for category performance trends -- Create view for category performance trends
CREATE OR REPLACE VIEW category_performance_trends AS CREATE OR REPLACE VIEW category_performance_trends AS
WITH monthly_trends AS (
SELECT
c.cat_id,
c.name as category_name,
ctm.year,
ctm.month,
ctm.product_count,
ctm.active_products,
ctm.total_value,
ctm.total_revenue,
ctm.avg_margin,
ctm.turnover_rate,
LAG(ctm.total_revenue) OVER (PARTITION BY c.cat_id ORDER BY ctm.year, ctm.month) as prev_month_revenue,
LAG(ctm.turnover_rate) OVER (PARTITION BY c.cat_id ORDER BY ctm.year, ctm.month) as prev_month_turnover
FROM categories c
JOIN category_time_metrics ctm ON c.cat_id = ctm.category_id
)
SELECT SELECT
c.cat_id as category_id, *,
c.name,
c.description,
p.name as parent_name,
c.status,
cm.product_count,
cm.active_products,
cm.total_value,
cm.avg_margin,
cm.turnover_rate,
cm.growth_rate,
CASE CASE
WHEN cm.growth_rate >= 20 THEN 'High Growth' WHEN prev_month_revenue IS NULL THEN 0
WHEN cm.growth_rate >= 5 THEN 'Growing' ELSE ((total_revenue - prev_month_revenue) / prev_month_revenue) * 100
WHEN cm.growth_rate >= -5 THEN 'Stable' END as revenue_growth_percent,
ELSE 'Declining' CASE
END as performance_rating WHEN prev_month_turnover IS NULL THEN 0
FROM ELSE ((turnover_rate - prev_month_turnover) / prev_month_turnover) * 100
categories c END as turnover_growth_percent
LEFT JOIN FROM monthly_trends;
categories p ON c.parent_id = p.cat_id
LEFT JOIN
category_metrics cm ON c.cat_id = cm.category_id;
-- Re-enable foreign key checks SET session_replication_role = 'origin';
SET FOREIGN_KEY_CHECKS = 1;

View File

@@ -1,6 +1,5 @@
-- Enable strict error reporting -- Enable strict error reporting
SET sql_mode = 'STRICT_ALL_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_ZERO_DATE,NO_ZERO_IN_DATE,NO_ENGINE_SUBSTITUTION'; SET session_replication_role = 'replica'; -- Disable foreign key checks temporarily
SET FOREIGN_KEY_CHECKS = 0;
-- Create tables -- Create tables
CREATE TABLE products ( CREATE TABLE products (
@@ -8,11 +7,11 @@ CREATE TABLE products (
title VARCHAR(255) NOT NULL, title VARCHAR(255) NOT NULL,
description TEXT, description TEXT,
SKU VARCHAR(50) NOT NULL, SKU VARCHAR(50) NOT NULL,
created_at TIMESTAMP NULL, created_at TIMESTAMP,
first_received TIMESTAMP NULL, first_received TIMESTAMP,
stock_quantity INT DEFAULT 0, stock_quantity INTEGER DEFAULT 0,
preorder_count INT DEFAULT 0, preorder_count INTEGER DEFAULT 0,
notions_inv_count INT DEFAULT 0, notions_inv_count INTEGER DEFAULT 0,
price DECIMAL(10, 3) NOT NULL, price DECIMAL(10, 3) NOT NULL,
regular_price DECIMAL(10, 3) NOT NULL, regular_price DECIMAL(10, 3) NOT NULL,
cost_price DECIMAL(10, 3), cost_price DECIMAL(10, 3),
@@ -37,47 +36,52 @@ CREATE TABLE products (
artist VARCHAR(100), artist VARCHAR(100),
options TEXT, options TEXT,
tags TEXT, tags TEXT,
moq INT DEFAULT 1, moq INTEGER DEFAULT 1,
uom INT DEFAULT 1, uom INTEGER DEFAULT 1,
rating DECIMAL(10,2) DEFAULT 0.00, rating DECIMAL(10,2) DEFAULT 0.00,
reviews INT UNSIGNED DEFAULT 0, reviews INTEGER DEFAULT 0,
weight DECIMAL(10,3), weight DECIMAL(10,3),
length DECIMAL(10,3), length DECIMAL(10,3),
width DECIMAL(10,3), width DECIMAL(10,3),
height DECIMAL(10,3), height DECIMAL(10,3),
country_of_origin VARCHAR(5), country_of_origin VARCHAR(5),
location VARCHAR(50), location VARCHAR(50),
total_sold INT UNSIGNED DEFAULT 0, total_sold INTEGER DEFAULT 0,
baskets INT UNSIGNED DEFAULT 0, baskets INTEGER DEFAULT 0,
notifies INT UNSIGNED DEFAULT 0, notifies INTEGER DEFAULT 0,
date_last_sold DATE, date_last_sold DATE,
updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (pid), PRIMARY KEY (pid)
INDEX idx_sku (SKU), );
INDEX idx_vendor (vendor),
INDEX idx_brand (brand), -- Create indexes for products table
INDEX idx_location (location), CREATE INDEX idx_products_sku ON products(SKU);
INDEX idx_total_sold (total_sold), CREATE INDEX idx_products_vendor ON products(vendor);
INDEX idx_date_last_sold (date_last_sold), CREATE INDEX idx_products_brand ON products(brand);
INDEX idx_updated (updated) CREATE INDEX idx_products_location ON products(location);
) ENGINE=InnoDB; CREATE INDEX idx_products_total_sold ON products(total_sold);
CREATE INDEX idx_products_date_last_sold ON products(date_last_sold);
CREATE INDEX idx_products_updated ON products(updated);
-- Create categories table with hierarchy support -- Create categories table with hierarchy support
CREATE TABLE categories ( CREATE TABLE categories (
cat_id BIGINT PRIMARY KEY, cat_id BIGINT PRIMARY KEY,
name VARCHAR(100) NOT NULL, name VARCHAR(100) NOT NULL,
type SMALLINT NOT NULL COMMENT '10=section, 11=category, 12=subcategory, 13=subsubcategory, 1=company, 2=line, 3=subline, 40=artist', type SMALLINT NOT NULL,
parent_id BIGINT, parent_id BIGINT,
description TEXT, description TEXT,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
status VARCHAR(20) DEFAULT 'active', status VARCHAR(20) DEFAULT 'active',
FOREIGN KEY (parent_id) REFERENCES categories(cat_id), FOREIGN KEY (parent_id) REFERENCES categories(cat_id)
INDEX idx_parent (parent_id), );
INDEX idx_type (type),
INDEX idx_status (status), COMMENT ON COLUMN categories.type IS '10=section, 11=category, 12=subcategory, 13=subsubcategory, 1=company, 2=line, 3=subline, 40=artist';
INDEX idx_name_type (name, type)
) ENGINE=InnoDB; CREATE INDEX idx_categories_parent ON categories(parent_id);
CREATE INDEX idx_categories_type ON categories(type);
CREATE INDEX idx_categories_status ON categories(status);
CREATE INDEX idx_categories_name_type ON categories(name, type);
-- Create product_categories junction table -- Create product_categories junction table
CREATE TABLE product_categories ( CREATE TABLE product_categories (
@@ -85,78 +89,86 @@ CREATE TABLE product_categories (
pid BIGINT NOT NULL, pid BIGINT NOT NULL,
PRIMARY KEY (pid, cat_id), PRIMARY KEY (pid, cat_id),
FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE, FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE,
FOREIGN KEY (cat_id) REFERENCES categories(cat_id) ON DELETE CASCADE, FOREIGN KEY (cat_id) REFERENCES categories(cat_id) ON DELETE CASCADE
INDEX idx_category (cat_id), );
INDEX idx_product (pid)
) ENGINE=InnoDB; CREATE INDEX idx_product_categories_category ON product_categories(cat_id);
CREATE INDEX idx_product_categories_product ON product_categories(pid);
-- Create orders table with its indexes -- Create orders table with its indexes
CREATE TABLE IF NOT EXISTS orders ( CREATE TABLE orders (
id BIGINT NOT NULL AUTO_INCREMENT, id BIGSERIAL PRIMARY KEY,
order_number VARCHAR(50) NOT NULL, order_number VARCHAR(50) NOT NULL,
pid BIGINT NOT NULL, pid BIGINT NOT NULL,
SKU VARCHAR(50) NOT NULL, SKU VARCHAR(50) NOT NULL,
date DATE NOT NULL, date DATE NOT NULL,
price DECIMAL(10,3) NOT NULL, price DECIMAL(10,3) NOT NULL,
quantity INT NOT NULL, quantity INTEGER NOT NULL,
discount DECIMAL(10,3) DEFAULT 0.000, discount DECIMAL(10,3) DEFAULT 0.000,
tax DECIMAL(10,3) DEFAULT 0.000, tax DECIMAL(10,3) DEFAULT 0.000,
tax_included TINYINT(1) DEFAULT 0, tax_included BOOLEAN DEFAULT false,
shipping DECIMAL(10,3) DEFAULT 0.000, shipping DECIMAL(10,3) DEFAULT 0.000,
costeach DECIMAL(10,3) DEFAULT 0.000, costeach DECIMAL(10,3) DEFAULT 0.000,
customer VARCHAR(50) NOT NULL, customer VARCHAR(50) NOT NULL,
customer_name VARCHAR(100), customer_name VARCHAR(100),
status VARCHAR(20) DEFAULT 'pending', status VARCHAR(20) DEFAULT 'pending',
canceled TINYINT(1) DEFAULT 0, canceled BOOLEAN DEFAULT false,
updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (id), UNIQUE (order_number, pid)
UNIQUE KEY unique_order_line (order_number, pid), );
KEY order_number (order_number),
KEY pid (pid), CREATE INDEX idx_orders_number ON orders(order_number);
KEY customer (customer), CREATE INDEX idx_orders_pid ON orders(pid);
KEY date (date), CREATE INDEX idx_orders_customer ON orders(customer);
KEY status (status), CREATE INDEX idx_orders_date ON orders(date);
INDEX idx_orders_metrics (pid, date, canceled), CREATE INDEX idx_orders_status ON orders(status);
INDEX idx_updated (updated) CREATE INDEX idx_orders_metrics ON orders(pid, date, canceled);
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; CREATE INDEX idx_orders_updated ON orders(updated);
-- Create purchase_orders table with its indexes -- Create purchase_orders table with its indexes
CREATE TABLE purchase_orders ( CREATE TABLE purchase_orders (
id BIGINT AUTO_INCREMENT PRIMARY KEY, id BIGSERIAL PRIMARY KEY,
po_id VARCHAR(50) NOT NULL, po_id VARCHAR(50) NOT NULL,
vendor VARCHAR(100) NOT NULL, vendor VARCHAR(100) NOT NULL,
date DATE NOT NULL, date DATE NOT NULL,
expected_date DATE, expected_date DATE,
pid BIGINT NOT NULL, pid BIGINT NOT NULL,
sku VARCHAR(50) NOT NULL, sku VARCHAR(50) NOT NULL,
name VARCHAR(100) NOT NULL COMMENT 'Product name from products.description', name VARCHAR(100) NOT NULL,
cost_price DECIMAL(10, 3) NOT NULL, cost_price DECIMAL(10, 3) NOT NULL,
po_cost_price DECIMAL(10, 3) NOT NULL COMMENT 'Original cost from PO, before receiving adjustments', po_cost_price DECIMAL(10, 3) NOT NULL,
status TINYINT UNSIGNED DEFAULT 1 COMMENT '0=canceled,1=created,10=electronically_ready_send,11=ordered,12=preordered,13=electronically_sent,15=receiving_started,50=done', status SMALLINT DEFAULT 1,
receiving_status TINYINT UNSIGNED DEFAULT 1 COMMENT '0=canceled,1=created,30=partial_received,40=full_received,50=paid', receiving_status SMALLINT DEFAULT 1,
notes TEXT, notes TEXT,
long_note TEXT, long_note TEXT,
ordered INT NOT NULL, ordered INTEGER NOT NULL,
received INT DEFAULT 0, received INTEGER DEFAULT 0,
received_date DATE COMMENT 'Date of first receiving', received_date DATE,
last_received_date DATE COMMENT 'Date of most recent receiving', last_received_date DATE,
received_by VARCHAR(100) COMMENT 'Name of person who first received this PO line', received_by VARCHAR(100),
receiving_history JSON COMMENT 'Array of receiving records with qty, date, cost, receiving_id, and alt_po flag', receiving_history JSONB,
updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (pid) REFERENCES products(pid), FOREIGN KEY (pid) REFERENCES products(pid),
INDEX idx_po_id (po_id), UNIQUE (po_id, pid)
INDEX idx_vendor (vendor), );
INDEX idx_status (status),
INDEX idx_receiving_status (receiving_status),
INDEX idx_purchase_orders_metrics (pid, date, status, ordered, received),
INDEX idx_po_metrics (pid, date, receiving_status, received_date),
INDEX idx_po_product_date (pid, date),
INDEX idx_po_product_status (pid, status),
INDEX idx_updated (updated),
UNIQUE KEY unique_po_product (po_id, pid)
) ENGINE=InnoDB;
SET FOREIGN_KEY_CHECKS = 1; COMMENT ON COLUMN purchase_orders.name IS 'Product name from products.description';
COMMENT ON COLUMN purchase_orders.po_cost_price IS 'Original cost from PO, before receiving adjustments';
COMMENT ON COLUMN purchase_orders.status IS '0=canceled,1=created,10=electronically_ready_send,11=ordered,12=preordered,13=electronically_sent,15=receiving_started,50=done';
COMMENT ON COLUMN purchase_orders.receiving_status IS '0=canceled,1=created,30=partial_received,40=full_received,50=paid';
COMMENT ON COLUMN purchase_orders.receiving_history IS 'Array of receiving records with qty, date, cost, receiving_id, and alt_po flag';
CREATE INDEX idx_po_id ON purchase_orders(po_id);
CREATE INDEX idx_po_vendor ON purchase_orders(vendor);
CREATE INDEX idx_po_status ON purchase_orders(status);
CREATE INDEX idx_po_receiving_status ON purchase_orders(receiving_status);
CREATE INDEX idx_po_metrics ON purchase_orders(pid, date, status, ordered, received);
CREATE INDEX idx_po_metrics_receiving ON purchase_orders(pid, date, receiving_status, received_date);
CREATE INDEX idx_po_product_date ON purchase_orders(pid, date);
CREATE INDEX idx_po_product_status ON purchase_orders(pid, status);
CREATE INDEX idx_po_updated ON purchase_orders(updated);
SET session_replication_role = 'origin'; -- Re-enable foreign key checks
-- Create views for common calculations -- Create views for common calculations
-- product_sales_trends view moved to metrics-schema.sql -- product_sales_trends view moved to metrics-schema.sql

View File

@@ -9,12 +9,14 @@
"version": "1.0.0", "version": "1.0.0",
"license": "ISC", "license": "ISC",
"dependencies": { "dependencies": {
"bcrypt": "^5.1.1",
"cors": "^2.8.5", "cors": "^2.8.5",
"csv-parse": "^5.6.0", "csv-parse": "^5.6.0",
"dotenv": "^16.4.7", "dotenv": "^16.4.7",
"express": "^4.18.2", "express": "^4.18.2",
"multer": "^1.4.5-lts.1", "multer": "^1.4.5-lts.1",
"mysql2": "^3.12.0", "mysql2": "^3.12.0",
"pg": "^8.13.3",
"pm2": "^5.3.0", "pm2": "^5.3.0",
"ssh2": "^1.16.0", "ssh2": "^1.16.0",
"uuid": "^9.0.1" "uuid": "^9.0.1"
@@ -23,6 +25,74 @@
"nodemon": "^3.0.2" "nodemon": "^3.0.2"
} }
}, },
"node_modules/@mapbox/node-pre-gyp": {
"version": "1.0.11",
"resolved": "https://registry.npmjs.org/@mapbox/node-pre-gyp/-/node-pre-gyp-1.0.11.tgz",
"integrity": "sha512-Yhlar6v9WQgUp/He7BdgzOz8lqMQ8sU+jkCq7Wx8Myc5YFJLbEe7lgui/V7G1qB1DJykHSGwreceSaD60Y0PUQ==",
"license": "BSD-3-Clause",
"dependencies": {
"detect-libc": "^2.0.0",
"https-proxy-agent": "^5.0.0",
"make-dir": "^3.1.0",
"node-fetch": "^2.6.7",
"nopt": "^5.0.0",
"npmlog": "^5.0.1",
"rimraf": "^3.0.2",
"semver": "^7.3.5",
"tar": "^6.1.11"
},
"bin": {
"node-pre-gyp": "bin/node-pre-gyp"
}
},
"node_modules/@mapbox/node-pre-gyp/node_modules/agent-base": {
"version": "6.0.2",
"resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz",
"integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==",
"license": "MIT",
"dependencies": {
"debug": "4"
},
"engines": {
"node": ">= 6.0.0"
}
},
"node_modules/@mapbox/node-pre-gyp/node_modules/debug": {
"version": "4.4.0",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz",
"integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==",
"license": "MIT",
"dependencies": {
"ms": "^2.1.3"
},
"engines": {
"node": ">=6.0"
},
"peerDependenciesMeta": {
"supports-color": {
"optional": true
}
}
},
"node_modules/@mapbox/node-pre-gyp/node_modules/https-proxy-agent": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz",
"integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==",
"license": "MIT",
"dependencies": {
"agent-base": "6",
"debug": "4"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/@mapbox/node-pre-gyp/node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
"license": "MIT"
},
"node_modules/@pm2/agent": { "node_modules/@pm2/agent": {
"version": "2.0.4", "version": "2.0.4",
"resolved": "https://registry.npmjs.org/@pm2/agent/-/agent-2.0.4.tgz", "resolved": "https://registry.npmjs.org/@pm2/agent/-/agent-2.0.4.tgz",
@@ -276,6 +346,12 @@
"integrity": "sha512-C5Mc6rdnsaJDjO3UpGW/CQTHtCKaYlScZTly4JIu97Jxo/odCiH0ITnDXSJPTOrEKk/ycSZ0AOgTmkDtkOsvIA==", "integrity": "sha512-C5Mc6rdnsaJDjO3UpGW/CQTHtCKaYlScZTly4JIu97Jxo/odCiH0ITnDXSJPTOrEKk/ycSZ0AOgTmkDtkOsvIA==",
"license": "MIT" "license": "MIT"
}, },
"node_modules/abbrev": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz",
"integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==",
"license": "ISC"
},
"node_modules/accepts": { "node_modules/accepts": {
"version": "1.3.8", "version": "1.3.8",
"resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
@@ -322,6 +398,15 @@
"node": ">=6" "node": ">=6"
} }
}, },
"node_modules/ansi-regex": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/ansi-styles": { "node_modules/ansi-styles": {
"version": "4.3.0", "version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
@@ -356,6 +441,40 @@
"integrity": "sha512-klpgFSWLW1ZEs8svjfb7g4qWY0YS5imI82dTg+QahUvJ8YqAY0P10Uk8tTyh9ZGuYEZEMaeJYCF5BFuX552hsw==", "integrity": "sha512-klpgFSWLW1ZEs8svjfb7g4qWY0YS5imI82dTg+QahUvJ8YqAY0P10Uk8tTyh9ZGuYEZEMaeJYCF5BFuX552hsw==",
"license": "MIT" "license": "MIT"
}, },
"node_modules/aproba": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/aproba/-/aproba-2.0.0.tgz",
"integrity": "sha512-lYe4Gx7QT+MKGbDsA+Z+he/Wtef0BiwDOlK/XkBrdfsh9J/jPPXbX0tE9x9cl27Tmu5gg3QUbUrQYa/y+KOHPQ==",
"license": "ISC"
},
"node_modules/are-we-there-yet": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-2.0.0.tgz",
"integrity": "sha512-Ci/qENmwHnsYo9xKIcUJN5LeDKdJ6R1Z1j9V/J5wyq8nh/mYPEpIKJbBZXtZjG04HiK7zV/p6Vs9952MrMeUIw==",
"deprecated": "This package is no longer supported.",
"license": "ISC",
"dependencies": {
"delegates": "^1.0.0",
"readable-stream": "^3.6.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/are-we-there-yet/node_modules/readable-stream": {
"version": "3.6.2",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz",
"integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==",
"license": "MIT",
"dependencies": {
"inherits": "^2.0.3",
"string_decoder": "^1.1.1",
"util-deprecate": "^1.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/argparse": { "node_modules/argparse": {
"version": "2.0.1", "version": "2.0.1",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
@@ -414,7 +533,6 @@
"version": "1.0.2", "version": "1.0.2",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
"integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
"dev": true,
"license": "MIT" "license": "MIT"
}, },
"node_modules/basic-ftp": { "node_modules/basic-ftp": {
@@ -426,6 +544,20 @@
"node": ">=10.0.0" "node": ">=10.0.0"
} }
}, },
"node_modules/bcrypt": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/bcrypt/-/bcrypt-5.1.1.tgz",
"integrity": "sha512-AGBHOG5hPYZ5Xl9KXzU5iKq9516yEmvCKDg3ecP5kX2aB6UqTeXZxk2ELnDgDm6BQSMlLt9rDB4LoSMx0rYwww==",
"hasInstallScript": true,
"license": "MIT",
"dependencies": {
"@mapbox/node-pre-gyp": "^1.0.11",
"node-addon-api": "^5.0.0"
},
"engines": {
"node": ">= 10.0.0"
}
},
"node_modules/bcrypt-pbkdf": { "node_modules/bcrypt-pbkdf": {
"version": "1.0.2", "version": "1.0.2",
"resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz",
@@ -493,7 +625,6 @@
"version": "1.1.11", "version": "1.1.11",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
"integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
"dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"balanced-match": "^1.0.0", "balanced-match": "^1.0.0",
@@ -619,6 +750,15 @@
"fsevents": "~2.3.2" "fsevents": "~2.3.2"
} }
}, },
"node_modules/chownr": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz",
"integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==",
"license": "ISC",
"engines": {
"node": ">=10"
}
},
"node_modules/cli-tableau": { "node_modules/cli-tableau": {
"version": "2.0.1", "version": "2.0.1",
"resolved": "https://registry.npmjs.org/cli-tableau/-/cli-tableau-2.0.1.tgz", "resolved": "https://registry.npmjs.org/cli-tableau/-/cli-tableau-2.0.1.tgz",
@@ -648,6 +788,15 @@
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"license": "MIT" "license": "MIT"
}, },
"node_modules/color-support": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/color-support/-/color-support-1.1.3.tgz",
"integrity": "sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==",
"license": "ISC",
"bin": {
"color-support": "bin.js"
}
},
"node_modules/commander": { "node_modules/commander": {
"version": "2.15.1", "version": "2.15.1",
"resolved": "https://registry.npmjs.org/commander/-/commander-2.15.1.tgz", "resolved": "https://registry.npmjs.org/commander/-/commander-2.15.1.tgz",
@@ -658,7 +807,6 @@
"version": "0.0.1", "version": "0.0.1",
"resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
"integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==",
"dev": true,
"license": "MIT" "license": "MIT"
}, },
"node_modules/concat-stream": { "node_modules/concat-stream": {
@@ -676,6 +824,12 @@
"typedarray": "^0.0.6" "typedarray": "^0.0.6"
} }
}, },
"node_modules/console-control-strings": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz",
"integrity": "sha512-ty/fTekppD2fIwRvnZAVdeOiGd1c7YXEixbgJTNzqcxJWKQnjJ/V1bNEEE6hygpM3WjwHFUVK6HTjWSzV4a8sQ==",
"license": "ISC"
},
"node_modules/content-disposition": { "node_modules/content-disposition": {
"version": "0.5.4", "version": "0.5.4",
"resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz",
@@ -801,6 +955,12 @@
"node": ">= 14" "node": ">= 14"
} }
}, },
"node_modules/delegates": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz",
"integrity": "sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==",
"license": "MIT"
},
"node_modules/denque": { "node_modules/denque": {
"version": "2.1.0", "version": "2.1.0",
"resolved": "https://registry.npmjs.org/denque/-/denque-2.1.0.tgz", "resolved": "https://registry.npmjs.org/denque/-/denque-2.1.0.tgz",
@@ -829,6 +989,15 @@
"npm": "1.2.8000 || >= 1.4.16" "npm": "1.2.8000 || >= 1.4.16"
} }
}, },
"node_modules/detect-libc": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz",
"integrity": "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==",
"license": "Apache-2.0",
"engines": {
"node": ">=8"
}
},
"node_modules/dotenv": { "node_modules/dotenv": {
"version": "16.4.7", "version": "16.4.7",
"resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz", "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz",
@@ -861,6 +1030,12 @@
"integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==",
"license": "MIT" "license": "MIT"
}, },
"node_modules/emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"license": "MIT"
},
"node_modules/encodeurl": { "node_modules/encodeurl": {
"version": "2.0.0", "version": "2.0.0",
"resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz",
@@ -1132,6 +1307,36 @@
"node": ">= 0.6" "node": ">= 0.6"
} }
}, },
"node_modules/fs-minipass": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz",
"integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==",
"license": "ISC",
"dependencies": {
"minipass": "^3.0.0"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/fs-minipass/node_modules/minipass": {
"version": "3.3.6",
"resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz",
"integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==",
"license": "ISC",
"dependencies": {
"yallist": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/fs.realpath": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
"integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==",
"license": "ISC"
},
"node_modules/fsevents": { "node_modules/fsevents": {
"version": "2.3.3", "version": "2.3.3",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
@@ -1155,6 +1360,27 @@
"url": "https://github.com/sponsors/ljharb" "url": "https://github.com/sponsors/ljharb"
} }
}, },
"node_modules/gauge": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/gauge/-/gauge-3.0.2.tgz",
"integrity": "sha512-+5J6MS/5XksCuXq++uFRsnUd7Ovu1XenbeuIuNRJxYWjgQbPuFhT14lAvsWfqfAmnwluf1OwMjz39HjfLPci0Q==",
"deprecated": "This package is no longer supported.",
"license": "ISC",
"dependencies": {
"aproba": "^1.0.3 || ^2.0.0",
"color-support": "^1.1.2",
"console-control-strings": "^1.0.0",
"has-unicode": "^2.0.1",
"object-assign": "^4.1.1",
"signal-exit": "^3.0.0",
"string-width": "^4.2.3",
"strip-ansi": "^6.0.1",
"wide-align": "^1.1.2"
},
"engines": {
"node": ">=10"
}
},
"node_modules/generate-function": { "node_modules/generate-function": {
"version": "2.3.1", "version": "2.3.1",
"resolved": "https://registry.npmjs.org/generate-function/-/generate-function-2.3.1.tgz", "resolved": "https://registry.npmjs.org/generate-function/-/generate-function-2.3.1.tgz",
@@ -1250,6 +1476,27 @@
"integrity": "sha512-2e/nZezdVlyCopOCYHeW0onkbZg7xP1Ad6pndPy1rCygeRykefUS6r7oA5cJRGEFvseiaz5a/qUHFVX1dd6Isg==", "integrity": "sha512-2e/nZezdVlyCopOCYHeW0onkbZg7xP1Ad6pndPy1rCygeRykefUS6r7oA5cJRGEFvseiaz5a/qUHFVX1dd6Isg==",
"license": "MIT" "license": "MIT"
}, },
"node_modules/glob": {
"version": "7.2.3",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
"integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
"deprecated": "Glob versions prior to v9 are no longer supported",
"license": "ISC",
"dependencies": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.1.1",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
},
"engines": {
"node": "*"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/glob-parent": { "node_modules/glob-parent": {
"version": "5.1.2", "version": "5.1.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
@@ -1295,6 +1542,12 @@
"url": "https://github.com/sponsors/ljharb" "url": "https://github.com/sponsors/ljharb"
} }
}, },
"node_modules/has-unicode": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz",
"integrity": "sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ==",
"license": "ISC"
},
"node_modules/hasown": { "node_modules/hasown": {
"version": "2.0.2", "version": "2.0.2",
"resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
@@ -1414,6 +1667,17 @@
"dev": true, "dev": true,
"license": "ISC" "license": "ISC"
}, },
"node_modules/inflight": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
"integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
"deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.",
"license": "ISC",
"dependencies": {
"once": "^1.3.0",
"wrappy": "1"
}
},
"node_modules/inherits": { "node_modules/inherits": {
"version": "2.0.4", "version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
@@ -1490,6 +1754,15 @@
"node": ">=0.10.0" "node": ">=0.10.0"
} }
}, },
"node_modules/is-fullwidth-code-point": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
"integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/is-glob": { "node_modules/is-glob": {
"version": "4.0.3", "version": "4.0.3",
"resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
@@ -1605,6 +1878,30 @@
"url": "https://github.com/sponsors/wellwelwel" "url": "https://github.com/sponsors/wellwelwel"
} }
}, },
"node_modules/make-dir": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz",
"integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==",
"license": "MIT",
"dependencies": {
"semver": "^6.0.0"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/make-dir/node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"license": "ISC",
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/math-intrinsics": { "node_modules/math-intrinsics": {
"version": "1.1.0", "version": "1.1.0",
"resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
@@ -1678,7 +1975,6 @@
"version": "3.1.2", "version": "3.1.2",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
"integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
"dev": true,
"license": "ISC", "license": "ISC",
"dependencies": { "dependencies": {
"brace-expansion": "^1.1.7" "brace-expansion": "^1.1.7"
@@ -1696,6 +1992,40 @@
"url": "https://github.com/sponsors/ljharb" "url": "https://github.com/sponsors/ljharb"
} }
}, },
"node_modules/minipass": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz",
"integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==",
"license": "ISC",
"engines": {
"node": ">=8"
}
},
"node_modules/minizlib": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz",
"integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==",
"license": "MIT",
"dependencies": {
"minipass": "^3.0.0",
"yallist": "^4.0.0"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/minizlib/node_modules/minipass": {
"version": "3.3.6",
"resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz",
"integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==",
"license": "ISC",
"dependencies": {
"yallist": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/mkdirp": { "node_modules/mkdirp": {
"version": "1.0.4", "version": "1.0.4",
"resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz",
@@ -1857,6 +2187,32 @@
"node": ">= 0.4.0" "node": ">= 0.4.0"
} }
}, },
"node_modules/node-addon-api": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-5.1.0.tgz",
"integrity": "sha512-eh0GgfEkpnoWDq+VY8OyvYhFEzBk6jIYbRKdIlyTiAXIVJ8PyBaKb0rp7oDtoddbdoHWhq8wwr+XZ81F1rpNdA==",
"license": "MIT"
},
"node_modules/node-fetch": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
"integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
"license": "MIT",
"dependencies": {
"whatwg-url": "^5.0.0"
},
"engines": {
"node": "4.x || >=6.0.0"
},
"peerDependencies": {
"encoding": "^0.1.0"
},
"peerDependenciesMeta": {
"encoding": {
"optional": true
}
}
},
"node_modules/nodemon": { "node_modules/nodemon": {
"version": "3.1.9", "version": "3.1.9",
"resolved": "https://registry.npmjs.org/nodemon/-/nodemon-3.1.9.tgz", "resolved": "https://registry.npmjs.org/nodemon/-/nodemon-3.1.9.tgz",
@@ -1934,6 +2290,21 @@
"node": ">=4" "node": ">=4"
} }
}, },
"node_modules/nopt": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/nopt/-/nopt-5.0.0.tgz",
"integrity": "sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ==",
"license": "ISC",
"dependencies": {
"abbrev": "1"
},
"bin": {
"nopt": "bin/nopt.js"
},
"engines": {
"node": ">=6"
}
},
"node_modules/normalize-path": { "node_modules/normalize-path": {
"version": "3.0.0", "version": "3.0.0",
"resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
@@ -1943,6 +2314,19 @@
"node": ">=0.10.0" "node": ">=0.10.0"
} }
}, },
"node_modules/npmlog": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/npmlog/-/npmlog-5.0.1.tgz",
"integrity": "sha512-AqZtDUWOMKs1G/8lwylVjrdYgqA4d9nu8hc+0gzRxlDb1I10+FHBGMXs6aiQHFdCUUlqH99MUMuLfzWDNDtfxw==",
"deprecated": "This package is no longer supported.",
"license": "ISC",
"dependencies": {
"are-we-there-yet": "^2.0.0",
"console-control-strings": "^1.1.0",
"gauge": "^3.0.0",
"set-blocking": "^2.0.0"
}
},
"node_modules/nssocket": { "node_modules/nssocket": {
"version": "0.6.0", "version": "0.6.0",
"resolved": "https://registry.npmjs.org/nssocket/-/nssocket-0.6.0.tgz", "resolved": "https://registry.npmjs.org/nssocket/-/nssocket-0.6.0.tgz",
@@ -1995,6 +2379,15 @@
"node": ">= 0.8" "node": ">= 0.8"
} }
}, },
"node_modules/once": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
"integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
"license": "ISC",
"dependencies": {
"wrappy": "1"
}
},
"node_modules/pac-proxy-agent": { "node_modules/pac-proxy-agent": {
"version": "7.1.0", "version": "7.1.0",
"resolved": "https://registry.npmjs.org/pac-proxy-agent/-/pac-proxy-agent-7.1.0.tgz", "resolved": "https://registry.npmjs.org/pac-proxy-agent/-/pac-proxy-agent-7.1.0.tgz",
@@ -2065,6 +2458,15 @@
"node": ">= 0.8" "node": ">= 0.8"
} }
}, },
"node_modules/path-is-absolute": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
"integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==",
"license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/path-parse": { "node_modules/path-parse": {
"version": "1.0.7", "version": "1.0.7",
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
@@ -2077,6 +2479,95 @@
"integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==", "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==",
"license": "MIT" "license": "MIT"
}, },
"node_modules/pg": {
"version": "8.13.3",
"resolved": "https://registry.npmjs.org/pg/-/pg-8.13.3.tgz",
"integrity": "sha512-P6tPt9jXbL9HVu/SSRERNYaYG++MjnscnegFh9pPHihfoBSujsrka0hyuymMzeJKFWrcG8wvCKy8rCe8e5nDUQ==",
"license": "MIT",
"dependencies": {
"pg-connection-string": "^2.7.0",
"pg-pool": "^3.7.1",
"pg-protocol": "^1.7.1",
"pg-types": "^2.1.0",
"pgpass": "1.x"
},
"engines": {
"node": ">= 8.0.0"
},
"optionalDependencies": {
"pg-cloudflare": "^1.1.1"
},
"peerDependencies": {
"pg-native": ">=3.0.1"
},
"peerDependenciesMeta": {
"pg-native": {
"optional": true
}
}
},
"node_modules/pg-cloudflare": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/pg-cloudflare/-/pg-cloudflare-1.1.1.tgz",
"integrity": "sha512-xWPagP/4B6BgFO+EKz3JONXv3YDgvkbVrGw2mTo3D6tVDQRh1e7cqVGvyR3BE+eQgAvx1XhW/iEASj4/jCWl3Q==",
"license": "MIT",
"optional": true
},
"node_modules/pg-connection-string": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/pg-connection-string/-/pg-connection-string-2.7.0.tgz",
"integrity": "sha512-PI2W9mv53rXJQEOb8xNR8lH7Hr+EKa6oJa38zsK0S/ky2er16ios1wLKhZyxzD7jUReiWokc9WK5nxSnC7W1TA==",
"license": "MIT"
},
"node_modules/pg-int8": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/pg-int8/-/pg-int8-1.0.1.tgz",
"integrity": "sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==",
"license": "ISC",
"engines": {
"node": ">=4.0.0"
}
},
"node_modules/pg-pool": {
"version": "3.7.1",
"resolved": "https://registry.npmjs.org/pg-pool/-/pg-pool-3.7.1.tgz",
"integrity": "sha512-xIOsFoh7Vdhojas6q3596mXFsR8nwBQBXX5JiV7p9buEVAGqYL4yFzclON5P9vFrpu1u7Zwl2oriyDa89n0wbw==",
"license": "MIT",
"peerDependencies": {
"pg": ">=8.0"
}
},
"node_modules/pg-protocol": {
"version": "1.7.1",
"resolved": "https://registry.npmjs.org/pg-protocol/-/pg-protocol-1.7.1.tgz",
"integrity": "sha512-gjTHWGYWsEgy9MsY0Gp6ZJxV24IjDqdpTW7Eh0x+WfJLFsm/TJx1MzL6T0D88mBvkpxotCQ6TwW6N+Kko7lhgQ==",
"license": "MIT"
},
"node_modules/pg-types": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/pg-types/-/pg-types-2.2.0.tgz",
"integrity": "sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==",
"license": "MIT",
"dependencies": {
"pg-int8": "1.0.1",
"postgres-array": "~2.0.0",
"postgres-bytea": "~1.0.0",
"postgres-date": "~1.0.4",
"postgres-interval": "^1.1.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/pgpass": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/pgpass/-/pgpass-1.0.5.tgz",
"integrity": "sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==",
"license": "MIT",
"dependencies": {
"split2": "^4.1.0"
}
},
"node_modules/picomatch": { "node_modules/picomatch": {
"version": "2.3.1", "version": "2.3.1",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
@@ -2320,6 +2811,45 @@
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
"license": "MIT" "license": "MIT"
}, },
"node_modules/postgres-array": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/postgres-array/-/postgres-array-2.0.0.tgz",
"integrity": "sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==",
"license": "MIT",
"engines": {
"node": ">=4"
}
},
"node_modules/postgres-bytea": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-1.0.0.tgz",
"integrity": "sha512-xy3pmLuQqRBZBXDULy7KbaitYqLcmxigw14Q5sj8QBVLqEwXfeybIKVWiqAXTlcvdvb0+xkOtDbfQMOf4lST1w==",
"license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/postgres-date": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/postgres-date/-/postgres-date-1.0.7.tgz",
"integrity": "sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==",
"license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/postgres-interval": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/postgres-interval/-/postgres-interval-1.2.0.tgz",
"integrity": "sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==",
"license": "MIT",
"dependencies": {
"xtend": "^4.0.0"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/process-nextick-args": { "node_modules/process-nextick-args": {
"version": "2.0.1", "version": "2.0.1",
"resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
@@ -2544,6 +3074,22 @@
"url": "https://github.com/sponsors/ljharb" "url": "https://github.com/sponsors/ljharb"
} }
}, },
"node_modules/rimraf": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
"integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
"deprecated": "Rimraf versions prior to v4 are no longer supported",
"license": "ISC",
"dependencies": {
"glob": "^7.1.3"
},
"bin": {
"rimraf": "bin.js"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/run-series": { "node_modules/run-series": {
"version": "1.1.9", "version": "1.1.9",
"resolved": "https://registry.npmjs.org/run-series/-/run-series-1.1.9.tgz", "resolved": "https://registry.npmjs.org/run-series/-/run-series-1.1.9.tgz",
@@ -2667,6 +3213,12 @@
"node": ">= 0.8.0" "node": ">= 0.8.0"
} }
}, },
"node_modules/set-blocking": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz",
"integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==",
"license": "ISC"
},
"node_modules/setprototypeof": { "node_modules/setprototypeof": {
"version": "1.2.0", "version": "1.2.0",
"resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz",
@@ -2850,6 +3402,15 @@
"source-map": "^0.6.0" "source-map": "^0.6.0"
} }
}, },
"node_modules/split2": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/split2/-/split2-4.2.0.tgz",
"integrity": "sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==",
"license": "ISC",
"engines": {
"node": ">= 10.x"
}
},
"node_modules/sprintf-js": { "node_modules/sprintf-js": {
"version": "1.1.2", "version": "1.1.2",
"resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.2.tgz", "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.2.tgz",
@@ -2914,6 +3475,32 @@
"integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
"license": "MIT" "license": "MIT"
}, },
"node_modules/string-width": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"license": "MIT",
"dependencies": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/strip-ansi": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"license": "MIT",
"dependencies": {
"ansi-regex": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/supports-color": { "node_modules/supports-color": {
"version": "7.2.0", "version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
@@ -2965,6 +3552,23 @@
"url": "https://www.buymeacoffee.com/systeminfo" "url": "https://www.buymeacoffee.com/systeminfo"
} }
}, },
"node_modules/tar": {
"version": "6.2.1",
"resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz",
"integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==",
"license": "ISC",
"dependencies": {
"chownr": "^2.0.0",
"fs-minipass": "^2.0.0",
"minipass": "^5.0.0",
"minizlib": "^2.1.1",
"mkdirp": "^1.0.3",
"yallist": "^4.0.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/to-regex-range": { "node_modules/to-regex-range": {
"version": "5.0.1", "version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
@@ -2996,6 +3600,12 @@
"nodetouch": "bin/nodetouch.js" "nodetouch": "bin/nodetouch.js"
} }
}, },
"node_modules/tr46": {
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
"license": "MIT"
},
"node_modules/tslib": { "node_modules/tslib": {
"version": "1.9.3", "version": "1.9.3",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-1.9.3.tgz", "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.9.3.tgz",
@@ -3132,6 +3742,37 @@
"lodash": "^4.17.14" "lodash": "^4.17.14"
} }
}, },
"node_modules/webidl-conversions": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
"integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
"license": "BSD-2-Clause"
},
"node_modules/whatwg-url": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
"integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
"license": "MIT",
"dependencies": {
"tr46": "~0.0.3",
"webidl-conversions": "^3.0.0"
}
},
"node_modules/wide-align": {
"version": "1.1.5",
"resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.5.tgz",
"integrity": "sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg==",
"license": "ISC",
"dependencies": {
"string-width": "^1.0.2 || 2 || 3 || 4"
}
},
"node_modules/wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
"license": "ISC"
},
"node_modules/ws": { "node_modules/ws": {
"version": "7.5.10", "version": "7.5.10",
"resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz", "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz",

View File

@@ -18,12 +18,14 @@
"author": "", "author": "",
"license": "ISC", "license": "ISC",
"dependencies": { "dependencies": {
"bcrypt": "^5.1.1",
"cors": "^2.8.5", "cors": "^2.8.5",
"csv-parse": "^5.6.0", "csv-parse": "^5.6.0",
"dotenv": "^16.4.7", "dotenv": "^16.4.7",
"express": "^4.18.2", "express": "^4.18.2",
"multer": "^1.4.5-lts.1", "multer": "^1.4.5-lts.1",
"mysql2": "^3.12.0", "mysql2": "^3.12.0",
"pg": "^8.13.3",
"pm2": "^5.3.0", "pm2": "^5.3.0",
"ssh2": "^1.16.0", "ssh2": "^1.16.0",
"uuid": "^9.0.1" "uuid": "^9.0.1"

View File

@@ -14,7 +14,15 @@ function outputProgress(data) {
function runScript(scriptPath) { function runScript(scriptPath) {
return new Promise((resolve, reject) => { return new Promise((resolve, reject) => {
const child = spawn('node', [scriptPath], { const child = spawn('node', [scriptPath], {
stdio: ['inherit', 'pipe', 'pipe'] stdio: ['inherit', 'pipe', 'pipe'],
env: {
...process.env,
PGHOST: process.env.DB_HOST,
PGUSER: process.env.DB_USER,
PGPASSWORD: process.env.DB_PASSWORD,
PGDATABASE: process.env.DB_NAME,
PGPORT: process.env.DB_PORT || '5432'
}
}); });
let output = ''; let output = '';

View File

@@ -19,7 +19,6 @@ const IMPORT_PURCHASE_ORDERS = true;
const INCREMENTAL_UPDATE = process.env.INCREMENTAL_UPDATE !== 'false'; // Default to true unless explicitly set to false const INCREMENTAL_UPDATE = process.env.INCREMENTAL_UPDATE !== 'false'; // Default to true unless explicitly set to false
// SSH configuration // SSH configuration
// In import-from-prod.js
const sshConfig = { const sshConfig = {
ssh: { ssh: {
host: process.env.PROD_SSH_HOST, host: process.env.PROD_SSH_HOST,
@@ -31,6 +30,7 @@ const sshConfig = {
compress: true, // Enable SSH compression compress: true, // Enable SSH compression
}, },
prodDbConfig: { prodDbConfig: {
// MySQL config for production
host: process.env.PROD_DB_HOST || "localhost", host: process.env.PROD_DB_HOST || "localhost",
user: process.env.PROD_DB_USER, user: process.env.PROD_DB_USER,
password: process.env.PROD_DB_PASSWORD, password: process.env.PROD_DB_PASSWORD,
@@ -39,21 +39,16 @@ const sshConfig = {
timezone: 'Z', timezone: 'Z',
}, },
localDbConfig: { localDbConfig: {
// PostgreSQL config for local
host: process.env.DB_HOST, host: process.env.DB_HOST,
user: process.env.DB_USER, user: process.env.DB_USER,
password: process.env.DB_PASSWORD, password: process.env.DB_PASSWORD,
database: process.env.DB_NAME, database: process.env.DB_NAME,
multipleStatements: true, port: process.env.DB_PORT || 5432,
waitForConnections: true, ssl: process.env.DB_SSL === 'true',
connectionLimit: 10, connectionTimeoutMillis: 60000,
queueLimit: 0, idleTimeoutMillis: 30000,
namedPlaceholders: true, max: 10 // connection pool max size
connectTimeout: 60000,
enableKeepAlive: true,
keepAliveInitialDelay: 10000,
compress: true,
timezone: 'Z',
stringifyObjects: false,
} }
}; };
@@ -108,7 +103,7 @@ async function main() {
SET SET
status = 'cancelled', status = 'cancelled',
end_time = NOW(), end_time = NOW(),
duration_seconds = TIMESTAMPDIFF(SECOND, start_time, NOW()), duration_seconds = EXTRACT(EPOCH FROM (NOW() - start_time))::INTEGER,
error_message = 'Previous import was not completed properly' error_message = 'Previous import was not completed properly'
WHERE status = 'running' WHERE status = 'running'
`); `);
@@ -118,9 +113,10 @@ async function main() {
CREATE TABLE IF NOT EXISTS sync_status ( CREATE TABLE IF NOT EXISTS sync_status (
table_name VARCHAR(50) PRIMARY KEY, table_name VARCHAR(50) PRIMARY KEY,
last_sync_timestamp TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, last_sync_timestamp TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
last_sync_id BIGINT, last_sync_id BIGINT
INDEX idx_last_sync (last_sync_timestamp)
); );
CREATE INDEX IF NOT EXISTS idx_last_sync ON sync_status (last_sync_timestamp);
`); `);
// Create import history record for the overall session // Create import history record for the overall session
@@ -134,17 +130,17 @@ async function main() {
) VALUES ( ) VALUES (
'all_tables', 'all_tables',
NOW(), NOW(),
?, $1::boolean,
'running', 'running',
JSON_OBJECT( jsonb_build_object(
'categories_enabled', ?, 'categories_enabled', $2::boolean,
'products_enabled', ?, 'products_enabled', $3::boolean,
'orders_enabled', ?, 'orders_enabled', $4::boolean,
'purchase_orders_enabled', ? 'purchase_orders_enabled', $5::boolean
) )
) ) RETURNING id
`, [INCREMENTAL_UPDATE, IMPORT_CATEGORIES, IMPORT_PRODUCTS, IMPORT_ORDERS, IMPORT_PURCHASE_ORDERS]); `, [INCREMENTAL_UPDATE, IMPORT_CATEGORIES, IMPORT_PRODUCTS, IMPORT_ORDERS, IMPORT_PURCHASE_ORDERS]);
importHistoryId = historyResult.insertId; importHistoryId = historyResult.rows[0].id;
const results = { const results = {
categories: null, categories: null,
@@ -162,8 +158,8 @@ async function main() {
if (isImportCancelled) throw new Error("Import cancelled"); if (isImportCancelled) throw new Error("Import cancelled");
completedSteps++; completedSteps++;
console.log('Categories import result:', results.categories); console.log('Categories import result:', results.categories);
totalRecordsAdded += results.categories?.recordsAdded || 0; totalRecordsAdded += parseInt(results.categories?.recordsAdded || 0);
totalRecordsUpdated += results.categories?.recordsUpdated || 0; totalRecordsUpdated += parseInt(results.categories?.recordsUpdated || 0);
} }
if (IMPORT_PRODUCTS) { if (IMPORT_PRODUCTS) {
@@ -171,8 +167,8 @@ async function main() {
if (isImportCancelled) throw new Error("Import cancelled"); if (isImportCancelled) throw new Error("Import cancelled");
completedSteps++; completedSteps++;
console.log('Products import result:', results.products); console.log('Products import result:', results.products);
totalRecordsAdded += results.products?.recordsAdded || 0; totalRecordsAdded += parseInt(results.products?.recordsAdded || 0);
totalRecordsUpdated += results.products?.recordsUpdated || 0; totalRecordsUpdated += parseInt(results.products?.recordsUpdated || 0);
} }
if (IMPORT_ORDERS) { if (IMPORT_ORDERS) {
@@ -180,8 +176,8 @@ async function main() {
if (isImportCancelled) throw new Error("Import cancelled"); if (isImportCancelled) throw new Error("Import cancelled");
completedSteps++; completedSteps++;
console.log('Orders import result:', results.orders); console.log('Orders import result:', results.orders);
totalRecordsAdded += results.orders?.recordsAdded || 0; totalRecordsAdded += parseInt(results.orders?.recordsAdded || 0);
totalRecordsUpdated += results.orders?.recordsUpdated || 0; totalRecordsUpdated += parseInt(results.orders?.recordsUpdated || 0);
} }
if (IMPORT_PURCHASE_ORDERS) { if (IMPORT_PURCHASE_ORDERS) {
@@ -189,8 +185,8 @@ async function main() {
if (isImportCancelled) throw new Error("Import cancelled"); if (isImportCancelled) throw new Error("Import cancelled");
completedSteps++; completedSteps++;
console.log('Purchase orders import result:', results.purchaseOrders); console.log('Purchase orders import result:', results.purchaseOrders);
totalRecordsAdded += results.purchaseOrders?.recordsAdded || 0; totalRecordsAdded += parseInt(results.purchaseOrders?.recordsAdded || 0);
totalRecordsUpdated += results.purchaseOrders?.recordsUpdated || 0; totalRecordsUpdated += parseInt(results.purchaseOrders?.recordsUpdated || 0);
} }
const endTime = Date.now(); const endTime = Date.now();
@@ -201,21 +197,21 @@ async function main() {
UPDATE import_history UPDATE import_history
SET SET
end_time = NOW(), end_time = NOW(),
duration_seconds = ?, duration_seconds = $1,
records_added = ?, records_added = $2,
records_updated = ?, records_updated = $3,
status = 'completed', status = 'completed',
additional_info = JSON_OBJECT( additional_info = jsonb_build_object(
'categories_enabled', ?, 'categories_enabled', $4::boolean,
'products_enabled', ?, 'products_enabled', $5::boolean,
'orders_enabled', ?, 'orders_enabled', $6::boolean,
'purchase_orders_enabled', ?, 'purchase_orders_enabled', $7::boolean,
'categories_result', CAST(? AS JSON), 'categories_result', COALESCE($8::jsonb, 'null'::jsonb),
'products_result', CAST(? AS JSON), 'products_result', COALESCE($9::jsonb, 'null'::jsonb),
'orders_result', CAST(? AS JSON), 'orders_result', COALESCE($10::jsonb, 'null'::jsonb),
'purchase_orders_result', CAST(? AS JSON) 'purchase_orders_result', COALESCE($11::jsonb, 'null'::jsonb)
) )
WHERE id = ? WHERE id = $12
`, [ `, [
totalElapsedSeconds, totalElapsedSeconds,
totalRecordsAdded, totalRecordsAdded,
@@ -259,10 +255,10 @@ async function main() {
UPDATE import_history UPDATE import_history
SET SET
end_time = NOW(), end_time = NOW(),
duration_seconds = ?, duration_seconds = $1,
status = ?, status = $2,
error_message = ? error_message = $3
WHERE id = ? WHERE id = $4
`, [totalElapsedSeconds, error.message === "Import cancelled" ? 'cancelled' : 'failed', error.message, importHistoryId]); `, [totalElapsedSeconds, error.message === "Import cancelled" ? 'cancelled' : 'failed', error.message, importHistoryId]);
} }

View File

@@ -9,170 +9,206 @@ async function importCategories(prodConnection, localConnection) {
const startTime = Date.now(); const startTime = Date.now();
const typeOrder = [10, 20, 11, 21, 12, 13]; const typeOrder = [10, 20, 11, 21, 12, 13];
let totalInserted = 0; let totalInserted = 0;
let totalUpdated = 0;
let skippedCategories = []; let skippedCategories = [];
try { try {
// Process each type in order with its own query // Start a single transaction for the entire import
await localConnection.query('BEGIN');
// Process each type in order with its own savepoint
for (const type of typeOrder) { for (const type of typeOrder) {
const [categories] = await prodConnection.query( try {
` // Create a savepoint for this type
SELECT await localConnection.query(`SAVEPOINT category_type_${type}`);
pc.cat_id,
pc.name,
pc.type,
CASE
WHEN pc.type IN (10, 20) THEN NULL -- Top level categories should have no parent
WHEN pc.master_cat_id IS NULL THEN NULL
ELSE pc.master_cat_id
END as parent_id,
pc.combined_name as description
FROM product_categories pc
WHERE pc.type = ?
ORDER BY pc.cat_id
`,
[type]
);
if (categories.length === 0) continue; // Production query remains MySQL compatible
const [categories] = await prodConnection.query(
console.log(`\nProcessing ${categories.length} type ${type} categories`); `
if (type === 10) { SELECT
console.log("Type 10 categories:", JSON.stringify(categories, null, 2)); pc.cat_id,
} pc.name,
pc.type,
// For types that can have parents (11, 21, 12, 13), verify parent existence CASE
let categoriesToInsert = categories; WHEN pc.type IN (10, 20) THEN NULL -- Top level categories should have no parent
if (![10, 20].includes(type)) { WHEN pc.master_cat_id IS NULL THEN NULL
// Get all parent IDs ELSE pc.master_cat_id
const parentIds = [ END as parent_id,
...new Set( pc.combined_name as description
categories.map((c) => c.parent_id).filter((id) => id !== null) FROM product_categories pc
), WHERE pc.type = ?
]; ORDER BY pc.cat_id
`,
// Check which parents exist [type]
const [existingParents] = await localConnection.query(
"SELECT cat_id FROM categories WHERE cat_id IN (?)",
[parentIds]
);
const existingParentIds = new Set(existingParents.map((p) => p.cat_id));
// Filter categories and track skipped ones
categoriesToInsert = categories.filter(
(cat) =>
cat.parent_id === null || existingParentIds.has(cat.parent_id)
);
const invalidCategories = categories.filter(
(cat) =>
cat.parent_id !== null && !existingParentIds.has(cat.parent_id)
); );
if (invalidCategories.length > 0) { if (categories.length === 0) {
const skippedInfo = invalidCategories.map((c) => ({ await localConnection.query(`RELEASE SAVEPOINT category_type_${type}`);
id: c.cat_id, continue;
name: c.name, }
type: c.type,
missing_parent: c.parent_id,
}));
skippedCategories.push(...skippedInfo);
console.log( console.log(`\nProcessing ${categories.length} type ${type} categories`);
"\nSkipping categories with missing parents:", if (type === 10) {
invalidCategories console.log("Type 10 categories:", JSON.stringify(categories, null, 2));
.map( }
(c) =>
`${c.cat_id} - ${c.name} (missing parent: ${c.parent_id})` // For types that can have parents (11, 21, 12, 13), verify parent existence
) let categoriesToInsert = categories;
.join("\n") if (![10, 20].includes(type)) {
); // Get all parent IDs
const parentIds = [
...new Set(
categories
.filter(c => c && c.parent_id !== null)
.map(c => c.parent_id)
),
];
console.log(`Processing ${categories.length} type ${type} categories with ${parentIds.length} unique parent IDs`);
console.log('Parent IDs:', parentIds);
// No need to check for parent existence - we trust they exist since they were just inserted
categoriesToInsert = categories;
} }
if (categoriesToInsert.length === 0) { if (categoriesToInsert.length === 0) {
console.log( console.log(
`No valid categories of type ${type} to insert - all had missing parents` `No valid categories of type ${type} to insert`
); );
await localConnection.query(`RELEASE SAVEPOINT category_type_${type}`);
continue; continue;
} }
console.log(
`Inserting ${categoriesToInsert.length} type ${type} categories`
);
// PostgreSQL upsert query with parameterized values
const values = categoriesToInsert.flatMap((cat) => [
cat.cat_id,
cat.name,
cat.type,
cat.parent_id,
cat.description,
'active',
new Date(),
new Date()
]);
console.log('Attempting to insert/update with values:', JSON.stringify(values, null, 2));
const placeholders = categoriesToInsert
.map((_, i) => `($${i * 8 + 1}, $${i * 8 + 2}, $${i * 8 + 3}, $${i * 8 + 4}, $${i * 8 + 5}, $${i * 8 + 6}, $${i * 8 + 7}, $${i * 8 + 8})`)
.join(',');
console.log('Using placeholders:', placeholders);
// Insert categories with ON CONFLICT clause for PostgreSQL
const query = `
WITH inserted_categories AS (
INSERT INTO categories (
cat_id, name, type, parent_id, description, status, created_at, updated_at
)
VALUES ${placeholders}
ON CONFLICT (cat_id) DO UPDATE SET
name = EXCLUDED.name,
type = EXCLUDED.type,
parent_id = EXCLUDED.parent_id,
description = EXCLUDED.description,
status = EXCLUDED.status,
updated_at = EXCLUDED.updated_at
RETURNING
cat_id,
CASE
WHEN xmax = 0 THEN true
ELSE false
END as is_insert
)
SELECT
COUNT(*) as total,
COUNT(*) FILTER (WHERE is_insert) as inserted,
COUNT(*) FILTER (WHERE NOT is_insert) as updated
FROM inserted_categories`;
console.log('Executing query:', query);
const result = await localConnection.query(query, values);
console.log('Query result:', result);
// Get the first result since query returns an array
const queryResult = Array.isArray(result) ? result[0] : result;
if (!queryResult || !queryResult.rows || !queryResult.rows[0]) {
console.error('Query failed to return results. Result:', queryResult);
throw new Error('Query did not return expected results');
}
const total = parseInt(queryResult.rows[0].total) || 0;
const inserted = parseInt(queryResult.rows[0].inserted) || 0;
const updated = parseInt(queryResult.rows[0].updated) || 0;
console.log(`Total: ${total}, Inserted: ${inserted}, Updated: ${updated}`);
totalInserted += inserted;
totalUpdated += updated;
// Release the savepoint for this type
await localConnection.query(`RELEASE SAVEPOINT category_type_${type}`);
outputProgress({
status: "running",
operation: "Categories import",
message: `Imported ${inserted} (updated ${updated}) categories of type ${type}`,
current: totalInserted + totalUpdated,
total: categories.length,
elapsed: formatElapsedTime((Date.now() - startTime) / 1000),
});
} catch (error) {
// Rollback to the savepoint for this type
await localConnection.query(`ROLLBACK TO SAVEPOINT category_type_${type}`);
throw error;
} }
console.log(
`Inserting ${categoriesToInsert.length} type ${type} categories`
);
const placeholders = categoriesToInsert
.map(() => "(?, ?, ?, ?, ?, ?, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP)")
.join(",");
const values = categoriesToInsert.flatMap((cat) => [
cat.cat_id,
cat.name,
cat.type,
cat.parent_id,
cat.description,
"active",
]);
// Insert categories and create relationships in one query to avoid race conditions
await localConnection.query(
`
INSERT INTO categories (cat_id, name, type, parent_id, description, status, created_at, updated_at)
VALUES ${placeholders}
ON DUPLICATE KEY UPDATE
name = VALUES(name),
type = VALUES(type),
parent_id = VALUES(parent_id),
description = VALUES(description),
status = VALUES(status),
updated_at = CURRENT_TIMESTAMP
`,
values
);
totalInserted += categoriesToInsert.length;
outputProgress({
status: "running",
operation: "Categories import",
current: totalInserted,
total: totalInserted,
elapsed: formatElapsedTime((Date.now() - startTime) / 1000),
});
} }
// After all imports, if we skipped any categories, throw an error // Commit the entire transaction - we'll do this even if we have skipped categories
if (skippedCategories.length > 0) { await localConnection.query('COMMIT');
const error = new Error(
"Categories import completed with errors - some categories were skipped due to missing parents"
);
error.skippedCategories = skippedCategories;
throw error;
}
outputProgress({ outputProgress({
status: "complete", status: "complete",
operation: "Categories import completed", operation: "Categories import completed",
current: totalInserted, current: totalInserted + totalUpdated,
total: totalInserted, total: totalInserted + totalUpdated,
duration: formatElapsedTime((Date.now() - startTime) / 1000), duration: formatElapsedTime((Date.now() - startTime) / 1000),
warnings: skippedCategories.length > 0 ? {
message: "Some categories were skipped due to missing parents",
skippedCategories
} : undefined
}); });
return { return {
status: "complete", status: "complete",
totalImported: totalInserted recordsAdded: totalInserted,
recordsUpdated: totalUpdated,
totalRecords: totalInserted + totalUpdated,
warnings: skippedCategories.length > 0 ? {
message: "Some categories were skipped due to missing parents",
skippedCategories
} : undefined
}; };
} catch (error) { } catch (error) {
console.error("Error importing categories:", error); console.error("Error importing categories:", error);
if (error.skippedCategories) {
console.error( // Only rollback if we haven't committed yet
"Skipped categories:", try {
JSON.stringify(error.skippedCategories, null, 2) await localConnection.query('ROLLBACK');
); } catch (rollbackError) {
console.error("Error during rollback:", rollbackError);
} }
outputProgress({ outputProgress({
status: "error", status: "error",
operation: "Categories import failed", operation: "Categories import failed",
error: error.message, error: error.message
skippedCategories: error.skippedCategories
}); });
throw error; throw error;

View File

@@ -2,7 +2,7 @@ const { outputProgress, formatElapsedTime, estimateRemaining, calculateRate } =
const { importMissingProducts, setupTemporaryTables, cleanupTemporaryTables, materializeCalculations } = require('./products'); const { importMissingProducts, setupTemporaryTables, cleanupTemporaryTables, materializeCalculations } = require('./products');
/** /**
* Imports orders from a production MySQL database to a local MySQL database. * Imports orders from a production MySQL database to a local PostgreSQL database.
* It can run in two modes: * It can run in two modes:
* 1. Incremental update mode (default): Only fetch orders that have changed since the last sync time. * 1. Incremental update mode (default): Only fetch orders that have changed since the last sync time.
* 2. Full update mode: Fetch all eligible orders within the last 5 years regardless of timestamp. * 2. Full update mode: Fetch all eligible orders within the last 5 years regardless of timestamp.
@@ -23,93 +23,18 @@ async function importOrders(prodConnection, localConnection, incrementalUpdate =
let importedCount = 0; let importedCount = 0;
let totalOrderItems = 0; let totalOrderItems = 0;
let totalUniqueOrders = 0; let totalUniqueOrders = 0;
// Add a cumulative counter for processed orders before the loop
let cumulativeProcessedOrders = 0; let cumulativeProcessedOrders = 0;
try { try {
// Clean up any existing temp tables first
await localConnection.query(`
DROP TEMPORARY TABLE IF EXISTS temp_order_items;
DROP TEMPORARY TABLE IF EXISTS temp_order_meta;
DROP TEMPORARY TABLE IF EXISTS temp_order_discounts;
DROP TEMPORARY TABLE IF EXISTS temp_order_taxes;
DROP TEMPORARY TABLE IF EXISTS temp_order_costs;
`);
// Create all temp tables with correct schema
await localConnection.query(`
CREATE TEMPORARY TABLE temp_order_items (
order_id INT UNSIGNED NOT NULL,
pid INT UNSIGNED NOT NULL,
SKU VARCHAR(50) NOT NULL,
price DECIMAL(10,2) NOT NULL,
quantity INT NOT NULL,
base_discount DECIMAL(10,2) DEFAULT 0,
PRIMARY KEY (order_id, pid)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
`);
await localConnection.query(`
CREATE TEMPORARY TABLE temp_order_meta (
order_id INT UNSIGNED NOT NULL,
date DATE NOT NULL,
customer VARCHAR(100) NOT NULL,
customer_name VARCHAR(150) NOT NULL,
status INT,
canceled TINYINT(1),
summary_discount DECIMAL(10,2) DEFAULT 0.00,
summary_subtotal DECIMAL(10,2) DEFAULT 0.00,
PRIMARY KEY (order_id)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
`);
await localConnection.query(`
CREATE TEMPORARY TABLE temp_order_discounts (
order_id INT UNSIGNED NOT NULL,
pid INT UNSIGNED NOT NULL,
discount DECIMAL(10,2) NOT NULL,
PRIMARY KEY (order_id, pid)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
`);
await localConnection.query(`
CREATE TEMPORARY TABLE temp_order_taxes (
order_id INT UNSIGNED NOT NULL,
pid INT UNSIGNED NOT NULL,
tax DECIMAL(10,2) NOT NULL,
PRIMARY KEY (order_id, pid)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
`);
await localConnection.query(`
CREATE TEMPORARY TABLE temp_order_costs (
order_id INT UNSIGNED NOT NULL,
pid INT UNSIGNED NOT NULL,
costeach DECIMAL(10,3) DEFAULT 0.000,
PRIMARY KEY (order_id, pid)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
`);
// Get column names from the local table
const [columns] = await localConnection.query(`
SELECT COLUMN_NAME
FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_NAME = 'orders'
AND COLUMN_NAME != 'updated' -- Exclude the updated column
ORDER BY ORDINAL_POSITION
`);
const columnNames = columns.map(col => col.COLUMN_NAME);
// Get last sync info // Get last sync info
const [syncInfo] = await localConnection.query( const [syncInfo] = await localConnection.query(
"SELECT last_sync_timestamp FROM sync_status WHERE table_name = 'orders'" "SELECT last_sync_timestamp FROM sync_status WHERE table_name = 'orders'"
); );
const lastSyncTime = syncInfo?.[0]?.last_sync_timestamp || '1970-01-01'; const lastSyncTime = syncInfo?.rows?.[0]?.last_sync_timestamp || '1970-01-01';
console.log('Orders: Using last sync time:', lastSyncTime); console.log('Orders: Using last sync time:', lastSyncTime);
// First get count of order items // First get count of order items - Keep MySQL compatible for production
const [[{ total }]] = await prodConnection.query(` const [[{ total }]] = await prodConnection.query(`
SELECT COUNT(*) as total SELECT COUNT(*) as total
FROM order_items oi FROM order_items oi
@@ -141,12 +66,13 @@ async function importOrders(prodConnection, localConnection, incrementalUpdate =
totalOrderItems = total; totalOrderItems = total;
console.log('Orders: Found changes:', totalOrderItems); console.log('Orders: Found changes:', totalOrderItems);
// Get order items in batches // Get order items - Keep MySQL compatible for production
console.log('Orders: Starting MySQL query...');
const [orderItems] = await prodConnection.query(` const [orderItems] = await prodConnection.query(`
SELECT SELECT
oi.order_id, oi.order_id,
oi.prod_pid as pid, oi.prod_pid,
oi.prod_itemnumber as SKU, COALESCE(NULLIF(TRIM(oi.prod_itemnumber), ''), 'NO-SKU') as SKU,
oi.prod_price as price, oi.prod_price as price,
oi.qty_ordered as quantity, oi.qty_ordered as quantity,
COALESCE(oi.prod_price_reg - oi.prod_price, 0) as base_discount, COALESCE(oi.prod_price_reg - oi.prod_price, 0) as base_discount,
@@ -177,24 +103,78 @@ async function importOrders(prodConnection, localConnection, incrementalUpdate =
` : ''} ` : ''}
`, incrementalUpdate ? [lastSyncTime, lastSyncTime, lastSyncTime] : []); `, incrementalUpdate ? [lastSyncTime, lastSyncTime, lastSyncTime] : []);
console.log('Orders: Processing', orderItems.length, 'order items'); console.log('Orders: Found', orderItems.length, 'order items to process');
// Create tables in PostgreSQL for debugging
await localConnection.query(`
DROP TABLE IF EXISTS debug_order_items;
DROP TABLE IF EXISTS debug_order_meta;
DROP TABLE IF EXISTS debug_order_discounts;
DROP TABLE IF EXISTS debug_order_taxes;
DROP TABLE IF EXISTS debug_order_costs;
CREATE TABLE debug_order_items (
order_id INTEGER NOT NULL,
pid INTEGER NOT NULL,
SKU VARCHAR(50) NOT NULL,
price DECIMAL(10,2) NOT NULL,
quantity INTEGER NOT NULL,
base_discount DECIMAL(10,2) DEFAULT 0,
PRIMARY KEY (order_id, pid)
);
CREATE TABLE debug_order_meta (
order_id INTEGER NOT NULL,
date DATE NOT NULL,
customer VARCHAR(100) NOT NULL,
customer_name VARCHAR(150) NOT NULL,
status INTEGER,
canceled BOOLEAN,
summary_discount DECIMAL(10,2) DEFAULT 0.00,
summary_subtotal DECIMAL(10,2) DEFAULT 0.00,
PRIMARY KEY (order_id)
);
CREATE TABLE debug_order_discounts (
order_id INTEGER NOT NULL,
pid INTEGER NOT NULL,
discount DECIMAL(10,2) NOT NULL,
PRIMARY KEY (order_id, pid)
);
CREATE TABLE debug_order_taxes (
order_id INTEGER NOT NULL,
pid INTEGER NOT NULL,
tax DECIMAL(10,2) NOT NULL,
PRIMARY KEY (order_id, pid)
);
CREATE TABLE debug_order_costs (
order_id INTEGER NOT NULL,
pid INTEGER NOT NULL,
costeach DECIMAL(10,3) DEFAULT 0.000,
PRIMARY KEY (order_id, pid)
);
`);
// Insert order items in batches // Insert order items in batches
for (let i = 0; i < orderItems.length; i += 5000) { for (let i = 0; i < orderItems.length; i += 5000) {
const batch = orderItems.slice(i, Math.min(i + 5000, orderItems.length)); const batch = orderItems.slice(i, Math.min(i + 5000, orderItems.length));
const placeholders = batch.map(() => "(?, ?, ?, ?, ?, ?)").join(","); const placeholders = batch.map((_, idx) =>
`($${idx * 6 + 1}, $${idx * 6 + 2}, $${idx * 6 + 3}, $${idx * 6 + 4}, $${idx * 6 + 5}, $${idx * 6 + 6})`
).join(",");
const values = batch.flatMap(item => [ const values = batch.flatMap(item => [
item.order_id, item.pid, item.SKU, item.price, item.quantity, item.base_discount item.order_id, item.prod_pid, item.SKU, item.price, item.quantity, item.base_discount
]); ]);
await localConnection.query(` await localConnection.query(`
INSERT INTO temp_order_items (order_id, pid, SKU, price, quantity, base_discount) INSERT INTO debug_order_items (order_id, pid, SKU, price, quantity, base_discount)
VALUES ${placeholders} VALUES ${placeholders}
ON DUPLICATE KEY UPDATE ON CONFLICT (order_id, pid) DO UPDATE SET
SKU = VALUES(SKU), SKU = EXCLUDED.SKU,
price = VALUES(price), price = EXCLUDED.price,
quantity = VALUES(quantity), quantity = EXCLUDED.quantity,
base_discount = VALUES(base_discount) base_discount = EXCLUDED.base_discount
`, values); `, values);
processedCount = i + batch.length; processedCount = i + batch.length;
@@ -203,24 +183,26 @@ async function importOrders(prodConnection, localConnection, incrementalUpdate =
operation: "Orders import", operation: "Orders import",
message: `Loading order items: ${processedCount} of ${totalOrderItems}`, message: `Loading order items: ${processedCount} of ${totalOrderItems}`,
current: processedCount, current: processedCount,
total: totalOrderItems total: totalOrderItems,
elapsed: formatElapsedTime((Date.now() - startTime) / 1000),
remaining: estimateRemaining(startTime, processedCount, totalOrderItems),
rate: calculateRate(startTime, processedCount)
}); });
} }
// Get unique order IDs // Get unique order IDs
const orderIds = [...new Set(orderItems.map(item => item.order_id))]; const orderIds = [...new Set(orderItems.map(item => item.order_id))];
totalUniqueOrders = orderIds.length; totalUniqueOrders = orderIds.length;
console.log('Total unique order IDs:', totalUniqueOrders); console.log('Orders: Processing', totalUniqueOrders, 'unique orders');
// Reset processed count for order processing phase // Reset processed count for order processing phase
processedCount = 0; processedCount = 0;
// Get order metadata in batches // Process metadata, discounts, taxes, and costs in parallel
for (let i = 0; i < orderIds.length; i += 5000) { const METADATA_BATCH_SIZE = 2000;
const batchIds = orderIds.slice(i, i + 5000); const PG_BATCH_SIZE = 200;
console.log(`Processing batch ${i/5000 + 1}, size: ${batchIds.length}`);
console.log('Sample of batch IDs:', batchIds.slice(0, 5)); const processMetadataBatch = async (batchIds) => {
const [orders] = await prodConnection.query(` const [orders] = await prodConnection.query(`
SELECT SELECT
o.order_id, o.order_id,
@@ -235,64 +217,46 @@ async function importOrders(prodConnection, localConnection, incrementalUpdate =
LEFT JOIN users u ON o.order_cid = u.cid LEFT JOIN users u ON o.order_cid = u.cid
WHERE o.order_id IN (?) WHERE o.order_id IN (?)
`, [batchIds]); `, [batchIds]);
console.log(`Retrieved ${orders.length} orders for ${batchIds.length} IDs`); // Process in sub-batches for PostgreSQL
const duplicates = orders.filter((order, index, self) => for (let j = 0; j < orders.length; j += PG_BATCH_SIZE) {
self.findIndex(o => o.order_id === order.order_id) !== index const subBatch = orders.slice(j, j + PG_BATCH_SIZE);
); if (subBatch.length === 0) continue;
if (duplicates.length > 0) {
console.log('Found duplicates:', duplicates); const placeholders = subBatch.map((_, idx) =>
`($${idx * 8 + 1}, $${idx * 8 + 2}, $${idx * 8 + 3}, $${idx * 8 + 4}, $${idx * 8 + 5}, $${idx * 8 + 6}, $${idx * 8 + 7}, $${idx * 8 + 8})`
).join(",");
const values = subBatch.flatMap(order => [
order.order_id,
order.date,
order.customer,
order.customer_name || '',
order.status,
order.canceled,
order.summary_discount || 0,
order.summary_subtotal || 0
]);
await localConnection.query(`
INSERT INTO debug_order_meta (
order_id, date, customer, customer_name, status, canceled,
summary_discount, summary_subtotal
)
VALUES ${placeholders}
ON CONFLICT (order_id) DO UPDATE SET
date = EXCLUDED.date,
customer = EXCLUDED.customer,
customer_name = EXCLUDED.customer_name,
status = EXCLUDED.status,
canceled = EXCLUDED.canceled,
summary_discount = EXCLUDED.summary_discount,
summary_subtotal = EXCLUDED.summary_subtotal
`, values);
} }
};
const placeholders = orders.map(() => "(?, ?, ?, ?, ?, ?, ?, ?)").join(","); const processDiscountsBatch = async (batchIds) => {
const values = orders.flatMap(order => [
order.order_id,
order.date,
order.customer,
order.customer_name,
order.status,
order.canceled,
order.summary_discount,
order.summary_subtotal
]);
await localConnection.query(`
INSERT INTO temp_order_meta (
order_id,
date,
customer,
customer_name,
status,
canceled,
summary_discount,
summary_subtotal
) VALUES ${placeholders}
ON DUPLICATE KEY UPDATE
date = VALUES(date),
customer = VALUES(customer),
customer_name = VALUES(customer_name),
status = VALUES(status),
canceled = VALUES(canceled),
summary_discount = VALUES(summary_discount),
summary_subtotal = VALUES(summary_subtotal)
`, values);
processedCount = i + orders.length;
outputProgress({
status: "running",
operation: "Orders import",
message: `Loading order metadata: ${processedCount} of ${totalUniqueOrders}`,
current: processedCount,
total: totalUniqueOrders
});
}
// Reset processed count for final phase
processedCount = 0;
// Get promotional discounts in batches
for (let i = 0; i < orderIds.length; i += 5000) {
const batchIds = orderIds.slice(i, i + 5000);
const [discounts] = await prodConnection.query(` const [discounts] = await prodConnection.query(`
SELECT order_id, pid, SUM(amount) as discount SELECT order_id, pid, SUM(amount) as discount
FROM order_discount_items FROM order_discount_items
@@ -300,313 +264,297 @@ async function importOrders(prodConnection, localConnection, incrementalUpdate =
GROUP BY order_id, pid GROUP BY order_id, pid
`, [batchIds]); `, [batchIds]);
if (discounts.length > 0) { if (discounts.length === 0) return;
const placeholders = discounts.map(() => "(?, ?, ?)").join(",");
const values = discounts.flatMap(d => [d.order_id, d.pid, d.discount]); for (let j = 0; j < discounts.length; j += PG_BATCH_SIZE) {
const subBatch = discounts.slice(j, j + PG_BATCH_SIZE);
if (subBatch.length === 0) continue;
const placeholders = subBatch.map((_, idx) =>
`($${idx * 3 + 1}, $${idx * 3 + 2}, $${idx * 3 + 3})`
).join(",");
const values = subBatch.flatMap(d => [
d.order_id,
d.pid,
d.discount || 0
]);
await localConnection.query(` await localConnection.query(`
INSERT INTO temp_order_discounts VALUES ${placeholders} INSERT INTO debug_order_discounts (order_id, pid, discount)
ON DUPLICATE KEY UPDATE VALUES ${placeholders}
discount = VALUES(discount) ON CONFLICT (order_id, pid) DO UPDATE SET
discount = EXCLUDED.discount
`, values); `, values);
} }
} };
// Get tax information in batches const processTaxesBatch = async (batchIds) => {
for (let i = 0; i < orderIds.length; i += 5000) { // Optimized tax query to avoid subquery
const batchIds = orderIds.slice(i, i + 5000);
const [taxes] = await prodConnection.query(` const [taxes] = await prodConnection.query(`
SELECT DISTINCT SELECT oti.order_id, otip.pid, otip.item_taxes_to_collect as tax
oti.order_id, FROM (
otip.pid, SELECT order_id, MAX(taxinfo_id) as latest_taxinfo_id
otip.item_taxes_to_collect as tax
FROM order_tax_info oti
JOIN (
SELECT order_id, MAX(stamp) as max_stamp
FROM order_tax_info FROM order_tax_info
WHERE order_id IN (?) WHERE order_id IN (?)
GROUP BY order_id GROUP BY order_id
) latest ON oti.order_id = latest.order_id AND oti.stamp = latest.max_stamp ) latest_info
JOIN order_tax_info oti ON oti.order_id = latest_info.order_id
AND oti.taxinfo_id = latest_info.latest_taxinfo_id
JOIN order_tax_info_products otip ON oti.taxinfo_id = otip.taxinfo_id JOIN order_tax_info_products otip ON oti.taxinfo_id = otip.taxinfo_id
`, [batchIds]); `, [batchIds]);
if (taxes.length > 0) { if (taxes.length === 0) return;
// Remove any duplicates before inserting
const uniqueTaxes = new Map();
taxes.forEach(t => {
const key = `${t.order_id}-${t.pid}`;
uniqueTaxes.set(key, t);
});
const values = Array.from(uniqueTaxes.values()).flatMap(t => [t.order_id, t.pid, t.tax]); for (let j = 0; j < taxes.length; j += PG_BATCH_SIZE) {
if (values.length > 0) { const subBatch = taxes.slice(j, j + PG_BATCH_SIZE);
const placeholders = Array(uniqueTaxes.size).fill("(?, ?, ?)").join(","); if (subBatch.length === 0) continue;
await localConnection.query(`
INSERT INTO temp_order_taxes VALUES ${placeholders}
ON DUPLICATE KEY UPDATE tax = VALUES(tax)
`, values);
}
}
}
// Get costeach values in batches const placeholders = subBatch.map((_, idx) =>
for (let i = 0; i < orderIds.length; i += 5000) { `($${idx * 3 + 1}, $${idx * 3 + 2}, $${idx * 3 + 3})`
const batchIds = orderIds.slice(i, i + 5000); ).join(",");
const [costs] = await prodConnection.query(`
SELECT const values = subBatch.flatMap(t => [
oc.orderid as order_id, t.order_id,
oc.pid, t.pid,
COALESCE( t.tax || 0
oc.costeach, ]);
(SELECT pi.costeach
FROM product_inventory pi
WHERE pi.pid = oc.pid
AND pi.daterec <= o.date_placed
ORDER BY pi.daterec DESC LIMIT 1)
) as costeach
FROM order_costs oc
JOIN _order o ON oc.orderid = o.order_id
WHERE oc.orderid IN (?)
`, [batchIds]);
if (costs.length > 0) {
const placeholders = costs.map(() => '(?, ?, ?)').join(",");
const values = costs.flatMap(c => [c.order_id, c.pid, c.costeach || 0]);
await localConnection.query(` await localConnection.query(`
INSERT INTO temp_order_costs (order_id, pid, costeach) INSERT INTO debug_order_taxes (order_id, pid, tax)
VALUES ${placeholders} VALUES ${placeholders}
ON DUPLICATE KEY UPDATE costeach = VALUES(costeach) ON CONFLICT (order_id, pid) DO UPDATE SET
tax = EXCLUDED.tax
`, values); `, values);
} }
} };
// Now combine all the data and insert into orders table const processCostsBatch = async (batchIds) => {
// Pre-check all products at once instead of per batch const [costs] = await prodConnection.query(`
const allOrderPids = [...new Set(orderItems.map(item => item.pid))];
const [existingProducts] = allOrderPids.length > 0 ? await localConnection.query(
"SELECT pid FROM products WHERE pid IN (?)",
[allOrderPids]
) : [[]];
const existingPids = new Set(existingProducts.map(p => p.pid));
// Process in larger batches
for (let i = 0; i < orderIds.length; i += 5000) {
const batchIds = orderIds.slice(i, i + 5000);
// Get combined data for this batch
const [orders] = await localConnection.query(`
SELECT SELECT
oi.order_id as order_number, oi.order_id,
oi.pid, oi.prod_pid as pid,
oi.SKU, oi.prod_price as costeach
om.date, FROM order_items oi
oi.price,
oi.quantity,
oi.base_discount + COALESCE(od.discount, 0) +
CASE
WHEN om.summary_discount > 0 THEN
ROUND((om.summary_discount * (oi.price * oi.quantity)) /
NULLIF(om.summary_subtotal, 0), 2)
ELSE 0
END as discount,
COALESCE(ot.tax, 0) as tax,
0 as tax_included,
0 as shipping,
om.customer,
om.customer_name,
om.status,
om.canceled,
COALESCE(tc.costeach, 0) as costeach
FROM temp_order_items oi
JOIN temp_order_meta om ON oi.order_id = om.order_id
LEFT JOIN temp_order_discounts od ON oi.order_id = od.order_id AND oi.pid = od.pid
LEFT JOIN temp_order_taxes ot ON oi.order_id = ot.order_id AND oi.pid = ot.pid
LEFT JOIN temp_order_costs tc ON oi.order_id = tc.order_id AND oi.pid = tc.pid
WHERE oi.order_id IN (?) WHERE oi.order_id IN (?)
`, [batchIds]); `, [batchIds]);
// Filter orders and track missing products - do this in a single pass if (costs.length === 0) return;
const validOrders = [];
const values = [];
const processedOrderItems = new Set(); // Track unique order items
const processedOrders = new Set(); // Track unique orders
for (const order of orders) {
if (!existingPids.has(order.pid)) {
missingProducts.add(order.pid);
skippedOrders.add(order.order_number);
continue;
}
validOrders.push(order);
values.push(...columnNames.map(col => order[col] ?? null));
processedOrderItems.add(`${order.order_number}-${order.pid}`);
processedOrders.add(order.order_number);
}
if (validOrders.length > 0) { for (let j = 0; j < costs.length; j += PG_BATCH_SIZE) {
// Pre-compute the placeholders string once const subBatch = costs.slice(j, j + PG_BATCH_SIZE);
const singlePlaceholder = `(${columnNames.map(() => "?").join(",")})`; if (subBatch.length === 0) continue;
const placeholders = Array(validOrders.length).fill(singlePlaceholder).join(",");
const placeholders = subBatch.map((_, idx) =>
`($${idx * 3 + 1}, $${idx * 3 + 2}, $${idx * 3 + 3})`
).join(",");
const result = await localConnection.query(` const values = subBatch.flatMap(c => [
INSERT INTO orders (${columnNames.join(",")}) c.order_id,
c.pid,
c.costeach || 0
]);
await localConnection.query(`
INSERT INTO debug_order_costs (order_id, pid, costeach)
VALUES ${placeholders} VALUES ${placeholders}
ON DUPLICATE KEY UPDATE ON CONFLICT (order_id, pid) DO UPDATE SET
SKU = VALUES(SKU), costeach = EXCLUDED.costeach
date = VALUES(date), `, values);
price = VALUES(price),
quantity = VALUES(quantity),
discount = VALUES(discount),
tax = VALUES(tax),
tax_included = VALUES(tax_included),
shipping = VALUES(shipping),
customer = VALUES(customer),
customer_name = VALUES(customer_name),
status = VALUES(status),
canceled = VALUES(canceled),
costeach = VALUES(costeach)
`, validOrders.map(o => columnNames.map(col => o[col] ?? null)).flat());
const affectedRows = result[0].affectedRows;
const updates = Math.floor(affectedRows / 2);
const inserts = affectedRows - (updates * 2);
recordsAdded += inserts;
recordsUpdated += updates;
importedCount += processedOrderItems.size; // Count unique order items processed
} }
};
// Update progress based on unique orders processed // Process all data types in parallel for each batch
cumulativeProcessedOrders += processedOrders.size; for (let i = 0; i < orderIds.length; i += METADATA_BATCH_SIZE) {
const batchIds = orderIds.slice(i, i + METADATA_BATCH_SIZE);
await Promise.all([
processMetadataBatch(batchIds),
processDiscountsBatch(batchIds),
processTaxesBatch(batchIds),
processCostsBatch(batchIds)
]);
processedCount = i + batchIds.length;
outputProgress({ outputProgress({
status: "running", status: "running",
operation: "Orders import", operation: "Orders import",
message: `Imported ${importedCount} order items (${cumulativeProcessedOrders} of ${totalUniqueOrders} orders processed)`, message: `Loading order data: ${processedCount} of ${totalUniqueOrders}`,
current: cumulativeProcessedOrders, current: processedCount,
total: totalUniqueOrders, total: totalUniqueOrders,
elapsed: formatElapsedTime((Date.now() - startTime) / 1000), elapsed: formatElapsedTime((Date.now() - startTime) / 1000),
remaining: estimateRemaining(startTime, cumulativeProcessedOrders, totalUniqueOrders), remaining: estimateRemaining(startTime, processedCount, totalUniqueOrders),
rate: calculateRate(startTime, cumulativeProcessedOrders) rate: calculateRate(startTime, processedCount)
}); });
} }
// Now try to import any orders that were skipped due to missing products // Pre-check all products at once
if (skippedOrders.size > 0) { const allOrderPids = [...new Set(orderItems.map(item => item.prod_pid))];
try { console.log('Orders: Checking', allOrderPids.length, 'unique products');
outputProgress({
status: "running", const [existingProducts] = allOrderPids.length > 0 ? await localConnection.query(
operation: "Orders import", "SELECT pid FROM products WHERE pid = ANY($1::bigint[])",
message: `Retrying import of ${skippedOrders.size} orders with previously missing products`, [allOrderPids]
}); ) : [[]];
const existingPids = new Set(existingProducts.rows.map(p => p.pid));
// Process in smaller batches
for (let i = 0; i < orderIds.length; i += 1000) {
const batchIds = orderIds.slice(i, i + 1000);
// Get the orders that were skipped // Get combined data for this batch in sub-batches
const [skippedProdOrders] = await localConnection.query(` const PG_BATCH_SIZE = 100; // Process 100 records at a time
SELECT DISTINCT for (let j = 0; j < batchIds.length; j += PG_BATCH_SIZE) {
const subBatchIds = batchIds.slice(j, j + PG_BATCH_SIZE);
const [orders] = await localConnection.query(`
WITH order_totals AS (
SELECT
oi.order_id,
oi.pid,
SUM(COALESCE(od.discount, 0)) as promo_discount,
COALESCE(ot.tax, 0) as total_tax
FROM debug_order_items oi
LEFT JOIN debug_order_discounts od ON oi.order_id = od.order_id AND oi.pid = od.pid
LEFT JOIN debug_order_taxes ot ON oi.order_id = ot.order_id AND oi.pid = ot.pid
GROUP BY oi.order_id, oi.pid, ot.tax
)
SELECT
oi.order_id as order_number, oi.order_id as order_number,
oi.pid, oi.pid::bigint as pid,
oi.SKU, oi.SKU as sku,
om.date, om.date,
oi.price, oi.price,
oi.quantity, oi.quantity,
oi.base_discount + COALESCE(od.discount, 0) + (oi.base_discount +
CASE COALESCE(ot.promo_discount, 0) +
WHEN o.summary_discount > 0 THEN CASE
ROUND((o.summary_discount * (oi.price * oi.quantity)) / WHEN om.summary_discount > 0 AND om.summary_subtotal > 0 THEN
NULLIF(o.summary_subtotal, 0), 2) ROUND((om.summary_discount * (oi.price * oi.quantity)) / NULLIF(om.summary_subtotal, 0), 2)
ELSE 0 ELSE 0
END as discount, END)::DECIMAL(10,2) as discount,
COALESCE(ot.tax, 0) as tax, COALESCE(ot.total_tax, 0)::DECIMAL(10,2) as tax,
0 as tax_included, false as tax_included,
0 as shipping, 0 as shipping,
om.customer, om.customer,
om.customer_name, om.customer_name,
om.status, om.status,
om.canceled, om.canceled,
COALESCE(tc.costeach, 0) as costeach COALESCE(oc.costeach, oi.price)::DECIMAL(10,3) as costeach
FROM temp_order_items oi FROM (
JOIN temp_order_meta om ON oi.order_id = om.order_id SELECT DISTINCT ON (order_id, pid)
LEFT JOIN _order o ON oi.order_id = o.order_id order_id, pid, SKU, price, quantity, base_discount
LEFT JOIN temp_order_discounts od ON oi.order_id = od.order_id AND oi.pid = od.pid FROM debug_order_items
LEFT JOIN temp_order_taxes ot ON oi.order_id = ot.order_id AND oi.pid = ot.pid WHERE order_id = ANY($1)
LEFT JOIN temp_order_costs tc ON oi.order_id = tc.order_id AND oi.pid = tc.pid ORDER BY order_id, pid
WHERE oi.order_id IN (?) ) oi
`, [Array.from(skippedOrders)]); JOIN debug_order_meta om ON oi.order_id = om.order_id
LEFT JOIN order_totals ot ON oi.order_id = ot.order_id AND oi.pid = ot.pid
LEFT JOIN debug_order_costs oc ON oi.order_id = oc.order_id AND oi.pid = oc.pid
ORDER BY oi.order_id, oi.pid
`, [subBatchIds]);
// Check which products exist now // Filter orders and track missing products
const skippedPids = [...new Set(skippedProdOrders.map(o => o.pid))]; const validOrders = [];
const [existingProducts] = skippedPids.length > 0 ? await localConnection.query( const processedOrderItems = new Set();
"SELECT pid FROM products WHERE pid IN (?)", const processedOrders = new Set();
[skippedPids]
) : [[]]; for (const order of orders.rows) {
const existingPids = new Set(existingProducts.map(p => p.pid)); if (!existingPids.has(order.pid)) {
missingProducts.add(order.pid);
// Filter orders that can now be imported skippedOrders.add(order.order_number);
const validOrders = skippedProdOrders.filter(order => existingPids.has(order.pid)); continue;
const retryOrderItems = new Set(); // Track unique order items in retry }
validOrders.push(order);
if (validOrders.length > 0) { processedOrderItems.add(`${order.order_number}-${order.pid}`);
const placeholders = validOrders.map(() => `(${columnNames.map(() => "?").join(", ")})`).join(","); processedOrders.add(order.order_number);
const values = validOrders.map(o => columnNames.map(col => o[col] ?? null)).flat();
const result = await localConnection.query(`
INSERT INTO orders (${columnNames.join(", ")})
VALUES ${placeholders}
ON DUPLICATE KEY UPDATE
SKU = VALUES(SKU),
date = VALUES(date),
price = VALUES(price),
quantity = VALUES(quantity),
discount = VALUES(discount),
tax = VALUES(tax),
tax_included = VALUES(tax_included),
shipping = VALUES(shipping),
customer = VALUES(customer),
customer_name = VALUES(customer_name),
status = VALUES(status),
canceled = VALUES(canceled),
costeach = VALUES(costeach)
`, values);
const affectedRows = result[0].affectedRows;
const updates = Math.floor(affectedRows / 2);
const inserts = affectedRows - (updates * 2);
// Track unique order items
validOrders.forEach(order => {
retryOrderItems.add(`${order.order_number}-${order.pid}`);
});
outputProgress({
status: "running",
operation: "Orders import",
message: `Successfully imported ${retryOrderItems.size} previously skipped order items`,
});
// Update the main counters
recordsAdded += inserts;
recordsUpdated += updates;
importedCount += retryOrderItems.size;
} }
} catch (error) {
console.warn('Warning: Failed to retry skipped orders:', error.message); // Process valid orders in smaller sub-batches
console.warn(`Skipped ${skippedOrders.size} orders due to ${missingProducts.size} missing products`); const FINAL_BATCH_SIZE = 50;
for (let k = 0; k < validOrders.length; k += FINAL_BATCH_SIZE) {
const subBatch = validOrders.slice(k, k + FINAL_BATCH_SIZE);
const placeholders = subBatch.map((_, idx) => {
const base = idx * 15;
return `($${base + 1}, $${base + 2}, $${base + 3}, $${base + 4}, $${base + 5}, $${base + 6}, $${base + 7}, $${base + 8}, $${base + 9}, $${base + 10}, $${base + 11}, $${base + 12}, $${base + 13}, $${base + 14}, $${base + 15})`;
}).join(',');
const batchValues = subBatch.flatMap(o => [
o.order_number,
o.pid,
o.sku || 'NO-SKU',
o.date,
o.price,
o.quantity,
o.discount,
o.tax,
o.tax_included,
o.shipping,
o.customer,
o.customer_name,
o.status,
o.canceled,
o.costeach
]);
const [result] = await localConnection.query(`
WITH inserted_orders AS (
INSERT INTO orders (
order_number, pid, SKU, date, price, quantity, discount,
tax, tax_included, shipping, customer, customer_name,
status, canceled, costeach
)
VALUES ${placeholders}
ON CONFLICT (order_number, pid) DO UPDATE SET
SKU = EXCLUDED.SKU,
date = EXCLUDED.date,
price = EXCLUDED.price,
quantity = EXCLUDED.quantity,
discount = EXCLUDED.discount,
tax = EXCLUDED.tax,
tax_included = EXCLUDED.tax_included,
shipping = EXCLUDED.shipping,
customer = EXCLUDED.customer,
customer_name = EXCLUDED.customer_name,
status = EXCLUDED.status,
canceled = EXCLUDED.canceled,
costeach = EXCLUDED.costeach
RETURNING xmax, xmin
)
SELECT
COUNT(*) FILTER (WHERE xmax = 0) as inserted,
COUNT(*) FILTER (WHERE xmax <> 0) as updated
FROM inserted_orders
`, batchValues);
const { inserted, updated } = result.rows[0];
recordsAdded += inserted;
recordsUpdated += updated;
importedCount += subBatch.length;
}
cumulativeProcessedOrders += processedOrders.size;
outputProgress({
status: "running",
operation: "Orders import",
message: `Importing orders: ${cumulativeProcessedOrders} of ${totalUniqueOrders}`,
current: cumulativeProcessedOrders,
total: totalUniqueOrders,
elapsed: formatElapsedTime((Date.now() - startTime) / 1000),
remaining: estimateRemaining(startTime, cumulativeProcessedOrders, totalUniqueOrders),
rate: calculateRate(startTime, cumulativeProcessedOrders)
});
} }
} }
// Clean up temporary tables after ALL processing is complete // Update sync status
await localConnection.query(`
DROP TEMPORARY TABLE IF EXISTS temp_order_items;
DROP TEMPORARY TABLE IF EXISTS temp_order_meta;
DROP TEMPORARY TABLE IF EXISTS temp_order_discounts;
DROP TEMPORARY TABLE IF EXISTS temp_order_taxes;
DROP TEMPORARY TABLE IF EXISTS temp_order_costs;
`);
// Only update sync status if we get here (no errors thrown)
await localConnection.query(` await localConnection.query(`
INSERT INTO sync_status (table_name, last_sync_timestamp) INSERT INTO sync_status (table_name, last_sync_timestamp)
VALUES ('orders', NOW()) VALUES ('orders', NOW())
ON DUPLICATE KEY UPDATE last_sync_timestamp = NOW() ON CONFLICT (table_name) DO UPDATE SET
last_sync_timestamp = NOW()
`); `);
return { return {

File diff suppressed because it is too large Load Diff

View File

@@ -10,22 +10,38 @@ async function importPurchaseOrders(prodConnection, localConnection, incremental
const [syncInfo] = await localConnection.query( const [syncInfo] = await localConnection.query(
"SELECT last_sync_timestamp FROM sync_status WHERE table_name = 'purchase_orders'" "SELECT last_sync_timestamp FROM sync_status WHERE table_name = 'purchase_orders'"
); );
const lastSyncTime = syncInfo?.[0]?.last_sync_timestamp || '1970-01-01'; const lastSyncTime = syncInfo?.rows?.[0]?.last_sync_timestamp || '1970-01-01';
console.log('Purchase Orders: Using last sync time:', lastSyncTime); console.log('Purchase Orders: Using last sync time:', lastSyncTime);
// Insert temporary table creation query for purchase orders // Create temporary tables with PostgreSQL syntax
await localConnection.query(` await localConnection.query(`
CREATE TABLE IF NOT EXISTS temp_purchase_orders ( DROP TABLE IF EXISTS temp_purchase_orders;
po_id INT UNSIGNED NOT NULL, DROP TABLE IF EXISTS temp_po_receivings;
pid INT UNSIGNED NOT NULL,
CREATE TEMP TABLE temp_purchase_orders (
po_id INTEGER NOT NULL,
pid INTEGER NOT NULL,
vendor VARCHAR(255), vendor VARCHAR(255),
date DATE, date DATE,
expected_date DATE, expected_date DATE,
status INT, status INTEGER,
notes TEXT, notes TEXT,
PRIMARY KEY (po_id, pid) PRIMARY KEY (po_id, pid)
) ENGINE=InnoDB DEFAULT CHARSET=utf8; );
CREATE TEMP TABLE temp_po_receivings (
po_id INTEGER,
pid INTEGER NOT NULL,
receiving_id INTEGER NOT NULL,
qty_each INTEGER,
cost_each DECIMAL(10,3),
received_date TIMESTAMP,
received_by INTEGER,
received_by_name VARCHAR(255),
is_alt_po INTEGER,
PRIMARY KEY (receiving_id, pid)
);
`); `);
outputProgress({ outputProgress({
@@ -33,8 +49,8 @@ async function importPurchaseOrders(prodConnection, localConnection, incremental
status: "running", status: "running",
}); });
// Get column names first // Get column names - Keep MySQL compatible for production
const [columns] = await localConnection.query(` const [columns] = await prodConnection.query(`
SELECT COLUMN_NAME SELECT COLUMN_NAME
FROM INFORMATION_SCHEMA.COLUMNS FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_NAME = 'purchase_orders' WHERE TABLE_NAME = 'purchase_orders'
@@ -60,7 +76,7 @@ async function importPurchaseOrders(prodConnection, localConnection, incremental
? [lastSyncTime, lastSyncTime, lastSyncTime, lastSyncTime, lastSyncTime, lastSyncTime, lastSyncTime, lastSyncTime] ? [lastSyncTime, lastSyncTime, lastSyncTime, lastSyncTime, lastSyncTime, lastSyncTime, lastSyncTime, lastSyncTime]
: []; : [];
// First get all relevant PO IDs with basic info // First get all relevant PO IDs with basic info - Keep MySQL compatible for production
const [[{ total }]] = await prodConnection.query(` const [[{ total }]] = await prodConnection.query(`
SELECT COUNT(*) as total SELECT COUNT(*) as total
FROM ( FROM (
@@ -99,6 +115,7 @@ async function importPurchaseOrders(prodConnection, localConnection, incremental
console.log('Purchase Orders: Found changes:', total); console.log('Purchase Orders: Found changes:', total);
// Get PO list - Keep MySQL compatible for production
const [poList] = await prodConnection.query(` const [poList] = await prodConnection.query(`
SELECT DISTINCT SELECT DISTINCT
COALESCE(p.po_id, r.receiving_id) as po_id, COALESCE(p.po_id, r.receiving_id) as po_id,
@@ -185,7 +202,7 @@ async function importPurchaseOrders(prodConnection, localConnection, incremental
const batch = poList.slice(i, Math.min(i + BATCH_SIZE, poList.length)); const batch = poList.slice(i, Math.min(i + BATCH_SIZE, poList.length));
const poIds = batch.map(po => po.po_id); const poIds = batch.map(po => po.po_id);
// Get all products for these POs in one query // Get all products for these POs in one query - Keep MySQL compatible for production
const [poProducts] = await prodConnection.query(` const [poProducts] = await prodConnection.query(`
SELECT SELECT
pop.po_id, pop.po_id,
@@ -207,7 +224,7 @@ async function importPurchaseOrders(prodConnection, localConnection, incremental
const productPids = [...new Set(productBatch.map(p => p.pid))]; const productPids = [...new Set(productBatch.map(p => p.pid))];
const batchPoIds = [...new Set(productBatch.map(p => p.po_id))]; const batchPoIds = [...new Set(productBatch.map(p => p.po_id))];
// Get receivings for this batch with employee names // Get receivings for this batch with employee names - Keep MySQL compatible for production
const [receivings] = await prodConnection.query(` const [receivings] = await prodConnection.query(`
SELECT SELECT
r.po_id, r.po_id,
@@ -232,315 +249,176 @@ async function importPurchaseOrders(prodConnection, localConnection, incremental
ORDER BY r.po_id, rp.pid, rp.received_date ORDER BY r.po_id, rp.pid, rp.received_date
`, [batchPoIds, productPids]); `, [batchPoIds, productPids]);
// Create maps for this sub-batch // Insert receivings into temp table
const poProductMap = new Map(); if (receivings.length > 0) {
productBatch.forEach(product => { const placeholders = receivings.map((_, idx) => {
const key = `${product.po_id}-${product.pid}`; const base = idx * 9;
poProductMap.set(key, product); return `($${base + 1}, $${base + 2}, $${base + 3}, $${base + 4}, $${base + 5}, $${base + 6}, $${base + 7}, $${base + 8}, $${base + 9})`;
}); }).join(',');
const receivingMap = new Map(); const values = receivings.flatMap(r => [
const altReceivingMap = new Map(); r.po_id,
const noPOReceivingMap = new Map(); r.pid,
r.receiving_id,
receivings.forEach(receiving => { r.qty_each,
const key = `${receiving.po_id}-${receiving.pid}`; r.cost_each,
if (receiving.is_alt_po === 2) { r.received_date,
// No PO r.received_by,
if (!noPOReceivingMap.has(receiving.pid)) { r.received_by_name,
noPOReceivingMap.set(receiving.pid, []); r.is_alt_po
} ]);
noPOReceivingMap.get(receiving.pid).push(receiving);
} else if (receiving.is_alt_po === 1) {
// Different PO
if (!altReceivingMap.has(receiving.pid)) {
altReceivingMap.set(receiving.pid, []);
}
altReceivingMap.get(receiving.pid).push(receiving);
} else {
// Original PO
if (!receivingMap.has(key)) {
receivingMap.set(key, []);
}
receivingMap.get(key).push(receiving);
}
});
// Verify PIDs exist await localConnection.query(`
const [existingPids] = await localConnection.query( INSERT INTO temp_po_receivings (
'SELECT pid FROM products WHERE pid IN (?)', po_id, pid, receiving_id, qty_each, cost_each, received_date,
[productPids] received_by, received_by_name, is_alt_po
); )
const validPids = new Set(existingPids.map(p => p.pid)); VALUES ${placeholders}
ON CONFLICT (receiving_id, pid) DO UPDATE SET
po_id = EXCLUDED.po_id,
qty_each = EXCLUDED.qty_each,
cost_each = EXCLUDED.cost_each,
received_date = EXCLUDED.received_date,
received_by = EXCLUDED.received_by,
received_by_name = EXCLUDED.received_by_name,
is_alt_po = EXCLUDED.is_alt_po
`, values);
}
// First check which PO lines already exist and get their current values // Process each PO product
const poLines = Array.from(poProductMap.values()) for (const product of productBatch) {
.filter(p => validPids.has(p.pid)) const po = batch.find(p => p.po_id === product.po_id);
.map(p => [p.po_id, p.pid]); if (!po) continue;
const [existingPOs] = await localConnection.query( // Insert into temp_purchase_orders
`SELECT ${columnNames.join(',')} FROM purchase_orders WHERE (po_id, pid) IN (${poLines.map(() => "(?,?)").join(",")})`, const placeholders = `($1, $2, $3, $4, $5, $6, $7)`;
poLines.flat() const values = [
); product.po_id,
const existingPOMap = new Map( product.pid,
existingPOs.map(po => [`${po.po_id}-${po.pid}`, po]) po.vendor,
); po.date,
po.expected_date,
po.status,
po.notes || po.long_note
];
// Split into inserts and updates await localConnection.query(`
const insertsAndUpdates = { inserts: [], updates: [] }; INSERT INTO temp_purchase_orders (
let batchProcessed = 0; po_id, pid, vendor, date, expected_date, status, notes
)
VALUES ${placeholders}
ON CONFLICT (po_id, pid) DO UPDATE SET
vendor = EXCLUDED.vendor,
date = EXCLUDED.date,
expected_date = EXCLUDED.expected_date,
status = EXCLUDED.status,
notes = EXCLUDED.notes
`, values);
for (const po of batch) { processed++;
const poProducts = Array.from(poProductMap.values())
.filter(p => p.po_id === po.po_id && validPids.has(p.pid)); // Update progress periodically
if (Date.now() - lastProgressUpdate > PROGRESS_INTERVAL) {
for (const product of poProducts) { outputProgress({
const key = `${po.po_id}-${product.pid}`; status: "running",
const receivingHistory = receivingMap.get(key) || []; operation: "Purchase orders import",
const altReceivingHistory = altReceivingMap.get(product.pid) || []; message: `Processing purchase orders: ${processed} of ${totalItems}`,
const noPOReceivingHistory = noPOReceivingMap.get(product.pid) || []; current: processed,
total: totalItems,
// Combine all receivings and sort by date elapsed: formatElapsedTime((Date.now() - startTime) / 1000),
const allReceivings = [ remaining: estimateRemaining(startTime, processed, totalItems),
...receivingHistory.map(r => ({ ...r, type: 'original' })), rate: calculateRate(startTime, processed)
...altReceivingHistory.map(r => ({ ...r, type: 'alternate' })),
...noPOReceivingHistory.map(r => ({ ...r, type: 'no_po' }))
].sort((a, b) => new Date(a.received_date || '9999-12-31') - new Date(b.received_date || '9999-12-31'));
// Split receivings into original PO and others
const originalPOReceivings = allReceivings.filter(r => r.type === 'original');
const otherReceivings = allReceivings.filter(r => r.type !== 'original');
// Track FIFO fulfillment
let remainingToFulfill = product.ordered;
const fulfillmentTracking = [];
let totalReceived = 0;
let actualCost = null; // Will store the cost of the first receiving that fulfills this PO
let firstFulfillmentReceiving = null;
let lastFulfillmentReceiving = null;
for (const receiving of allReceivings) {
// Convert quantities to base units using supplier data
const baseQtyReceived = receiving.qty_each * (
receiving.type === 'original' ? 1 :
Math.max(1, product.supplier_qty_per_unit || 1)
);
const qtyToApply = Math.min(remainingToFulfill, baseQtyReceived);
if (qtyToApply > 0) {
// If this is the first receiving being applied, use its cost
if (actualCost === null && receiving.cost_each > 0) {
actualCost = receiving.cost_each;
firstFulfillmentReceiving = receiving;
}
lastFulfillmentReceiving = receiving;
fulfillmentTracking.push({
receiving_id: receiving.receiving_id,
qty_applied: qtyToApply,
qty_total: baseQtyReceived,
cost: receiving.cost_each || actualCost || product.cost_each,
date: receiving.received_date,
received_by: receiving.received_by,
received_by_name: receiving.received_by_name || 'Unknown',
type: receiving.type,
remaining_qty: baseQtyReceived - qtyToApply
});
remainingToFulfill -= qtyToApply;
} else {
// Track excess receivings
fulfillmentTracking.push({
receiving_id: receiving.receiving_id,
qty_applied: 0,
qty_total: baseQtyReceived,
cost: receiving.cost_each || actualCost || product.cost_each,
date: receiving.received_date,
received_by: receiving.received_by,
received_by_name: receiving.received_by_name || 'Unknown',
type: receiving.type,
is_excess: true
});
}
totalReceived += baseQtyReceived;
}
const receiving_status = !totalReceived ? 1 : // created
remainingToFulfill > 0 ? 30 : // partial
40; // full
function formatDate(dateStr) {
if (!dateStr) return null;
if (dateStr === '0000-00-00' || dateStr === '0000-00-00 00:00:00') return null;
if (typeof dateStr === 'string' && !dateStr.match(/^\d{4}-\d{2}-\d{2}/)) return null;
try {
const date = new Date(dateStr);
if (isNaN(date.getTime())) return null;
if (date.getFullYear() < 1900 || date.getFullYear() > 2100) return null;
return date.toISOString().split('T')[0];
} catch (e) {
return null;
}
}
const rowValues = columnNames.map(col => {
switch (col) {
case 'po_id': return po.po_id;
case 'vendor': return po.vendor;
case 'date': return formatDate(po.date);
case 'expected_date': return formatDate(po.expected_date);
case 'pid': return product.pid;
case 'sku': return product.sku;
case 'name': return product.name;
case 'cost_price': return actualCost || product.cost_each;
case 'po_cost_price': return product.cost_each;
case 'status': return po.status;
case 'notes': return po.notes;
case 'long_note': return po.long_note;
case 'ordered': return product.ordered;
case 'received': return totalReceived;
case 'unfulfilled': return remainingToFulfill;
case 'excess_received': return Math.max(0, totalReceived - product.ordered);
case 'received_date': return formatDate(firstFulfillmentReceiving?.received_date);
case 'last_received_date': return formatDate(lastFulfillmentReceiving?.received_date);
case 'received_by': return firstFulfillmentReceiving?.received_by_name || null;
case 'receiving_status': return receiving_status;
case 'receiving_history': return JSON.stringify({
fulfillment: fulfillmentTracking,
ordered_qty: product.ordered,
total_received: totalReceived,
remaining_unfulfilled: remainingToFulfill,
excess_received: Math.max(0, totalReceived - product.ordered),
po_cost: product.cost_each,
actual_cost: actualCost || product.cost_each
});
default: return null;
}
}); });
lastProgressUpdate = Date.now();
if (existingPOMap.has(key)) {
const existing = existingPOMap.get(key);
// Check if any values are different
const hasChanges = columnNames.some(col => {
const newVal = rowValues[columnNames.indexOf(col)];
const oldVal = existing[col] ?? null;
// Special handling for numbers to avoid type coercion issues
if (typeof newVal === 'number' && typeof oldVal === 'number') {
return Math.abs(newVal - oldVal) > 0.00001; // Allow for tiny floating point differences
}
// Special handling for receiving_history - parse and compare
if (col === 'receiving_history') {
const newHistory = JSON.parse(newVal || '{}');
const oldHistory = JSON.parse(oldVal || '{}');
return JSON.stringify(newHistory) !== JSON.stringify(oldHistory);
}
return newVal !== oldVal;
});
if (hasChanges) {
insertsAndUpdates.updates.push({
po_id: po.po_id,
pid: product.pid,
values: rowValues
});
}
} else {
insertsAndUpdates.inserts.push({
po_id: po.po_id,
pid: product.pid,
values: rowValues
});
}
batchProcessed++;
} }
} }
// Handle inserts
if (insertsAndUpdates.inserts.length > 0) {
const insertPlaceholders = insertsAndUpdates.inserts
.map(() => `(${Array(columnNames.length).fill("?").join(",")})`)
.join(",");
const insertResult = await localConnection.query(`
INSERT INTO purchase_orders (${columnNames.join(",")})
VALUES ${insertPlaceholders}
`, insertsAndUpdates.inserts.map(i => i.values).flat());
const affectedRows = insertResult[0].affectedRows;
// For an upsert, MySQL counts rows twice for updates
// So if affectedRows is odd, we have (updates * 2 + inserts)
const updates = Math.floor(affectedRows / 2);
const inserts = affectedRows - (updates * 2);
recordsAdded += inserts;
recordsUpdated += Math.floor(updates); // Ensure we never have fractional updates
processed += batchProcessed;
}
// Handle updates - now we know these actually have changes
if (insertsAndUpdates.updates.length > 0) {
const updatePlaceholders = insertsAndUpdates.updates
.map(() => `(${Array(columnNames.length).fill("?").join(",")})`)
.join(",");
const updateResult = await localConnection.query(`
INSERT INTO purchase_orders (${columnNames.join(",")})
VALUES ${updatePlaceholders}
ON DUPLICATE KEY UPDATE ${columnNames
.filter((col) => col !== "po_id" && col !== "pid")
.map((col) => `${col} = VALUES(${col})`)
.join(",")};
`, insertsAndUpdates.updates.map(u => u.values).flat());
const affectedRows = updateResult[0].affectedRows;
// For an upsert, MySQL counts rows twice for updates
// So if affectedRows is odd, we have (updates * 2 + inserts)
const updates = Math.floor(affectedRows / 2);
const inserts = affectedRows - (updates * 2);
recordsUpdated += Math.floor(updates); // Ensure we never have fractional updates
processed += batchProcessed;
}
// Update progress based on time interval
const now = Date.now();
if (now - lastProgressUpdate >= PROGRESS_INTERVAL || processed === totalItems) {
outputProgress({
status: "running",
operation: "Purchase orders import",
current: processed,
total: totalItems,
elapsed: formatElapsedTime((Date.now() - startTime) / 1000),
remaining: estimateRemaining(startTime, processed, totalItems),
rate: calculateRate(startTime, processed)
});
lastProgressUpdate = now;
}
} }
} }
// Only update sync status if we get here (no errors thrown) // Insert final data into purchase_orders table
const [result] = await localConnection.query(`
WITH inserted_pos AS (
INSERT INTO purchase_orders (
po_id, pid, vendor, date, expected_date, status, notes,
received_qty, received_cost, last_received_date, last_received_by,
alt_po_received_qty, alt_po_last_received_date,
no_po_received_qty, no_po_last_received_date
)
SELECT
po.po_id,
po.pid,
po.vendor,
po.date,
po.expected_date,
po.status,
po.notes,
COALESCE(SUM(CASE WHEN r.is_alt_po = 0 THEN r.qty_each END), 0) as received_qty,
COALESCE(AVG(CASE WHEN r.is_alt_po = 0 THEN r.cost_each END), 0) as received_cost,
MAX(CASE WHEN r.is_alt_po = 0 THEN r.received_date END) as last_received_date,
MAX(CASE WHEN r.is_alt_po = 0 THEN r.received_by_name END) as last_received_by,
COALESCE(SUM(CASE WHEN r.is_alt_po = 1 THEN r.qty_each END), 0) as alt_po_received_qty,
MAX(CASE WHEN r.is_alt_po = 1 THEN r.received_date END) as alt_po_last_received_date,
COALESCE(SUM(CASE WHEN r.is_alt_po = 2 THEN r.qty_each END), 0) as no_po_received_qty,
MAX(CASE WHEN r.is_alt_po = 2 THEN r.received_date END) as no_po_last_received_date
FROM temp_purchase_orders po
LEFT JOIN temp_po_receivings r ON po.pid = r.pid
GROUP BY po.po_id, po.pid, po.vendor, po.date, po.expected_date, po.status, po.notes
ON CONFLICT (po_id, pid) DO UPDATE SET
vendor = EXCLUDED.vendor,
date = EXCLUDED.date,
expected_date = EXCLUDED.expected_date,
status = EXCLUDED.status,
notes = EXCLUDED.notes,
received_qty = EXCLUDED.received_qty,
received_cost = EXCLUDED.received_cost,
last_received_date = EXCLUDED.last_received_date,
last_received_by = EXCLUDED.last_received_by,
alt_po_received_qty = EXCLUDED.alt_po_received_qty,
alt_po_last_received_date = EXCLUDED.alt_po_last_received_date,
no_po_received_qty = EXCLUDED.no_po_received_qty,
no_po_last_received_date = EXCLUDED.no_po_last_received_date
RETURNING xmax
)
SELECT
COUNT(*) FILTER (WHERE xmax = 0) as inserted,
COUNT(*) FILTER (WHERE xmax <> 0) as updated
FROM inserted_pos
`);
recordsAdded = parseInt(result.rows[0].inserted, 10) || 0;
recordsUpdated = parseInt(result.rows[0].updated, 10) || 0;
// Update sync status
await localConnection.query(` await localConnection.query(`
INSERT INTO sync_status (table_name, last_sync_timestamp) INSERT INTO sync_status (table_name, last_sync_timestamp)
VALUES ('purchase_orders', NOW()) VALUES ('purchase_orders', NOW())
ON DUPLICATE KEY UPDATE ON CONFLICT (table_name) DO UPDATE SET
last_sync_timestamp = NOW(), last_sync_timestamp = NOW()
last_sync_id = LAST_INSERT_ID(last_sync_id) `);
// Clean up temporary tables
await localConnection.query(`
DROP TABLE IF EXISTS temp_purchase_orders;
DROP TABLE IF EXISTS temp_po_receivings;
`); `);
return { return {
status: "complete", status: "complete",
totalImported: totalItems, recordsAdded,
recordsAdded: recordsAdded || 0, recordsUpdated,
recordsUpdated: recordsUpdated || 0, totalRecords: processed
incrementalUpdate,
lastSyncTime
}; };
} catch (error) { } catch (error) {
outputProgress({ console.error("Error during purchase orders import:", error);
operation: `${incrementalUpdate ? 'Incremental' : 'Full'} purchase orders import failed`, // Attempt cleanup on error
status: "error", try {
error: error.message, await localConnection.query(`
}); DROP TABLE IF EXISTS temp_purchase_orders;
DROP TABLE IF EXISTS temp_po_receivings;
`);
} catch (cleanupError) {
console.error('Error during cleanup:', cleanupError);
}
throw error; throw error;
} }
} }

View File

@@ -1,5 +1,6 @@
const mysql = require("mysql2/promise"); const mysql = require("mysql2/promise");
const { Client } = require("ssh2"); const { Client } = require("ssh2");
const { Pool } = require('pg');
const dotenv = require("dotenv"); const dotenv = require("dotenv");
const path = require("path"); const path = require("path");
@@ -41,23 +42,90 @@ async function setupSshTunnel(sshConfig) {
async function setupConnections(sshConfig) { async function setupConnections(sshConfig) {
const tunnel = await setupSshTunnel(sshConfig); const tunnel = await setupSshTunnel(sshConfig);
// Setup MySQL connection for production
const prodConnection = await mysql.createConnection({ const prodConnection = await mysql.createConnection({
...sshConfig.prodDbConfig, ...sshConfig.prodDbConfig,
stream: tunnel.stream, stream: tunnel.stream,
}); });
const localConnection = await mysql.createPool({ // Setup PostgreSQL connection pool for local
...sshConfig.localDbConfig, const localPool = new Pool(sshConfig.localDbConfig);
waitForConnections: true,
connectionLimit: 10,
queueLimit: 0
});
return { // Test the PostgreSQL connection
ssh: tunnel.ssh, try {
prodConnection, const client = await localPool.connect();
localConnection await client.query('SELECT NOW()');
client.release();
console.log('PostgreSQL connection successful');
} catch (err) {
console.error('PostgreSQL connection error:', err);
throw err;
}
// Create a wrapper for the PostgreSQL pool to match MySQL interface
const localConnection = {
_client: null,
_transactionActive: false,
query: async (text, params) => {
// If we're not in a transaction, use the pool directly
if (!localConnection._transactionActive) {
const client = await localPool.connect();
try {
const result = await client.query(text, params);
return [result];
} finally {
client.release();
}
}
// If we're in a transaction, use the dedicated client
if (!localConnection._client) {
throw new Error('No active transaction client');
}
const result = await localConnection._client.query(text, params);
return [result];
},
beginTransaction: async () => {
if (localConnection._transactionActive) {
throw new Error('Transaction already active');
}
localConnection._client = await localPool.connect();
await localConnection._client.query('BEGIN');
localConnection._transactionActive = true;
},
commit: async () => {
if (!localConnection._transactionActive) {
throw new Error('No active transaction to commit');
}
await localConnection._client.query('COMMIT');
localConnection._client.release();
localConnection._client = null;
localConnection._transactionActive = false;
},
rollback: async () => {
if (!localConnection._transactionActive) {
throw new Error('No active transaction to rollback');
}
await localConnection._client.query('ROLLBACK');
localConnection._client.release();
localConnection._client = null;
localConnection._transactionActive = false;
},
end: async () => {
if (localConnection._client) {
localConnection._client.release();
localConnection._client = null;
}
await localPool.end();
}
}; };
return { prodConnection, localConnection, tunnel };
} }
// Helper function to close connections // Helper function to close connections

View File

@@ -1,4 +1,4 @@
const mysql = require('mysql2/promise'); const { Client } = require('pg');
const path = require('path'); const path = require('path');
const dotenv = require('dotenv'); const dotenv = require('dotenv');
const fs = require('fs'); const fs = require('fs');
@@ -10,7 +10,7 @@ const dbConfig = {
user: process.env.DB_USER, user: process.env.DB_USER,
password: process.env.DB_PASSWORD, password: process.env.DB_PASSWORD,
database: process.env.DB_NAME, database: process.env.DB_NAME,
multipleStatements: true port: process.env.DB_PORT || 5432
}; };
// Helper function to output progress in JSON format // Helper function to output progress in JSON format
@@ -120,30 +120,26 @@ async function resetDatabase() {
} }
}); });
const connection = await mysql.createConnection(dbConfig); const client = new Client(dbConfig);
await client.connect();
try { try {
// Check MySQL privileges // Check PostgreSQL version and user
outputProgress({ outputProgress({
operation: 'Checking privileges', operation: 'Checking database',
message: 'Verifying MySQL user privileges...' message: 'Verifying PostgreSQL version and user privileges...'
}); });
const [grants] = await connection.query('SHOW GRANTS'); const versionResult = await client.query('SELECT version()');
outputProgress({ const userResult = await client.query('SELECT current_user, current_database()');
operation: 'User privileges',
message: {
grants: grants.map(g => Object.values(g)[0])
}
});
// Enable warnings as errors
await connection.query('SET SESSION sql_notes = 1');
// Log database config (without sensitive info)
outputProgress({ outputProgress({
operation: 'Database config', operation: 'Database info',
message: `Using database: ${dbConfig.database} on host: ${dbConfig.host}` message: {
version: versionResult.rows[0].version,
user: userResult.rows[0].current_user,
database: userResult.rows[0].current_database
}
}); });
// Get list of all tables in the current database // Get list of all tables in the current database
@@ -152,14 +148,14 @@ async function resetDatabase() {
message: 'Retrieving all table names...' message: 'Retrieving all table names...'
}); });
const [tables] = await connection.query(` const tablesResult = await client.query(`
SELECT GROUP_CONCAT(table_name) as tables SELECT string_agg(tablename, ', ') as tables
FROM information_schema.tables FROM pg_tables
WHERE table_schema = DATABASE() WHERE schemaname = 'public'
AND table_name NOT IN ('users', 'import_history', 'calculate_history') AND tablename NOT IN ('users', 'calculate_history', 'import_history');
`); `);
if (!tables[0].tables) { if (!tablesResult.rows[0].tables) {
outputProgress({ outputProgress({
operation: 'No tables found', operation: 'No tables found',
message: 'Database is already empty' message: 'Database is already empty'
@@ -170,20 +166,73 @@ async function resetDatabase() {
message: 'Dropping all existing tables...' message: 'Dropping all existing tables...'
}); });
await connection.query('SET FOREIGN_KEY_CHECKS = 0'); // Disable triggers/foreign key checks
const dropQuery = ` await client.query('SET session_replication_role = \'replica\';');
DROP TABLE IF EXISTS
${tables[0].tables // Drop all tables except users
.split(',') const tables = tablesResult.rows[0].tables.split(', ');
.filter(table => !['users', 'calculate_history'].includes(table)) for (const table of tables) {
.map(table => '`' + table + '`') if (!['users'].includes(table)) {
.join(', ')} await client.query(`DROP TABLE IF EXISTS "${table}" CASCADE`);
`; }
await connection.query(dropQuery); }
await connection.query('SET FOREIGN_KEY_CHECKS = 1');
// Only drop types if we're not preserving history tables
const historyTablesExist = await client.query(`
SELECT EXISTS (
SELECT FROM pg_tables
WHERE schemaname = 'public'
AND tablename IN ('calculate_history', 'import_history')
);
`);
if (!historyTablesExist.rows[0].exists) {
await client.query('DROP TYPE IF EXISTS calculation_status CASCADE;');
await client.query('DROP TYPE IF EXISTS module_name CASCADE;');
}
// Re-enable triggers/foreign key checks
await client.query('SET session_replication_role = \'origin\';');
} }
// Read and execute main schema (core tables) // Create enum types if they don't exist
outputProgress({
operation: 'Creating enum types',
message: 'Setting up required enum types...'
});
// Check if types exist before creating
const typesExist = await client.query(`
SELECT EXISTS (
SELECT 1 FROM pg_type
WHERE typname = 'calculation_status'
) as calc_status_exists,
EXISTS (
SELECT 1 FROM pg_type
WHERE typname = 'module_name'
) as module_name_exists;
`);
if (!typesExist.rows[0].calc_status_exists) {
await client.query(`CREATE TYPE calculation_status AS ENUM ('running', 'completed', 'failed', 'cancelled')`);
}
if (!typesExist.rows[0].module_name_exists) {
await client.query(`
CREATE TYPE module_name AS ENUM (
'product_metrics',
'time_aggregates',
'financial_metrics',
'vendor_metrics',
'category_metrics',
'brand_metrics',
'sales_forecasts',
'abc_classification'
)
`);
}
// Read and execute main schema first (core tables)
outputProgress({ outputProgress({
operation: 'Running database setup', operation: 'Running database setup',
message: 'Creating core tables...' message: 'Creating core tables...'
@@ -223,35 +272,24 @@ async function resetDatabase() {
for (let i = 0; i < statements.length; i++) { for (let i = 0; i < statements.length; i++) {
const stmt = statements[i]; const stmt = statements[i];
try { try {
const [result, fields] = await connection.query(stmt); const result = await client.query(stmt);
// Check for warnings
const [warnings] = await connection.query('SHOW WARNINGS');
if (warnings && warnings.length > 0) {
outputProgress({
status: 'warning',
operation: 'SQL Warning',
statement: i + 1,
warnings: warnings
});
}
// Verify if table was created (if this was a CREATE TABLE statement) // Verify if table was created (if this was a CREATE TABLE statement)
if (stmt.trim().toLowerCase().startsWith('create table')) { if (stmt.trim().toLowerCase().startsWith('create table')) {
const tableName = stmt.match(/create\s+table\s+(?:if\s+not\s+exists\s+)?`?(\w+)`?/i)?.[1]; const tableName = stmt.match(/create\s+table\s+(?:if\s+not\s+exists\s+)?["]?(\w+)["]?/i)?.[1];
if (tableName) { if (tableName) {
const [tableExists] = await connection.query(` const tableExists = await client.query(`
SELECT COUNT(*) as count SELECT COUNT(*) as count
FROM information_schema.tables FROM information_schema.tables
WHERE table_schema = DATABASE() WHERE table_schema = 'public'
AND table_name = ? AND table_name = $1
`, [tableName]); `, [tableName]);
outputProgress({ outputProgress({
operation: 'Table Creation Verification', operation: 'Table Creation Verification',
message: { message: {
table: tableName, table: tableName,
exists: tableExists[0].count > 0 exists: tableExists.rows[0].count > 0
} }
}); });
} }
@@ -263,7 +301,7 @@ async function resetDatabase() {
statement: i + 1, statement: i + 1,
total: statements.length, total: statements.length,
preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : ''), preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : ''),
affectedRows: result.affectedRows rowCount: result.rowCount
} }
}); });
} catch (sqlError) { } catch (sqlError) {
@@ -271,8 +309,6 @@ async function resetDatabase() {
status: 'error', status: 'error',
operation: 'SQL Error', operation: 'SQL Error',
error: sqlError.message, error: sqlError.message,
sqlState: sqlError.sqlState,
errno: sqlError.errno,
statement: stmt, statement: stmt,
statementNumber: i + 1 statementNumber: i + 1
}); });
@@ -280,66 +316,12 @@ async function resetDatabase() {
} }
} }
// List all tables in the database after schema execution // Verify core tables were created
outputProgress({ const existingTables = (await client.query(`
operation: 'Debug database', SELECT table_name
message: {
currentDatabase: (await connection.query('SELECT DATABASE() as db'))[0][0].db
}
});
const [allTables] = await connection.query(`
SELECT
table_schema,
table_name,
engine,
create_time,
table_rows
FROM information_schema.tables FROM information_schema.tables
WHERE table_schema = DATABASE() WHERE table_schema = 'public'
`); `)).rows.map(t => t.table_name);
if (allTables.length === 0) {
outputProgress({
operation: 'Warning',
message: 'No tables found in database after schema execution'
});
} else {
outputProgress({
operation: 'Tables after schema execution',
message: {
count: allTables.length,
tables: allTables.map(t => ({
schema: t.table_schema,
name: t.table_name,
engine: t.engine,
created: t.create_time,
rows: t.table_rows
}))
}
});
}
// Also check table status
const [tableStatus] = await connection.query('SHOW TABLE STATUS');
outputProgress({
operation: 'Table Status',
message: {
tables: tableStatus.map(t => ({
name: t.Name,
engine: t.Engine,
version: t.Version,
rowFormat: t.Row_format,
rows: t.Rows,
createTime: t.Create_time,
updateTime: t.Update_time
}))
}
});
// Verify core tables were created using SHOW TABLES
const [showTables] = await connection.query('SHOW TABLES');
const existingTables = showTables.map(t => Object.values(t)[0]);
outputProgress({ outputProgress({
operation: 'Core tables verification', operation: 'Core tables verification',
@@ -359,22 +341,12 @@ async function resetDatabase() {
); );
} }
// Verify all core tables use InnoDB
const [engineStatus] = await connection.query('SHOW TABLE STATUS WHERE Name IN (?)', [CORE_TABLES]);
const nonInnoDBTables = engineStatus.filter(t => t.Engine !== 'InnoDB');
if (nonInnoDBTables.length > 0) {
throw new Error(
`Tables using non-InnoDB engine: ${nonInnoDBTables.map(t => t.Name).join(', ')}`
);
}
outputProgress({ outputProgress({
operation: 'Core tables created', operation: 'Core tables created',
message: `Successfully created tables: ${CORE_TABLES.join(', ')}` message: `Successfully created tables: ${CORE_TABLES.join(', ')}`
}); });
// Read and execute config schema // Now read and execute config schema (since core tables exist)
outputProgress({ outputProgress({
operation: 'Running config setup', operation: 'Running config setup',
message: 'Creating configuration tables...' message: 'Creating configuration tables...'
@@ -400,18 +372,7 @@ async function resetDatabase() {
for (let i = 0; i < configStatements.length; i++) { for (let i = 0; i < configStatements.length; i++) {
const stmt = configStatements[i]; const stmt = configStatements[i];
try { try {
const [result, fields] = await connection.query(stmt); const result = await client.query(stmt);
// Check for warnings
const [warnings] = await connection.query('SHOW WARNINGS');
if (warnings && warnings.length > 0) {
outputProgress({
status: 'warning',
operation: 'Config SQL Warning',
statement: i + 1,
warnings: warnings
});
}
outputProgress({ outputProgress({
operation: 'Config SQL Progress', operation: 'Config SQL Progress',
@@ -419,7 +380,7 @@ async function resetDatabase() {
statement: i + 1, statement: i + 1,
total: configStatements.length, total: configStatements.length,
preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : ''), preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : ''),
affectedRows: result.affectedRows rowCount: result.rowCount
} }
}); });
} catch (sqlError) { } catch (sqlError) {
@@ -427,8 +388,6 @@ async function resetDatabase() {
status: 'error', status: 'error',
operation: 'Config SQL Error', operation: 'Config SQL Error',
error: sqlError.message, error: sqlError.message,
sqlState: sqlError.sqlState,
errno: sqlError.errno,
statement: stmt, statement: stmt,
statementNumber: i + 1 statementNumber: i + 1
}); });
@@ -436,33 +395,6 @@ async function resetDatabase() {
} }
} }
// Verify config tables were created
const [showConfigTables] = await connection.query('SHOW TABLES');
const existingConfigTables = showConfigTables.map(t => Object.values(t)[0]);
outputProgress({
operation: 'Config tables verification',
message: {
found: existingConfigTables,
expected: CONFIG_TABLES
}
});
const missingConfigTables = CONFIG_TABLES.filter(
t => !existingConfigTables.includes(t)
);
if (missingConfigTables.length > 0) {
throw new Error(
`Failed to create config tables: ${missingConfigTables.join(', ')}`
);
}
outputProgress({
operation: 'Config tables created',
message: `Successfully created tables: ${CONFIG_TABLES.join(', ')}`
});
// Read and execute metrics schema (metrics tables) // Read and execute metrics schema (metrics tables)
outputProgress({ outputProgress({
operation: 'Running metrics setup', operation: 'Running metrics setup',
@@ -489,18 +421,7 @@ async function resetDatabase() {
for (let i = 0; i < metricsStatements.length; i++) { for (let i = 0; i < metricsStatements.length; i++) {
const stmt = metricsStatements[i]; const stmt = metricsStatements[i];
try { try {
const [result, fields] = await connection.query(stmt); const result = await client.query(stmt);
// Check for warnings
const [warnings] = await connection.query('SHOW WARNINGS');
if (warnings && warnings.length > 0) {
outputProgress({
status: 'warning',
operation: 'Metrics SQL Warning',
statement: i + 1,
warnings: warnings
});
}
outputProgress({ outputProgress({
operation: 'Metrics SQL Progress', operation: 'Metrics SQL Progress',
@@ -508,7 +429,7 @@ async function resetDatabase() {
statement: i + 1, statement: i + 1,
total: metricsStatements.length, total: metricsStatements.length,
preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : ''), preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : ''),
affectedRows: result.affectedRows rowCount: result.rowCount
} }
}); });
} catch (sqlError) { } catch (sqlError) {
@@ -516,8 +437,6 @@ async function resetDatabase() {
status: 'error', status: 'error',
operation: 'Metrics SQL Error', operation: 'Metrics SQL Error',
error: sqlError.message, error: sqlError.message,
sqlState: sqlError.sqlState,
errno: sqlError.errno,
statement: stmt, statement: stmt,
statementNumber: i + 1 statementNumber: i + 1
}); });
@@ -539,7 +458,7 @@ async function resetDatabase() {
}); });
process.exit(1); process.exit(1);
} finally { } finally {
await connection.end(); await client.end();
} }
} }

View File

@@ -1,4 +1,4 @@
const mysql = require('mysql2/promise'); const { Client } = require('pg');
const path = require('path'); const path = require('path');
const fs = require('fs'); const fs = require('fs');
require('dotenv').config({ path: path.resolve(__dirname, '../.env') }); require('dotenv').config({ path: path.resolve(__dirname, '../.env') });
@@ -8,7 +8,7 @@ const dbConfig = {
user: process.env.DB_USER, user: process.env.DB_USER,
password: process.env.DB_PASSWORD, password: process.env.DB_PASSWORD,
database: process.env.DB_NAME, database: process.env.DB_NAME,
multipleStatements: true port: process.env.DB_PORT || 5432
}; };
function outputProgress(data) { function outputProgress(data) {
@@ -34,8 +34,8 @@ const METRICS_TABLES = [
'sales_forecasts', 'sales_forecasts',
'temp_purchase_metrics', 'temp_purchase_metrics',
'temp_sales_metrics', 'temp_sales_metrics',
'vendor_metrics', //before vendor_details for foreign key 'vendor_metrics',
'vendor_time_metrics', //before vendor_details for foreign key 'vendor_time_metrics',
'vendor_details' 'vendor_details'
]; ];
@@ -90,31 +90,31 @@ function splitSQLStatements(sql) {
} }
async function resetMetrics() { async function resetMetrics() {
let connection; let client;
try { try {
outputProgress({ outputProgress({
operation: 'Starting metrics reset', operation: 'Starting metrics reset',
message: 'Connecting to database...' message: 'Connecting to database...'
}); });
connection = await mysql.createConnection(dbConfig); client = new Client(dbConfig);
await connection.beginTransaction(); await client.connect();
// First verify current state // First verify current state
const [initialTables] = await connection.query(` const initialTables = await client.query(`
SELECT TABLE_NAME as name SELECT tablename as name
FROM information_schema.tables FROM pg_tables
WHERE TABLE_SCHEMA = DATABASE() WHERE schemaname = 'public'
AND TABLE_NAME IN (?) AND tablename = ANY($1)
`, [METRICS_TABLES]); `, [METRICS_TABLES]);
outputProgress({ outputProgress({
operation: 'Initial state', operation: 'Initial state',
message: `Found ${initialTables.length} existing metrics tables: ${initialTables.map(t => t.name).join(', ')}` message: `Found ${initialTables.rows.length} existing metrics tables: ${initialTables.rows.map(t => t.name).join(', ')}`
}); });
// Disable foreign key checks at the start // Disable foreign key checks at the start
await connection.query('SET FOREIGN_KEY_CHECKS = 0'); await client.query('SET session_replication_role = \'replica\'');
// Drop all metrics tables in reverse order to handle dependencies // Drop all metrics tables in reverse order to handle dependencies
outputProgress({ outputProgress({
@@ -124,17 +124,17 @@ async function resetMetrics() {
for (const table of [...METRICS_TABLES].reverse()) { for (const table of [...METRICS_TABLES].reverse()) {
try { try {
await connection.query(`DROP TABLE IF EXISTS ${table}`); await client.query(`DROP TABLE IF EXISTS "${table}" CASCADE`);
// Verify the table was actually dropped // Verify the table was actually dropped
const [checkDrop] = await connection.query(` const checkDrop = await client.query(`
SELECT COUNT(*) as count SELECT COUNT(*) as count
FROM information_schema.tables FROM pg_tables
WHERE TABLE_SCHEMA = DATABASE() WHERE schemaname = 'public'
AND TABLE_NAME = ? AND tablename = $1
`, [table]); `, [table]);
if (checkDrop[0].count > 0) { if (parseInt(checkDrop.rows[0].count) > 0) {
throw new Error(`Failed to drop table ${table} - table still exists`); throw new Error(`Failed to drop table ${table} - table still exists`);
} }
@@ -153,15 +153,15 @@ async function resetMetrics() {
} }
// Verify all tables were dropped // Verify all tables were dropped
const [afterDrop] = await connection.query(` const afterDrop = await client.query(`
SELECT TABLE_NAME as name SELECT tablename as name
FROM information_schema.tables FROM pg_tables
WHERE TABLE_SCHEMA = DATABASE() WHERE schemaname = 'public'
AND TABLE_NAME IN (?) AND tablename = ANY($1)
`, [METRICS_TABLES]); `, [METRICS_TABLES]);
if (afterDrop.length > 0) { if (afterDrop.rows.length > 0) {
throw new Error(`Failed to drop all tables. Remaining tables: ${afterDrop.map(t => t.name).join(', ')}`); throw new Error(`Failed to drop all tables. Remaining tables: ${afterDrop.rows.map(t => t.name).join(', ')}`);
} }
// Read metrics schema // Read metrics schema
@@ -187,39 +187,26 @@ async function resetMetrics() {
for (let i = 0; i < statements.length; i++) { for (let i = 0; i < statements.length; i++) {
const stmt = statements[i]; const stmt = statements[i];
try { try {
await connection.query(stmt); const result = await client.query(stmt);
// Check for warnings
const [warnings] = await connection.query('SHOW WARNINGS');
if (warnings && warnings.length > 0) {
outputProgress({
status: 'warning',
operation: 'SQL Warning',
message: {
statement: i + 1,
warnings: warnings
}
});
}
// If this is a CREATE TABLE statement, verify the table was created // If this is a CREATE TABLE statement, verify the table was created
if (stmt.trim().toLowerCase().startsWith('create table')) { if (stmt.trim().toLowerCase().startsWith('create table')) {
const tableName = stmt.match(/create\s+table\s+(?:if\s+not\s+exists\s+)?`?(\w+)`?/i)?.[1]; const tableName = stmt.match(/create\s+table\s+(?:if\s+not\s+exists\s+)?["]?(\w+)["]?/i)?.[1];
if (tableName) { if (tableName) {
const [checkCreate] = await connection.query(` const checkCreate = await client.query(`
SELECT TABLE_NAME as name, CREATE_TIME as created SELECT tablename as name
FROM information_schema.tables FROM pg_tables
WHERE TABLE_SCHEMA = DATABASE() WHERE schemaname = 'public'
AND TABLE_NAME = ? AND tablename = $1
`, [tableName]); `, [tableName]);
if (checkCreate.length === 0) { if (checkCreate.rows.length === 0) {
throw new Error(`Failed to create table ${tableName} - table does not exist after CREATE statement`); throw new Error(`Failed to create table ${tableName} - table does not exist after CREATE statement`);
} }
outputProgress({ outputProgress({
operation: 'Table created', operation: 'Table created',
message: `Successfully created table: ${tableName} at ${checkCreate[0].created}` message: `Successfully created table: ${tableName}`
}); });
} }
} }
@@ -229,7 +216,8 @@ async function resetMetrics() {
message: { message: {
statement: i + 1, statement: i + 1,
total: statements.length, total: statements.length,
preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : '') preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : ''),
rowCount: result.rowCount
} }
}); });
} catch (sqlError) { } catch (sqlError) {
@@ -238,8 +226,6 @@ async function resetMetrics() {
operation: 'SQL Error', operation: 'SQL Error',
message: { message: {
error: sqlError.message, error: sqlError.message,
sqlState: sqlError.sqlState,
errno: sqlError.errno,
statement: stmt, statement: stmt,
statementNumber: i + 1 statementNumber: i + 1
} }
@@ -249,7 +235,7 @@ async function resetMetrics() {
} }
// Re-enable foreign key checks after all tables are created // Re-enable foreign key checks after all tables are created
await connection.query('SET FOREIGN_KEY_CHECKS = 1'); await client.query('SET session_replication_role = \'origin\'');
// Verify metrics tables were created // Verify metrics tables were created
outputProgress({ outputProgress({
@@ -257,37 +243,36 @@ async function resetMetrics() {
message: 'Checking all metrics tables were created...' message: 'Checking all metrics tables were created...'
}); });
const [metricsTablesResult] = await connection.query(` const metricsTablesResult = await client.query(`
SELECT SELECT tablename as name
TABLE_NAME as name, FROM pg_tables
TABLE_ROWS as \`rows\`, WHERE schemaname = 'public'
CREATE_TIME as created AND tablename = ANY($1)
FROM information_schema.tables
WHERE TABLE_SCHEMA = DATABASE()
AND TABLE_NAME IN (?)
`, [METRICS_TABLES]); `, [METRICS_TABLES]);
outputProgress({ outputProgress({
operation: 'Tables found', operation: 'Tables found',
message: `Found ${metricsTablesResult.length} tables: ${metricsTablesResult.map(t => message: `Found ${metricsTablesResult.rows.length} tables: ${metricsTablesResult.rows.map(t => t.name).join(', ')}`
`${t.name} (created: ${t.created})`
).join(', ')}`
}); });
const existingMetricsTables = metricsTablesResult.map(t => t.name); const existingMetricsTables = metricsTablesResult.rows.map(t => t.name);
const missingMetricsTables = METRICS_TABLES.filter(t => !existingMetricsTables.includes(t)); const missingMetricsTables = METRICS_TABLES.filter(t => !existingMetricsTables.includes(t));
if (missingMetricsTables.length > 0) { if (missingMetricsTables.length > 0) {
// Do one final check of the actual tables // Do one final check of the actual tables
const [finalCheck] = await connection.query('SHOW TABLES'); const finalCheck = await client.query(`
SELECT tablename as name
FROM pg_tables
WHERE schemaname = 'public'
`);
outputProgress({ outputProgress({
operation: 'Final table check', operation: 'Final table check',
message: `All database tables: ${finalCheck.map(t => Object.values(t)[0]).join(', ')}` message: `All database tables: ${finalCheck.rows.map(t => t.name).join(', ')}`
}); });
throw new Error(`Failed to create metrics tables: ${missingMetricsTables.join(', ')}`); throw new Error(`Failed to create metrics tables: ${missingMetricsTables.join(', ')}`);
} }
await connection.commit(); await client.query('COMMIT');
outputProgress({ outputProgress({
status: 'complete', status: 'complete',
@@ -302,17 +287,17 @@ async function resetMetrics() {
stack: error.stack stack: error.stack
}); });
if (connection) { if (client) {
await connection.rollback(); await client.query('ROLLBACK');
// Make sure to re-enable foreign key checks even if there's an error // Make sure to re-enable foreign key checks even if there's an error
await connection.query('SET FOREIGN_KEY_CHECKS = 1').catch(() => {}); await client.query('SET session_replication_role = \'origin\'').catch(() => {});
} }
throw error; throw error;
} finally { } finally {
if (connection) { if (client) {
// One final attempt to ensure foreign key checks are enabled // One final attempt to ensure foreign key checks are enabled
await connection.query('SET FOREIGN_KEY_CHECKS = 1').catch(() => {}); await client.query('SET session_replication_role = \'origin\'').catch(() => {});
await connection.end(); await client.end();
} }
} }
} }

View File

@@ -6,24 +6,24 @@ router.get('/stats', async (req, res) => {
try { try {
const pool = req.app.locals.pool; const pool = req.app.locals.pool;
const [results] = await pool.query(` const { rows: [results] } = await pool.query(`
SELECT SELECT
COALESCE( COALESCE(
ROUND( ROUND(
(SUM(o.price * o.quantity - p.cost_price * o.quantity) / (SUM(o.price * o.quantity - p.cost_price * o.quantity) /
NULLIF(SUM(o.price * o.quantity), 0)) * 100, 1 NULLIF(SUM(o.price * o.quantity), 0) * 100)::numeric, 1
), ),
0 0
) as profitMargin, ) as profitMargin,
COALESCE( COALESCE(
ROUND( ROUND(
(AVG(p.price / NULLIF(p.cost_price, 0) - 1) * 100), 1 (AVG(p.price / NULLIF(p.cost_price, 0) - 1) * 100)::numeric, 1
), ),
0 0
) as averageMarkup, ) as averageMarkup,
COALESCE( COALESCE(
ROUND( ROUND(
SUM(o.quantity) / NULLIF(AVG(p.stock_quantity), 0), 2 (SUM(o.quantity) / NULLIF(AVG(p.stock_quantity), 0))::numeric, 2
), ),
0 0
) as stockTurnoverRate, ) as stockTurnoverRate,
@@ -31,23 +31,23 @@ router.get('/stats', async (req, res) => {
COALESCE(COUNT(DISTINCT p.categories), 0) as categoryCount, COALESCE(COUNT(DISTINCT p.categories), 0) as categoryCount,
COALESCE( COALESCE(
ROUND( ROUND(
AVG(o.price * o.quantity), 2 AVG(o.price * o.quantity)::numeric, 2
), ),
0 0
) as averageOrderValue ) as averageOrderValue
FROM products p FROM products p
LEFT JOIN orders o ON p.pid = o.pid LEFT JOIN orders o ON p.pid = o.pid
WHERE o.date >= DATE_SUB(CURDATE(), INTERVAL 30 DAY) WHERE o.date >= CURRENT_DATE - INTERVAL '30 days'
`); `);
// Ensure all values are numbers // Ensure all values are numbers
const stats = { const stats = {
profitMargin: Number(results[0].profitMargin) || 0, profitMargin: Number(results.profitmargin) || 0,
averageMarkup: Number(results[0].averageMarkup) || 0, averageMarkup: Number(results.averagemarkup) || 0,
stockTurnoverRate: Number(results[0].stockTurnoverRate) || 0, stockTurnoverRate: Number(results.stockturnoverrate) || 0,
vendorCount: Number(results[0].vendorCount) || 0, vendorCount: Number(results.vendorcount) || 0,
categoryCount: Number(results[0].categoryCount) || 0, categoryCount: Number(results.categorycount) || 0,
averageOrderValue: Number(results[0].averageOrderValue) || 0 averageOrderValue: Number(results.averageordervalue) || 0
}; };
res.json(stats); res.json(stats);
@@ -63,13 +63,13 @@ router.get('/profit', async (req, res) => {
const pool = req.app.locals.pool; const pool = req.app.locals.pool;
// Get profit margins by category with full path // Get profit margins by category with full path
const [byCategory] = await pool.query(` const { rows: byCategory } = await pool.query(`
WITH RECURSIVE category_path AS ( WITH RECURSIVE category_path AS (
SELECT SELECT
c.cat_id, c.cat_id,
c.name, c.name,
c.parent_id, c.parent_id,
CAST(c.name AS CHAR(1000)) as path c.name::text as path
FROM categories c FROM categories c
WHERE c.parent_id IS NULL WHERE c.parent_id IS NULL
@@ -79,7 +79,7 @@ router.get('/profit', async (req, res) => {
c.cat_id, c.cat_id,
c.name, c.name,
c.parent_id, c.parent_id,
CONCAT(cp.path, ' > ', c.name) cp.path || ' > ' || c.name
FROM categories c FROM categories c
JOIN category_path cp ON c.parent_id = cp.cat_id JOIN category_path cp ON c.parent_id = cp.cat_id
) )
@@ -88,53 +88,46 @@ router.get('/profit', async (req, res) => {
cp.path as categoryPath, cp.path as categoryPath,
ROUND( ROUND(
(SUM(o.price * o.quantity - p.cost_price * o.quantity) / (SUM(o.price * o.quantity - p.cost_price * o.quantity) /
NULLIF(SUM(o.price * o.quantity), 0)) * 100, 1 NULLIF(SUM(o.price * o.quantity), 0) * 100)::numeric, 1
) as profitMargin, ) as profitMargin,
CAST(SUM(o.price * o.quantity) AS DECIMAL(15,3)) as revenue, ROUND(SUM(o.price * o.quantity)::numeric, 3) as revenue,
CAST(SUM(p.cost_price * o.quantity) AS DECIMAL(15,3)) as cost ROUND(SUM(p.cost_price * o.quantity)::numeric, 3) as cost
FROM products p FROM products p
LEFT JOIN orders o ON p.pid = o.pid LEFT JOIN orders o ON p.pid = o.pid
JOIN product_categories pc ON p.pid = pc.pid JOIN product_categories pc ON p.pid = pc.pid
JOIN categories c ON pc.cat_id = c.cat_id JOIN categories c ON pc.cat_id = c.cat_id
JOIN category_path cp ON c.cat_id = cp.cat_id JOIN category_path cp ON c.cat_id = cp.cat_id
WHERE o.date >= DATE_SUB(CURDATE(), INTERVAL 30 DAY) WHERE o.date >= CURRENT_DATE - INTERVAL '30 days'
GROUP BY c.name, cp.path GROUP BY c.name, cp.path
ORDER BY profitMargin DESC ORDER BY profitMargin DESC
LIMIT 10 LIMIT 10
`); `);
// Get profit margin trend over time // Get profit margin trend over time
const [overTime] = await pool.query(` const { rows: overTime } = await pool.query(`
SELECT SELECT
formatted_date as date, to_char(o.date, 'YYYY-MM-DD') as date,
ROUND( ROUND(
(SUM(o.price * o.quantity - p.cost_price * o.quantity) / (SUM(o.price * o.quantity - p.cost_price * o.quantity) /
NULLIF(SUM(o.price * o.quantity), 0)) * 100, 1 NULLIF(SUM(o.price * o.quantity), 0) * 100)::numeric, 1
) as profitMargin, ) as profitMargin,
CAST(SUM(o.price * o.quantity) AS DECIMAL(15,3)) as revenue, ROUND(SUM(o.price * o.quantity)::numeric, 3) as revenue,
CAST(SUM(p.cost_price * o.quantity) AS DECIMAL(15,3)) as cost ROUND(SUM(p.cost_price * o.quantity)::numeric, 3) as cost
FROM products p FROM products p
LEFT JOIN orders o ON p.pid = o.pid LEFT JOIN orders o ON p.pid = o.pid
CROSS JOIN ( WHERE o.date >= CURRENT_DATE - INTERVAL '30 days'
SELECT DATE_FORMAT(o.date, '%Y-%m-%d') as formatted_date GROUP BY to_char(o.date, 'YYYY-MM-DD')
FROM orders o ORDER BY date
WHERE o.date >= DATE_SUB(CURDATE(), INTERVAL 30 DAY)
GROUP BY DATE_FORMAT(o.date, '%Y-%m-%d')
) dates
WHERE o.date >= DATE_SUB(CURDATE(), INTERVAL 30 DAY)
AND DATE_FORMAT(o.date, '%Y-%m-%d') = dates.formatted_date
GROUP BY formatted_date
ORDER BY formatted_date
`); `);
// Get top performing products with category paths // Get top performing products with category paths
const [topProducts] = await pool.query(` const { rows: topProducts } = await pool.query(`
WITH RECURSIVE category_path AS ( WITH RECURSIVE category_path AS (
SELECT SELECT
c.cat_id, c.cat_id,
c.name, c.name,
c.parent_id, c.parent_id,
CAST(c.name AS CHAR(1000)) as path c.name::text as path
FROM categories c FROM categories c
WHERE c.parent_id IS NULL WHERE c.parent_id IS NULL
@@ -144,7 +137,7 @@ router.get('/profit', async (req, res) => {
c.cat_id, c.cat_id,
c.name, c.name,
c.parent_id, c.parent_id,
CONCAT(cp.path, ' > ', c.name) cp.path || ' > ' || c.name
FROM categories c FROM categories c
JOIN category_path cp ON c.parent_id = cp.cat_id JOIN category_path cp ON c.parent_id = cp.cat_id
) )
@@ -154,18 +147,18 @@ router.get('/profit', async (req, res) => {
cp.path as categoryPath, cp.path as categoryPath,
ROUND( ROUND(
(SUM(o.price * o.quantity - p.cost_price * o.quantity) / (SUM(o.price * o.quantity - p.cost_price * o.quantity) /
NULLIF(SUM(o.price * o.quantity), 0)) * 100, 1 NULLIF(SUM(o.price * o.quantity), 0) * 100)::numeric, 1
) as profitMargin, ) as profitMargin,
CAST(SUM(o.price * o.quantity) AS DECIMAL(15,3)) as revenue, ROUND(SUM(o.price * o.quantity)::numeric, 3) as revenue,
CAST(SUM(p.cost_price * o.quantity) AS DECIMAL(15,3)) as cost ROUND(SUM(p.cost_price * o.quantity)::numeric, 3) as cost
FROM products p FROM products p
LEFT JOIN orders o ON p.pid = o.pid LEFT JOIN orders o ON p.pid = o.pid
JOIN product_categories pc ON p.pid = pc.pid JOIN product_categories pc ON p.pid = pc.pid
JOIN categories c ON pc.cat_id = c.cat_id JOIN categories c ON pc.cat_id = c.cat_id
JOIN category_path cp ON c.cat_id = cp.cat_id JOIN category_path cp ON c.cat_id = cp.cat_id
WHERE o.date >= DATE_SUB(CURDATE(), INTERVAL 30 DAY) WHERE o.date >= CURRENT_DATE - INTERVAL '30 days'
GROUP BY p.pid, p.title, c.name, cp.path GROUP BY p.pid, p.title, c.name, cp.path
HAVING revenue > 0 HAVING SUM(o.price * o.quantity) > 0
ORDER BY profitMargin DESC ORDER BY profitMargin DESC
LIMIT 10 LIMIT 10
`); `);
@@ -185,7 +178,7 @@ router.get('/vendors', async (req, res) => {
console.log('Fetching vendor performance data...'); console.log('Fetching vendor performance data...');
// First check if we have any vendors with sales // First check if we have any vendors with sales
const [checkData] = await pool.query(` const { rows: [checkData] } = await pool.query(`
SELECT COUNT(DISTINCT p.vendor) as vendor_count, SELECT COUNT(DISTINCT p.vendor) as vendor_count,
COUNT(DISTINCT o.order_number) as order_count COUNT(DISTINCT o.order_number) as order_count
FROM products p FROM products p
@@ -193,39 +186,39 @@ router.get('/vendors', async (req, res) => {
WHERE p.vendor IS NOT NULL WHERE p.vendor IS NOT NULL
`); `);
console.log('Vendor data check:', checkData[0]); console.log('Vendor data check:', checkData);
// Get vendor performance metrics // Get vendor performance metrics
const [performance] = await pool.query(` const { rows: performance } = await pool.query(`
WITH monthly_sales AS ( WITH monthly_sales AS (
SELECT SELECT
p.vendor, p.vendor,
CAST(SUM(CASE ROUND(SUM(CASE
WHEN o.date >= DATE_SUB(CURDATE(), INTERVAL 30 DAY) WHEN o.date >= CURRENT_DATE - INTERVAL '30 days'
THEN o.price * o.quantity THEN o.price * o.quantity
ELSE 0 ELSE 0
END) AS DECIMAL(15,3)) as current_month, END)::numeric, 3) as current_month,
CAST(SUM(CASE ROUND(SUM(CASE
WHEN o.date >= DATE_SUB(CURDATE(), INTERVAL 60 DAY) WHEN o.date >= CURRENT_DATE - INTERVAL '60 days'
AND o.date < DATE_SUB(CURDATE(), INTERVAL 30 DAY) AND o.date < CURRENT_DATE - INTERVAL '30 days'
THEN o.price * o.quantity THEN o.price * o.quantity
ELSE 0 ELSE 0
END) AS DECIMAL(15,3)) as previous_month END)::numeric, 3) as previous_month
FROM products p FROM products p
LEFT JOIN orders o ON p.pid = o.pid LEFT JOIN orders o ON p.pid = o.pid
WHERE p.vendor IS NOT NULL WHERE p.vendor IS NOT NULL
AND o.date >= DATE_SUB(CURDATE(), INTERVAL 60 DAY) AND o.date >= CURRENT_DATE - INTERVAL '60 days'
GROUP BY p.vendor GROUP BY p.vendor
) )
SELECT SELECT
p.vendor, p.vendor,
CAST(SUM(o.price * o.quantity) AS DECIMAL(15,3)) as salesVolume, ROUND(SUM(o.price * o.quantity)::numeric, 3) as salesVolume,
COALESCE(ROUND( COALESCE(ROUND(
(SUM(o.price * o.quantity - p.cost_price * o.quantity) / (SUM(o.price * o.quantity - p.cost_price * o.quantity) /
NULLIF(SUM(o.price * o.quantity), 0)) * 100, 1 NULLIF(SUM(o.price * o.quantity), 0) * 100)::numeric, 1
), 0) as profitMargin, ), 0) as profitMargin,
COALESCE(ROUND( COALESCE(ROUND(
SUM(o.quantity) / NULLIF(AVG(p.stock_quantity), 0), 1 (SUM(o.quantity) / NULLIF(AVG(p.stock_quantity), 0))::numeric, 1
), 0) as stockTurnover, ), 0) as stockTurnover,
COUNT(DISTINCT p.pid) as productCount, COUNT(DISTINCT p.pid) as productCount,
ROUND( ROUND(
@@ -236,7 +229,7 @@ router.get('/vendors', async (req, res) => {
LEFT JOIN orders o ON p.pid = o.pid LEFT JOIN orders o ON p.pid = o.pid
LEFT JOIN monthly_sales ms ON p.vendor = ms.vendor LEFT JOIN monthly_sales ms ON p.vendor = ms.vendor
WHERE p.vendor IS NOT NULL WHERE p.vendor IS NOT NULL
AND o.date >= DATE_SUB(CURDATE(), INTERVAL 30 DAY) AND o.date >= CURRENT_DATE - INTERVAL '30 days'
GROUP BY p.vendor, ms.current_month, ms.previous_month GROUP BY p.vendor, ms.current_month, ms.previous_month
ORDER BY salesVolume DESC ORDER BY salesVolume DESC
LIMIT 10 LIMIT 10
@@ -244,45 +237,7 @@ router.get('/vendors', async (req, res) => {
console.log('Performance data:', performance); console.log('Performance data:', performance);
// Get vendor comparison data res.json({ performance });
const [comparison] = await pool.query(`
SELECT
p.vendor,
CAST(COALESCE(ROUND(SUM(o.price * o.quantity) / NULLIF(COUNT(DISTINCT p.pid), 0), 2), 0) AS DECIMAL(15,3)) as salesPerProduct,
COALESCE(ROUND(AVG((o.price - p.cost_price) / NULLIF(o.price, 0) * 100), 1), 0) as averageMargin,
COUNT(DISTINCT p.pid) as size
FROM products p
LEFT JOIN orders o ON p.pid = o.pid AND o.date >= DATE_SUB(CURDATE(), INTERVAL 30 DAY)
WHERE p.vendor IS NOT NULL
GROUP BY p.vendor
ORDER BY salesPerProduct DESC
LIMIT 20
`);
console.log('Comparison data:', comparison);
// Get vendor sales trends
const [trends] = await pool.query(`
SELECT
p.vendor,
DATE_FORMAT(o.date, '%b %Y') as month,
CAST(COALESCE(SUM(o.price * o.quantity), 0) AS DECIMAL(15,3)) as sales
FROM products p
LEFT JOIN orders o ON p.pid = o.pid
WHERE p.vendor IS NOT NULL
AND o.date >= DATE_SUB(CURDATE(), INTERVAL 6 MONTH)
GROUP BY
p.vendor,
DATE_FORMAT(o.date, '%b %Y'),
DATE_FORMAT(o.date, '%Y-%m')
ORDER BY
p.vendor,
DATE_FORMAT(o.date, '%Y-%m')
`);
console.log('Trends data:', trends);
res.json({ performance, comparison, trends });
} catch (error) { } catch (error) {
console.error('Error fetching vendor performance:', error); console.error('Error fetching vendor performance:', error);
res.status(500).json({ error: 'Failed to fetch vendor performance' }); res.status(500).json({ error: 'Failed to fetch vendor performance' });

View File

@@ -6,7 +6,7 @@ router.get('/', async (req, res) => {
const pool = req.app.locals.pool; const pool = req.app.locals.pool;
try { try {
// Get all categories with metrics and hierarchy info // Get all categories with metrics and hierarchy info
const [categories] = await pool.query(` const { rows: categories } = await pool.query(`
SELECT SELECT
c.cat_id, c.cat_id,
c.name, c.name,
@@ -18,7 +18,7 @@ router.get('/', async (req, res) => {
p.type as parent_type, p.type as parent_type,
COALESCE(cm.product_count, 0) as product_count, COALESCE(cm.product_count, 0) as product_count,
COALESCE(cm.active_products, 0) as active_products, COALESCE(cm.active_products, 0) as active_products,
CAST(COALESCE(cm.total_value, 0) AS DECIMAL(15,3)) as total_value, ROUND(COALESCE(cm.total_value, 0)::numeric, 3) as total_value,
COALESCE(cm.avg_margin, 0) as avg_margin, COALESCE(cm.avg_margin, 0) as avg_margin,
COALESCE(cm.turnover_rate, 0) as turnover_rate, COALESCE(cm.turnover_rate, 0) as turnover_rate,
COALESCE(cm.growth_rate, 0) as growth_rate COALESCE(cm.growth_rate, 0) as growth_rate
@@ -39,22 +39,22 @@ router.get('/', async (req, res) => {
`); `);
// Get overall stats // Get overall stats
const [stats] = await pool.query(` const { rows: [stats] } = await pool.query(`
SELECT SELECT
COUNT(DISTINCT c.cat_id) as totalCategories, COUNT(DISTINCT c.cat_id) as totalCategories,
COUNT(DISTINCT CASE WHEN c.status = 'active' THEN c.cat_id END) as activeCategories, COUNT(DISTINCT CASE WHEN c.status = 'active' THEN c.cat_id END) as activeCategories,
CAST(COALESCE(SUM(cm.total_value), 0) AS DECIMAL(15,3)) as totalValue, ROUND(COALESCE(SUM(cm.total_value), 0)::numeric, 3) as totalValue,
COALESCE(ROUND(AVG(NULLIF(cm.avg_margin, 0)), 1), 0) as avgMargin, COALESCE(ROUND(AVG(NULLIF(cm.avg_margin, 0))::numeric, 1), 0) as avgMargin,
COALESCE(ROUND(AVG(NULLIF(cm.growth_rate, 0)), 1), 0) as avgGrowth COALESCE(ROUND(AVG(NULLIF(cm.growth_rate, 0))::numeric, 1), 0) as avgGrowth
FROM categories c FROM categories c
LEFT JOIN category_metrics cm ON c.cat_id = cm.category_id LEFT JOIN category_metrics cm ON c.cat_id = cm.category_id
`); `);
// Get type counts for filtering // Get type counts for filtering
const [typeCounts] = await pool.query(` const { rows: typeCounts } = await pool.query(`
SELECT SELECT
type, type,
COUNT(*) as count COUNT(*)::integer as count
FROM categories FROM categories
GROUP BY type GROUP BY type
ORDER BY type ORDER BY type
@@ -81,14 +81,14 @@ router.get('/', async (req, res) => {
})), })),
typeCounts: typeCounts.map(tc => ({ typeCounts: typeCounts.map(tc => ({
type: tc.type, type: tc.type,
count: parseInt(tc.count) count: tc.count // Already cast to integer in the query
})), })),
stats: { stats: {
totalCategories: parseInt(stats[0].totalCategories), totalCategories: parseInt(stats.totalcategories),
activeCategories: parseInt(stats[0].activeCategories), activeCategories: parseInt(stats.activecategories),
totalValue: parseFloat(stats[0].totalValue), totalValue: parseFloat(stats.totalvalue),
avgMargin: parseFloat(stats[0].avgMargin), avgMargin: parseFloat(stats.avgmargin),
avgGrowth: parseFloat(stats[0].avgGrowth) avgGrowth: parseFloat(stats.avggrowth)
} }
}); });
} catch (error) { } catch (error) {

View File

@@ -13,22 +13,22 @@ router.get('/', async (req, res) => {
try { try {
console.log('[Config Route] Fetching configuration values...'); console.log('[Config Route] Fetching configuration values...');
const [stockThresholds] = await pool.query('SELECT * FROM stock_thresholds WHERE id = 1'); const { rows: stockThresholds } = await pool.query('SELECT * FROM stock_thresholds WHERE id = 1');
console.log('[Config Route] Stock thresholds:', stockThresholds); console.log('[Config Route] Stock thresholds:', stockThresholds);
const [leadTimeThresholds] = await pool.query('SELECT * FROM lead_time_thresholds WHERE id = 1'); const { rows: leadTimeThresholds } = await pool.query('SELECT * FROM lead_time_thresholds WHERE id = 1');
console.log('[Config Route] Lead time thresholds:', leadTimeThresholds); console.log('[Config Route] Lead time thresholds:', leadTimeThresholds);
const [salesVelocityConfig] = await pool.query('SELECT * FROM sales_velocity_config WHERE id = 1'); const { rows: salesVelocityConfig } = await pool.query('SELECT * FROM sales_velocity_config WHERE id = 1');
console.log('[Config Route] Sales velocity config:', salesVelocityConfig); console.log('[Config Route] Sales velocity config:', salesVelocityConfig);
const [abcConfig] = await pool.query('SELECT * FROM abc_classification_config WHERE id = 1'); const { rows: abcConfig } = await pool.query('SELECT * FROM abc_classification_config WHERE id = 1');
console.log('[Config Route] ABC config:', abcConfig); console.log('[Config Route] ABC config:', abcConfig);
const [safetyStockConfig] = await pool.query('SELECT * FROM safety_stock_config WHERE id = 1'); const { rows: safetyStockConfig } = await pool.query('SELECT * FROM safety_stock_config WHERE id = 1');
console.log('[Config Route] Safety stock config:', safetyStockConfig); console.log('[Config Route] Safety stock config:', safetyStockConfig);
const [turnoverConfig] = await pool.query('SELECT * FROM turnover_config WHERE id = 1'); const { rows: turnoverConfig } = await pool.query('SELECT * FROM turnover_config WHERE id = 1');
console.log('[Config Route] Turnover config:', turnoverConfig); console.log('[Config Route] Turnover config:', turnoverConfig);
const response = { const response = {
@@ -53,14 +53,14 @@ router.put('/stock-thresholds/:id', async (req, res) => {
const pool = req.app.locals.pool; const pool = req.app.locals.pool;
try { try {
const { critical_days, reorder_days, overstock_days, low_stock_threshold, min_reorder_quantity } = req.body; const { critical_days, reorder_days, overstock_days, low_stock_threshold, min_reorder_quantity } = req.body;
const [result] = await pool.query( const { rows } = await pool.query(
`UPDATE stock_thresholds `UPDATE stock_thresholds
SET critical_days = ?, SET critical_days = $1,
reorder_days = ?, reorder_days = $2,
overstock_days = ?, overstock_days = $3,
low_stock_threshold = ?, low_stock_threshold = $4,
min_reorder_quantity = ? min_reorder_quantity = $5
WHERE id = ?`, WHERE id = $6`,
[critical_days, reorder_days, overstock_days, low_stock_threshold, min_reorder_quantity, req.params.id] [critical_days, reorder_days, overstock_days, low_stock_threshold, min_reorder_quantity, req.params.id]
); );
res.json({ success: true }); res.json({ success: true });
@@ -75,12 +75,12 @@ router.put('/lead-time-thresholds/:id', async (req, res) => {
const pool = req.app.locals.pool; const pool = req.app.locals.pool;
try { try {
const { target_days, warning_days, critical_days } = req.body; const { target_days, warning_days, critical_days } = req.body;
const [result] = await pool.query( const { rows } = await pool.query(
`UPDATE lead_time_thresholds `UPDATE lead_time_thresholds
SET target_days = ?, SET target_days = $1,
warning_days = ?, warning_days = $2,
critical_days = ? critical_days = $3
WHERE id = ?`, WHERE id = $4`,
[target_days, warning_days, critical_days, req.params.id] [target_days, warning_days, critical_days, req.params.id]
); );
res.json({ success: true }); res.json({ success: true });
@@ -95,12 +95,12 @@ router.put('/sales-velocity/:id', async (req, res) => {
const pool = req.app.locals.pool; const pool = req.app.locals.pool;
try { try {
const { daily_window_days, weekly_window_days, monthly_window_days } = req.body; const { daily_window_days, weekly_window_days, monthly_window_days } = req.body;
const [result] = await pool.query( const { rows } = await pool.query(
`UPDATE sales_velocity_config `UPDATE sales_velocity_config
SET daily_window_days = ?, SET daily_window_days = $1,
weekly_window_days = ?, weekly_window_days = $2,
monthly_window_days = ? monthly_window_days = $3
WHERE id = ?`, WHERE id = $4`,
[daily_window_days, weekly_window_days, monthly_window_days, req.params.id] [daily_window_days, weekly_window_days, monthly_window_days, req.params.id]
); );
res.json({ success: true }); res.json({ success: true });
@@ -115,12 +115,12 @@ router.put('/abc-classification/:id', async (req, res) => {
const pool = req.app.locals.pool; const pool = req.app.locals.pool;
try { try {
const { a_threshold, b_threshold, classification_period_days } = req.body; const { a_threshold, b_threshold, classification_period_days } = req.body;
const [result] = await pool.query( const { rows } = await pool.query(
`UPDATE abc_classification_config `UPDATE abc_classification_config
SET a_threshold = ?, SET a_threshold = $1,
b_threshold = ?, b_threshold = $2,
classification_period_days = ? classification_period_days = $3
WHERE id = ?`, WHERE id = $4`,
[a_threshold, b_threshold, classification_period_days, req.params.id] [a_threshold, b_threshold, classification_period_days, req.params.id]
); );
res.json({ success: true }); res.json({ success: true });
@@ -135,11 +135,11 @@ router.put('/safety-stock/:id', async (req, res) => {
const pool = req.app.locals.pool; const pool = req.app.locals.pool;
try { try {
const { coverage_days, service_level } = req.body; const { coverage_days, service_level } = req.body;
const [result] = await pool.query( const { rows } = await pool.query(
`UPDATE safety_stock_config `UPDATE safety_stock_config
SET coverage_days = ?, SET coverage_days = $1,
service_level = ? service_level = $2
WHERE id = ?`, WHERE id = $3`,
[coverage_days, service_level, req.params.id] [coverage_days, service_level, req.params.id]
); );
res.json({ success: true }); res.json({ success: true });
@@ -154,11 +154,11 @@ router.put('/turnover/:id', async (req, res) => {
const pool = req.app.locals.pool; const pool = req.app.locals.pool;
try { try {
const { calculation_period_days, target_rate } = req.body; const { calculation_period_days, target_rate } = req.body;
const [result] = await pool.query( const { rows } = await pool.query(
`UPDATE turnover_config `UPDATE turnover_config
SET calculation_period_days = ?, SET calculation_period_days = $1,
target_rate = ? target_rate = $2
WHERE id = ?`, WHERE id = $3`,
[calculation_period_days, target_rate, req.params.id] [calculation_period_days, target_rate, req.params.id]
); );
res.json({ success: true }); res.json({ success: true });

View File

@@ -750,8 +750,16 @@ router.post('/full-reset', async (req, res) => {
router.get('/history/import', async (req, res) => { router.get('/history/import', async (req, res) => {
try { try {
const pool = req.app.locals.pool; const pool = req.app.locals.pool;
const [rows] = await pool.query(` const { rows } = await pool.query(`
SELECT * FROM import_history SELECT
id,
start_time,
end_time,
status,
error_message,
rows_processed::integer,
files_processed::integer
FROM import_history
ORDER BY start_time DESC ORDER BY start_time DESC
LIMIT 20 LIMIT 20
`); `);
@@ -766,8 +774,16 @@ router.get('/history/import', async (req, res) => {
router.get('/history/calculate', async (req, res) => { router.get('/history/calculate', async (req, res) => {
try { try {
const pool = req.app.locals.pool; const pool = req.app.locals.pool;
const [rows] = await pool.query(` const { rows } = await pool.query(`
SELECT * FROM calculate_history SELECT
id,
start_time,
end_time,
status,
error_message,
modules_processed::integer,
total_modules::integer
FROM calculate_history
ORDER BY start_time DESC ORDER BY start_time DESC
LIMIT 20 LIMIT 20
`); `);
@@ -782,8 +798,10 @@ router.get('/history/calculate', async (req, res) => {
router.get('/status/modules', async (req, res) => { router.get('/status/modules', async (req, res) => {
try { try {
const pool = req.app.locals.pool; const pool = req.app.locals.pool;
const [rows] = await pool.query(` const { rows } = await pool.query(`
SELECT module_name, last_calculation_timestamp SELECT
module_name,
last_calculation_timestamp::timestamp
FROM calculate_status FROM calculate_status
ORDER BY module_name ORDER BY module_name
`); `);
@@ -798,8 +816,10 @@ router.get('/status/modules', async (req, res) => {
router.get('/status/tables', async (req, res) => { router.get('/status/tables', async (req, res) => {
try { try {
const pool = req.app.locals.pool; const pool = req.app.locals.pool;
const [rows] = await pool.query(` const { rows } = await pool.query(`
SELECT table_name, last_sync_timestamp SELECT
table_name,
last_sync_timestamp::timestamp
FROM sync_status FROM sync_status
ORDER BY table_name ORDER BY table_name
`); `);

View File

@@ -19,16 +19,15 @@ async function executeQuery(sql, params = []) {
router.get('/stock/metrics', async (req, res) => { router.get('/stock/metrics', async (req, res) => {
try { try {
// Get stock metrics // Get stock metrics
const [rows] = await executeQuery(` const { rows: [stockMetrics] } = await executeQuery(`
SELECT SELECT
COALESCE(COUNT(*), 0) as total_products, COALESCE(COUNT(*), 0)::integer as total_products,
COALESCE(COUNT(CASE WHEN stock_quantity > 0 THEN 1 END), 0) as products_in_stock, COALESCE(COUNT(CASE WHEN stock_quantity > 0 THEN 1 END), 0)::integer as products_in_stock,
COALESCE(SUM(CASE WHEN stock_quantity > 0 THEN stock_quantity END), 0) as total_units, COALESCE(SUM(CASE WHEN stock_quantity > 0 THEN stock_quantity END), 0)::integer as total_units,
COALESCE(SUM(CASE WHEN stock_quantity > 0 THEN stock_quantity * cost_price END), 0) as total_cost, ROUND(COALESCE(SUM(CASE WHEN stock_quantity > 0 THEN stock_quantity * cost_price END), 0)::numeric, 3) as total_cost,
COALESCE(SUM(CASE WHEN stock_quantity > 0 THEN stock_quantity * price END), 0) as total_retail ROUND(COALESCE(SUM(CASE WHEN stock_quantity > 0 THEN stock_quantity * price END), 0)::numeric, 3) as total_retail
FROM products FROM products
`); `);
const stockMetrics = rows[0];
console.log('Raw stockMetrics from database:', stockMetrics); console.log('Raw stockMetrics from database:', stockMetrics);
console.log('stockMetrics.total_products:', stockMetrics.total_products); console.log('stockMetrics.total_products:', stockMetrics.total_products);
@@ -38,26 +37,26 @@ router.get('/stock/metrics', async (req, res) => {
console.log('stockMetrics.total_retail:', stockMetrics.total_retail); console.log('stockMetrics.total_retail:', stockMetrics.total_retail);
// Get brand stock values with Other category // Get brand stock values with Other category
const [brandValues] = await executeQuery(` const { rows: brandValues } = await executeQuery(`
WITH brand_totals AS ( WITH brand_totals AS (
SELECT SELECT
COALESCE(brand, 'Unbranded') as brand, COALESCE(brand, 'Unbranded') as brand,
COUNT(DISTINCT pid) as variant_count, COUNT(DISTINCT pid)::integer as variant_count,
COALESCE(SUM(stock_quantity), 0) as stock_units, COALESCE(SUM(stock_quantity), 0)::integer as stock_units,
CAST(COALESCE(SUM(stock_quantity * cost_price), 0) AS DECIMAL(15,3)) as stock_cost, ROUND(COALESCE(SUM(stock_quantity * cost_price), 0)::numeric, 3) as stock_cost,
CAST(COALESCE(SUM(stock_quantity * price), 0) AS DECIMAL(15,3)) as stock_retail ROUND(COALESCE(SUM(stock_quantity * price), 0)::numeric, 3) as stock_retail
FROM products FROM products
WHERE stock_quantity > 0 WHERE stock_quantity > 0
GROUP BY COALESCE(brand, 'Unbranded') GROUP BY COALESCE(brand, 'Unbranded')
HAVING stock_cost > 0 HAVING ROUND(COALESCE(SUM(stock_quantity * cost_price), 0)::numeric, 3) > 0
), ),
other_brands AS ( other_brands AS (
SELECT SELECT
'Other' as brand, 'Other' as brand,
SUM(variant_count) as variant_count, SUM(variant_count)::integer as variant_count,
SUM(stock_units) as stock_units, SUM(stock_units)::integer as stock_units,
CAST(SUM(stock_cost) AS DECIMAL(15,3)) as stock_cost, ROUND(SUM(stock_cost)::numeric, 3) as stock_cost,
CAST(SUM(stock_retail) AS DECIMAL(15,3)) as stock_retail ROUND(SUM(stock_retail)::numeric, 3) as stock_retail
FROM brand_totals FROM brand_totals
WHERE stock_cost <= 5000 WHERE stock_cost <= 5000
), ),
@@ -101,51 +100,50 @@ router.get('/stock/metrics', async (req, res) => {
// Returns purchase order metrics by vendor // Returns purchase order metrics by vendor
router.get('/purchase/metrics', async (req, res) => { router.get('/purchase/metrics', async (req, res) => {
try { try {
const [rows] = await executeQuery(` const { rows: [poMetrics] } = await executeQuery(`
SELECT SELECT
COALESCE(COUNT(DISTINCT CASE COALESCE(COUNT(DISTINCT CASE
WHEN po.receiving_status < ${ReceivingStatus.PartialReceived} WHEN po.receiving_status < $1
THEN po.po_id THEN po.po_id
END), 0) as active_pos, END), 0)::integer as active_pos,
COALESCE(COUNT(DISTINCT CASE COALESCE(COUNT(DISTINCT CASE
WHEN po.receiving_status < ${ReceivingStatus.PartialReceived} WHEN po.receiving_status < $1
AND po.expected_date < CURDATE() AND po.expected_date < CURRENT_DATE
THEN po.po_id THEN po.po_id
END), 0) as overdue_pos, END), 0)::integer as overdue_pos,
COALESCE(SUM(CASE COALESCE(SUM(CASE
WHEN po.receiving_status < ${ReceivingStatus.PartialReceived} WHEN po.receiving_status < $1
THEN po.ordered THEN po.ordered
ELSE 0 ELSE 0
END), 0) as total_units, END), 0)::integer as total_units,
CAST(COALESCE(SUM(CASE ROUND(COALESCE(SUM(CASE
WHEN po.receiving_status < ${ReceivingStatus.PartialReceived} WHEN po.receiving_status < $1
THEN po.ordered * po.cost_price THEN po.ordered * po.cost_price
ELSE 0 ELSE 0
END), 0) AS DECIMAL(15,3)) as total_cost, END), 0)::numeric, 3) as total_cost,
CAST(COALESCE(SUM(CASE ROUND(COALESCE(SUM(CASE
WHEN po.receiving_status < ${ReceivingStatus.PartialReceived} WHEN po.receiving_status < $1
THEN po.ordered * p.price THEN po.ordered * p.price
ELSE 0 ELSE 0
END), 0) AS DECIMAL(15,3)) as total_retail END), 0)::numeric, 3) as total_retail
FROM purchase_orders po FROM purchase_orders po
JOIN products p ON po.pid = p.pid JOIN products p ON po.pid = p.pid
`); `, [ReceivingStatus.PartialReceived]);
const poMetrics = rows[0];
const [vendorOrders] = await executeQuery(` const { rows: vendorOrders } = await executeQuery(`
SELECT SELECT
po.vendor, po.vendor,
COUNT(DISTINCT po.po_id) as orders, COUNT(DISTINCT po.po_id)::integer as orders,
COALESCE(SUM(po.ordered), 0) as units, COALESCE(SUM(po.ordered), 0)::integer as units,
CAST(COALESCE(SUM(po.ordered * po.cost_price), 0) AS DECIMAL(15,3)) as cost, ROUND(COALESCE(SUM(po.ordered * po.cost_price), 0)::numeric, 3) as cost,
CAST(COALESCE(SUM(po.ordered * p.price), 0) AS DECIMAL(15,3)) as retail ROUND(COALESCE(SUM(po.ordered * p.price), 0)::numeric, 3) as retail
FROM purchase_orders po FROM purchase_orders po
JOIN products p ON po.pid = p.pid JOIN products p ON po.pid = p.pid
WHERE po.receiving_status < ${ReceivingStatus.PartialReceived} WHERE po.receiving_status < $1
GROUP BY po.vendor GROUP BY po.vendor
HAVING cost > 0 HAVING ROUND(COALESCE(SUM(po.ordered * po.cost_price), 0)::numeric, 3) > 0
ORDER BY cost DESC ORDER BY cost DESC
`); `, [ReceivingStatus.PartialReceived]);
// Format response to match PurchaseMetricsData interface // Format response to match PurchaseMetricsData interface
const response = { const response = {
@@ -175,21 +173,21 @@ router.get('/purchase/metrics', async (req, res) => {
router.get('/replenishment/metrics', async (req, res) => { router.get('/replenishment/metrics', async (req, res) => {
try { try {
// Get summary metrics // Get summary metrics
const [metrics] = await executeQuery(` const { rows: [metrics] } = await executeQuery(`
SELECT SELECT
COUNT(DISTINCT p.pid) as products_to_replenish, COUNT(DISTINCT p.pid)::integer as products_to_replenish,
COALESCE(SUM(CASE COALESCE(SUM(CASE
WHEN p.stock_quantity < 0 THEN ABS(p.stock_quantity) + pm.reorder_qty WHEN p.stock_quantity < 0 THEN ABS(p.stock_quantity) + pm.reorder_qty
ELSE pm.reorder_qty ELSE pm.reorder_qty
END), 0) as total_units_needed, END), 0)::integer as total_units_needed,
CAST(COALESCE(SUM(CASE ROUND(COALESCE(SUM(CASE
WHEN p.stock_quantity < 0 THEN (ABS(p.stock_quantity) + pm.reorder_qty) * p.cost_price WHEN p.stock_quantity < 0 THEN (ABS(p.stock_quantity) + pm.reorder_qty) * p.cost_price
ELSE pm.reorder_qty * p.cost_price ELSE pm.reorder_qty * p.cost_price
END), 0) AS DECIMAL(15,3)) as total_cost, END), 0)::numeric, 3) as total_cost,
CAST(COALESCE(SUM(CASE ROUND(COALESCE(SUM(CASE
WHEN p.stock_quantity < 0 THEN (ABS(p.stock_quantity) + pm.reorder_qty) * p.price WHEN p.stock_quantity < 0 THEN (ABS(p.stock_quantity) + pm.reorder_qty) * p.price
ELSE pm.reorder_qty * p.price ELSE pm.reorder_qty * p.price
END), 0) AS DECIMAL(15,3)) as total_retail END), 0)::numeric, 3) as total_retail
FROM products p FROM products p
JOIN product_metrics pm ON p.pid = pm.pid JOIN product_metrics pm ON p.pid = pm.pid
WHERE p.replenishable = true WHERE p.replenishable = true
@@ -199,23 +197,23 @@ router.get('/replenishment/metrics', async (req, res) => {
`); `);
// Get top variants to replenish // Get top variants to replenish
const [variants] = await executeQuery(` const { rows: variants } = await executeQuery(`
SELECT SELECT
p.pid, p.pid,
p.title, p.title,
p.stock_quantity as current_stock, p.stock_quantity::integer as current_stock,
CASE CASE
WHEN p.stock_quantity < 0 THEN ABS(p.stock_quantity) + pm.reorder_qty WHEN p.stock_quantity < 0 THEN ABS(p.stock_quantity) + pm.reorder_qty
ELSE pm.reorder_qty ELSE pm.reorder_qty
END as replenish_qty, END::integer as replenish_qty,
CAST(CASE ROUND(CASE
WHEN p.stock_quantity < 0 THEN (ABS(p.stock_quantity) + pm.reorder_qty) * p.cost_price WHEN p.stock_quantity < 0 THEN (ABS(p.stock_quantity) + pm.reorder_qty) * p.cost_price
ELSE pm.reorder_qty * p.cost_price ELSE pm.reorder_qty * p.cost_price
END AS DECIMAL(15,3)) as replenish_cost, END::numeric, 3) as replenish_cost,
CAST(CASE ROUND(CASE
WHEN p.stock_quantity < 0 THEN (ABS(p.stock_quantity) + pm.reorder_qty) * p.price WHEN p.stock_quantity < 0 THEN (ABS(p.stock_quantity) + pm.reorder_qty) * p.price
ELSE pm.reorder_qty * p.price ELSE pm.reorder_qty * p.price
END AS DECIMAL(15,3)) as replenish_retail, END::numeric, 3) as replenish_retail,
pm.stock_status pm.stock_status
FROM products p FROM products p
JOIN product_metrics pm ON p.pid = pm.pid JOIN product_metrics pm ON p.pid = pm.pid
@@ -234,10 +232,10 @@ router.get('/replenishment/metrics', async (req, res) => {
// Format response // Format response
const response = { const response = {
productsToReplenish: parseInt(metrics[0].products_to_replenish) || 0, productsToReplenish: parseInt(metrics.products_to_replenish) || 0,
unitsToReplenish: parseInt(metrics[0].total_units_needed) || 0, unitsToReplenish: parseInt(metrics.total_units_needed) || 0,
replenishmentCost: parseFloat(metrics[0].total_cost) || 0, replenishmentCost: parseFloat(metrics.total_cost) || 0,
replenishmentRetail: parseFloat(metrics[0].total_retail) || 0, replenishmentRetail: parseFloat(metrics.total_retail) || 0,
topVariants: variants.map(v => ({ topVariants: variants.map(v => ({
id: v.pid, id: v.pid,
title: v.title, title: v.title,

View File

@@ -5,26 +5,28 @@ const router = express.Router();
router.get('/trends', async (req, res) => { router.get('/trends', async (req, res) => {
const pool = req.app.locals.pool; const pool = req.app.locals.pool;
try { try {
const [rows] = await pool.query(` const { rows } = await pool.query(`
WITH MonthlyMetrics AS ( WITH MonthlyMetrics AS (
SELECT SELECT
DATE(CONCAT(pta.year, '-', LPAD(pta.month, 2, '0'), '-01')) as date, make_date(pta.year, pta.month, 1) as date,
CAST(COALESCE(SUM(pta.total_revenue), 0) AS DECIMAL(15,3)) as revenue, ROUND(COALESCE(SUM(pta.total_revenue), 0)::numeric, 3) as revenue,
CAST(COALESCE(SUM(pta.total_cost), 0) AS DECIMAL(15,3)) as cost, ROUND(COALESCE(SUM(pta.total_cost), 0)::numeric, 3) as cost,
CAST(COALESCE(SUM(pm.inventory_value), 0) AS DECIMAL(15,3)) as inventory_value, ROUND(COALESCE(SUM(pm.inventory_value), 0)::numeric, 3) as inventory_value,
CASE CASE
WHEN SUM(pm.inventory_value) > 0 WHEN SUM(pm.inventory_value) > 0
THEN CAST((SUM(pta.total_revenue - pta.total_cost) / SUM(pm.inventory_value)) * 100 AS DECIMAL(15,3)) THEN ROUND((SUM(pta.total_revenue - pta.total_cost) / SUM(pm.inventory_value) * 100)::numeric, 3)
ELSE 0 ELSE 0
END as gmroi END as gmroi
FROM product_time_aggregates pta FROM product_time_aggregates pta
JOIN product_metrics pm ON pta.pid = pm.pid JOIN product_metrics pm ON pta.pid = pm.pid
WHERE (pta.year * 100 + pta.month) >= DATE_FORMAT(DATE_SUB(CURDATE(), INTERVAL 12 MONTH), '%Y%m') WHERE (pta.year * 100 + pta.month) >=
EXTRACT(YEAR FROM CURRENT_DATE - INTERVAL '12 months')::integer * 100 +
EXTRACT(MONTH FROM CURRENT_DATE - INTERVAL '12 months')::integer
GROUP BY pta.year, pta.month GROUP BY pta.year, pta.month
ORDER BY date ASC ORDER BY date ASC
) )
SELECT SELECT
DATE_FORMAT(date, '%b %y') as date, to_char(date, 'Mon YY') as date,
revenue, revenue,
inventory_value, inventory_value,
gmroi gmroi

View File

@@ -20,39 +20,46 @@ router.get('/', async (req, res) => {
// Build the WHERE clause // Build the WHERE clause
const conditions = ['o1.canceled = false']; const conditions = ['o1.canceled = false'];
const params = []; const params = [];
let paramCounter = 1;
if (search) { if (search) {
conditions.push('(o1.order_number LIKE ? OR o1.customer LIKE ?)'); conditions.push(`(o1.order_number ILIKE $${paramCounter} OR o1.customer ILIKE $${paramCounter})`);
params.push(`%${search}%`, `%${search}%`); params.push(`%${search}%`);
paramCounter++;
} }
if (status !== 'all') { if (status !== 'all') {
conditions.push('o1.status = ?'); conditions.push(`o1.status = $${paramCounter}`);
params.push(status); params.push(status);
paramCounter++;
} }
if (fromDate) { if (fromDate) {
conditions.push('DATE(o1.date) >= DATE(?)'); conditions.push(`DATE(o1.date) >= DATE($${paramCounter})`);
params.push(fromDate.toISOString()); params.push(fromDate.toISOString());
paramCounter++;
} }
if (toDate) { if (toDate) {
conditions.push('DATE(o1.date) <= DATE(?)'); conditions.push(`DATE(o1.date) <= DATE($${paramCounter})`);
params.push(toDate.toISOString()); params.push(toDate.toISOString());
paramCounter++;
} }
if (minAmount > 0) { if (minAmount > 0) {
conditions.push('total_amount >= ?'); conditions.push(`total_amount >= $${paramCounter}`);
params.push(minAmount); params.push(minAmount);
paramCounter++;
} }
if (maxAmount) { if (maxAmount) {
conditions.push('total_amount <= ?'); conditions.push(`total_amount <= $${paramCounter}`);
params.push(maxAmount); params.push(maxAmount);
paramCounter++;
} }
// Get total count for pagination // Get total count for pagination
const [countResult] = await pool.query(` const { rows: [countResult] } = await pool.query(`
SELECT COUNT(DISTINCT o1.order_number) as total SELECT COUNT(DISTINCT o1.order_number) as total
FROM orders o1 FROM orders o1
LEFT JOIN ( LEFT JOIN (
@@ -63,7 +70,7 @@ router.get('/', async (req, res) => {
WHERE ${conditions.join(' AND ')} WHERE ${conditions.join(' AND ')}
`, params); `, params);
const total = countResult[0].total; const total = countResult.total;
// Get paginated results // Get paginated results
const query = ` const query = `
@@ -75,7 +82,7 @@ router.get('/', async (req, res) => {
o1.payment_method, o1.payment_method,
o1.shipping_method, o1.shipping_method,
COUNT(o2.pid) as items_count, COUNT(o2.pid) as items_count,
CAST(SUM(o2.price * o2.quantity) AS DECIMAL(15,3)) as total_amount ROUND(SUM(o2.price * o2.quantity)::numeric, 3) as total_amount
FROM orders o1 FROM orders o1
JOIN orders o2 ON o1.order_number = o2.order_number JOIN orders o2 ON o1.order_number = o2.order_number
WHERE ${conditions.join(' AND ')} WHERE ${conditions.join(' AND ')}
@@ -91,36 +98,37 @@ router.get('/', async (req, res) => {
? `${sortColumn} ${sortDirection}` ? `${sortColumn} ${sortDirection}`
: `o1.${sortColumn} ${sortDirection}` : `o1.${sortColumn} ${sortDirection}`
} }
LIMIT ? OFFSET ? LIMIT $${paramCounter} OFFSET $${paramCounter + 1}
`; `;
const [rows] = await pool.query(query, [...params, limit, offset]); params.push(limit, offset);
const { rows } = await pool.query(query, params);
// Get order statistics // Get order statistics
const [stats] = await pool.query(` const { rows: [orderStats] } = await pool.query(`
WITH CurrentStats AS ( WITH CurrentStats AS (
SELECT SELECT
COUNT(DISTINCT order_number) as total_orders, COUNT(DISTINCT order_number) as total_orders,
CAST(SUM(price * quantity) AS DECIMAL(15,3)) as total_revenue ROUND(SUM(price * quantity)::numeric, 3) as total_revenue
FROM orders FROM orders
WHERE canceled = false WHERE canceled = false
AND DATE(date) >= DATE_SUB(CURDATE(), INTERVAL 30 DAY) AND DATE(date) >= CURRENT_DATE - INTERVAL '30 days'
), ),
PreviousStats AS ( PreviousStats AS (
SELECT SELECT
COUNT(DISTINCT order_number) as prev_orders, COUNT(DISTINCT order_number) as prev_orders,
CAST(SUM(price * quantity) AS DECIMAL(15,3)) as prev_revenue ROUND(SUM(price * quantity)::numeric, 3) as prev_revenue
FROM orders FROM orders
WHERE canceled = false WHERE canceled = false
AND DATE(date) BETWEEN DATE_SUB(CURDATE(), INTERVAL 60 DAY) AND DATE_SUB(CURDATE(), INTERVAL 30 DAY) AND DATE(date) BETWEEN CURRENT_DATE - INTERVAL '60 days' AND CURRENT_DATE - INTERVAL '30 days'
), ),
OrderValues AS ( OrderValues AS (
SELECT SELECT
order_number, order_number,
CAST(SUM(price * quantity) AS DECIMAL(15,3)) as order_value ROUND(SUM(price * quantity)::numeric, 3) as order_value
FROM orders FROM orders
WHERE canceled = false WHERE canceled = false
AND DATE(date) >= DATE_SUB(CURDATE(), INTERVAL 30 DAY) AND DATE(date) >= CURRENT_DATE - INTERVAL '30 days'
GROUP BY order_number GROUP BY order_number
) )
SELECT SELECT
@@ -128,29 +136,27 @@ router.get('/', async (req, res) => {
cs.total_revenue, cs.total_revenue,
CASE CASE
WHEN ps.prev_orders > 0 WHEN ps.prev_orders > 0
THEN ((cs.total_orders - ps.prev_orders) / ps.prev_orders * 100) THEN ROUND(((cs.total_orders - ps.prev_orders)::numeric / ps.prev_orders * 100), 1)
ELSE 0 ELSE 0
END as order_growth, END as order_growth,
CASE CASE
WHEN ps.prev_revenue > 0 WHEN ps.prev_revenue > 0
THEN ((cs.total_revenue - ps.prev_revenue) / ps.prev_revenue * 100) THEN ROUND(((cs.total_revenue - ps.prev_revenue)::numeric / ps.prev_revenue * 100), 1)
ELSE 0 ELSE 0
END as revenue_growth, END as revenue_growth,
CASE CASE
WHEN cs.total_orders > 0 WHEN cs.total_orders > 0
THEN CAST((cs.total_revenue / cs.total_orders) AS DECIMAL(15,3)) THEN ROUND((cs.total_revenue::numeric / cs.total_orders), 3)
ELSE 0 ELSE 0
END as average_order_value, END as average_order_value,
CASE CASE
WHEN ps.prev_orders > 0 WHEN ps.prev_orders > 0
THEN CAST((ps.prev_revenue / ps.prev_orders) AS DECIMAL(15,3)) THEN ROUND((ps.prev_revenue::numeric / ps.prev_orders), 3)
ELSE 0 ELSE 0
END as prev_average_order_value END as prev_average_order_value
FROM CurrentStats cs FROM CurrentStats cs
CROSS JOIN PreviousStats ps CROSS JOIN PreviousStats ps
`); `);
const orderStats = stats[0];
res.json({ res.json({
orders: rows.map(row => ({ orders: rows.map(row => ({
@@ -189,7 +195,7 @@ router.get('/:orderNumber', async (req, res) => {
const pool = req.app.locals.pool; const pool = req.app.locals.pool;
try { try {
// Get order details // Get order details
const [orderRows] = await pool.query(` const { rows: orderRows } = await pool.query(`
SELECT DISTINCT SELECT DISTINCT
o1.order_number, o1.order_number,
o1.customer, o1.customer,
@@ -200,10 +206,10 @@ router.get('/:orderNumber', async (req, res) => {
o1.shipping_address, o1.shipping_address,
o1.billing_address, o1.billing_address,
COUNT(o2.pid) as items_count, COUNT(o2.pid) as items_count,
CAST(SUM(o2.price * o2.quantity) AS DECIMAL(15,3)) as total_amount ROUND(SUM(o2.price * o2.quantity)::numeric, 3) as total_amount
FROM orders o1 FROM orders o1
JOIN orders o2 ON o1.order_number = o2.order_number JOIN orders o2 ON o1.order_number = o2.order_number
WHERE o1.order_number = ? AND o1.canceled = false WHERE o1.order_number = $1 AND o1.canceled = false
GROUP BY GROUP BY
o1.order_number, o1.order_number,
o1.customer, o1.customer,
@@ -220,17 +226,17 @@ router.get('/:orderNumber', async (req, res) => {
} }
// Get order items // Get order items
const [itemRows] = await pool.query(` const { rows: itemRows } = await pool.query(`
SELECT SELECT
o.pid, o.pid,
p.title, p.title,
p.SKU, p.SKU,
o.quantity, o.quantity,
o.price, o.price,
CAST((o.price * o.quantity) AS DECIMAL(15,3)) as total ROUND((o.price * o.quantity)::numeric, 3) as total
FROM orders o FROM orders o
JOIN products p ON o.pid = p.pid JOIN products p ON o.pid = p.pid
WHERE o.order_number = ? AND o.canceled = false WHERE o.order_number = $1 AND o.canceled = false
`, [req.params.orderNumber]); `, [req.params.orderNumber]);
const order = { const order = {

View File

@@ -20,7 +20,7 @@ router.get('/brands', async (req, res) => {
const pool = req.app.locals.pool; const pool = req.app.locals.pool;
console.log('Fetching brands from database...'); console.log('Fetching brands from database...');
const [results] = await pool.query(` const { rows } = await pool.query(`
SELECT DISTINCT COALESCE(p.brand, 'Unbranded') as brand SELECT DISTINCT COALESCE(p.brand, 'Unbranded') as brand
FROM products p FROM products p
JOIN purchase_orders po ON p.pid = po.pid JOIN purchase_orders po ON p.pid = po.pid
@@ -30,8 +30,8 @@ router.get('/brands', async (req, res) => {
ORDER BY COALESCE(p.brand, 'Unbranded') ORDER BY COALESCE(p.brand, 'Unbranded')
`); `);
console.log(`Found ${results.length} brands:`, results.slice(0, 3)); console.log(`Found ${rows.length} brands:`, rows.slice(0, 3));
res.json(results.map(r => r.brand)); res.json(rows.map(r => r.brand));
} catch (error) { } catch (error) {
console.error('Error fetching brands:', error); console.error('Error fetching brands:', error);
res.status(500).json({ error: 'Failed to fetch brands' }); res.status(500).json({ error: 'Failed to fetch brands' });
@@ -50,6 +50,7 @@ router.get('/', async (req, res) => {
const conditions = ['p.visible = true']; const conditions = ['p.visible = true'];
const params = []; const params = [];
let paramCounter = 1;
// Add default replenishable filter unless explicitly showing non-replenishable // Add default replenishable filter unless explicitly showing non-replenishable
if (req.query.showNonReplenishable !== 'true') { if (req.query.showNonReplenishable !== 'true') {
@@ -58,9 +59,10 @@ router.get('/', async (req, res) => {
// Handle search filter // Handle search filter
if (req.query.search) { if (req.query.search) {
conditions.push('(p.title LIKE ? OR p.SKU LIKE ? OR p.barcode LIKE ?)'); conditions.push(`(p.title ILIKE $${paramCounter} OR p.SKU ILIKE $${paramCounter} OR p.barcode ILIKE $${paramCounter})`);
const searchTerm = `%${req.query.search}%`; const searchTerm = `%${req.query.search}%`;
params.push(searchTerm, searchTerm, searchTerm); params.push(searchTerm);
paramCounter++;
} }
// Handle numeric filters with operators // Handle numeric filters with operators
@@ -84,61 +86,69 @@ router.get('/', async (req, res) => {
if (field) { if (field) {
const operator = req.query[`${key}_operator`] || '='; const operator = req.query[`${key}_operator`] || '=';
if (operator === 'between') { if (operator === 'between') {
// Handle between operator
try { try {
const [min, max] = JSON.parse(value); const [min, max] = JSON.parse(value);
conditions.push(`${field} BETWEEN ? AND ?`); conditions.push(`${field} BETWEEN $${paramCounter} AND $${paramCounter + 1}`);
params.push(min, max); params.push(min, max);
paramCounter += 2;
} catch (e) { } catch (e) {
console.error(`Invalid between value for ${key}:`, value); console.error(`Invalid between value for ${key}:`, value);
} }
} else { } else {
// Handle other operators conditions.push(`${field} ${operator} $${paramCounter}`);
conditions.push(`${field} ${operator} ?`);
params.push(parseFloat(value)); params.push(parseFloat(value));
paramCounter++;
} }
} }
}); });
// Handle select filters // Handle select filters
if (req.query.vendor) { if (req.query.vendor) {
conditions.push('p.vendor = ?'); conditions.push(`p.vendor = $${paramCounter}`);
params.push(req.query.vendor); params.push(req.query.vendor);
paramCounter++;
} }
if (req.query.brand) { if (req.query.brand) {
conditions.push('p.brand = ?'); conditions.push(`p.brand = $${paramCounter}`);
params.push(req.query.brand); params.push(req.query.brand);
paramCounter++;
} }
if (req.query.category) { if (req.query.category) {
conditions.push('p.categories LIKE ?'); conditions.push(`p.categories ILIKE $${paramCounter}`);
params.push(`%${req.query.category}%`); params.push(`%${req.query.category}%`);
paramCounter++;
} }
if (req.query.stockStatus && req.query.stockStatus !== 'all') { if (req.query.stockStatus && req.query.stockStatus !== 'all') {
conditions.push('pm.stock_status = ?'); conditions.push(`pm.stock_status = $${paramCounter}`);
params.push(req.query.stockStatus); params.push(req.query.stockStatus);
paramCounter++;
} }
if (req.query.abcClass) { if (req.query.abcClass) {
conditions.push('pm.abc_class = ?'); conditions.push(`pm.abc_class = $${paramCounter}`);
params.push(req.query.abcClass); params.push(req.query.abcClass);
paramCounter++;
} }
if (req.query.leadTimeStatus) { if (req.query.leadTimeStatus) {
conditions.push('pm.lead_time_status = ?'); conditions.push(`pm.lead_time_status = $${paramCounter}`);
params.push(req.query.leadTimeStatus); params.push(req.query.leadTimeStatus);
paramCounter++;
} }
if (req.query.replenishable !== undefined) { if (req.query.replenishable !== undefined) {
conditions.push('p.replenishable = ?'); conditions.push(`p.replenishable = $${paramCounter}`);
params.push(req.query.replenishable === 'true' ? 1 : 0); params.push(req.query.replenishable === 'true');
paramCounter++;
} }
if (req.query.managingStock !== undefined) { if (req.query.managingStock !== undefined) {
conditions.push('p.managing_stock = ?'); conditions.push(`p.managing_stock = $${paramCounter}`);
params.push(req.query.managingStock === 'true' ? 1 : 0); params.push(req.query.managingStock === 'true');
paramCounter++;
} }
// Combine all conditions with AND // Combine all conditions with AND
@@ -151,17 +161,17 @@ router.get('/', async (req, res) => {
LEFT JOIN product_metrics pm ON p.pid = pm.pid LEFT JOIN product_metrics pm ON p.pid = pm.pid
${whereClause} ${whereClause}
`; `;
const [countResult] = await pool.query(countQuery, params); const { rows: [countResult] } = await pool.query(countQuery, params);
const total = countResult[0].total; const total = countResult.total;
// Get available filters // Get available filters
const [categories] = await pool.query( const { rows: categories } = await pool.query(
'SELECT name FROM categories ORDER BY name' 'SELECT name FROM categories ORDER BY name'
); );
const [vendors] = await pool.query( const { rows: vendors } = await pool.query(
'SELECT DISTINCT vendor FROM products WHERE visible = true AND vendor IS NOT NULL AND vendor != "" ORDER BY vendor' 'SELECT DISTINCT vendor FROM products WHERE visible = true AND vendor IS NOT NULL AND vendor != \'\' ORDER BY vendor'
); );
const [brands] = await pool.query( const { rows: brands } = await pool.query(
'SELECT DISTINCT COALESCE(brand, \'Unbranded\') as brand FROM products WHERE visible = true ORDER BY brand' 'SELECT DISTINCT COALESCE(brand, \'Unbranded\') as brand FROM products WHERE visible = true ORDER BY brand'
); );
@@ -173,7 +183,7 @@ router.get('/', async (req, res) => {
c.cat_id, c.cat_id,
c.name, c.name,
c.parent_id, c.parent_id,
CAST(c.name AS CHAR(1000)) as path CAST(c.name AS text) as path
FROM categories c FROM categories c
WHERE c.parent_id IS NULL WHERE c.parent_id IS NULL
@@ -183,7 +193,7 @@ router.get('/', async (req, res) => {
c.cat_id, c.cat_id,
c.name, c.name,
c.parent_id, c.parent_id,
CONCAT(cp.path, ' > ', c.name) cp.path || ' > ' || c.name
FROM categories c FROM categories c
JOIN category_path cp ON c.parent_id = cp.cat_id JOIN category_path cp ON c.parent_id = cp.cat_id
), ),
@@ -210,7 +220,6 @@ router.get('/', async (req, res) => {
FROM products p FROM products p
), ),
product_leaf_categories AS ( product_leaf_categories AS (
-- Find categories that aren't parents to other categories for this product
SELECT DISTINCT pc.cat_id SELECT DISTINCT pc.cat_id
FROM product_categories pc FROM product_categories pc
WHERE NOT EXISTS ( WHERE NOT EXISTS (
@@ -224,7 +233,7 @@ router.get('/', async (req, res) => {
SELECT SELECT
p.*, p.*,
COALESCE(p.brand, 'Unbranded') as brand, COALESCE(p.brand, 'Unbranded') as brand,
GROUP_CONCAT(DISTINCT CONCAT(c.cat_id, ':', c.name)) as categories, string_agg(DISTINCT (c.cat_id || ':' || c.name), ',') as categories,
pm.daily_sales_avg, pm.daily_sales_avg,
pm.weekly_sales_avg, pm.weekly_sales_avg,
pm.monthly_sales_avg, pm.monthly_sales_avg,
@@ -247,83 +256,32 @@ router.get('/', async (req, res) => {
pm.last_received_date, pm.last_received_date,
pm.abc_class, pm.abc_class,
pm.stock_status, pm.stock_status,
pm.turnover_rate, pm.turnover_rate
pm.current_lead_time,
pm.target_lead_time,
pm.lead_time_status,
pm.reorder_qty,
pm.overstocked_amt,
COALESCE(pm.days_of_inventory / NULLIF(pt.target_days, 0), 0) as stock_coverage_ratio
FROM products p FROM products p
LEFT JOIN product_metrics pm ON p.pid = pm.pid LEFT JOIN product_metrics pm ON p.pid = pm.pid
LEFT JOIN product_categories pc ON p.pid = pc.pid LEFT JOIN product_categories pc ON p.pid = pc.pid
LEFT JOIN categories c ON pc.cat_id = c.cat_id LEFT JOIN categories c ON pc.cat_id = c.cat_id
LEFT JOIN product_thresholds pt ON p.pid = pt.pid ${whereClause}
JOIN product_leaf_categories plc ON c.cat_id = plc.cat_id GROUP BY p.pid, pm.pid
${whereClause ? 'WHERE ' + whereClause.substring(6) : ''}
GROUP BY p.pid
ORDER BY ${sortColumn} ${sortDirection} ORDER BY ${sortColumn} ${sortDirection}
LIMIT ? OFFSET ? LIMIT $${paramCounter} OFFSET $${paramCounter + 1}
`; `;
// Add pagination params to the main query params params.push(limit, offset);
const queryParams = [...params, limit, offset]; const { rows: products } = await pool.query(query, params);
console.log('Query:', query.replace(/\s+/g, ' '));
console.log('Params:', queryParams);
const [rows] = await pool.query(query, queryParams);
// Transform the results
const products = rows.map(row => ({
...row,
categories: row.categories ? row.categories.split(',') : [],
price: parseFloat(row.price),
cost_price: parseFloat(row.cost_price),
landing_cost_price: row.landing_cost_price ? parseFloat(row.landing_cost_price) : null,
stock_quantity: parseInt(row.stock_quantity),
daily_sales_avg: parseFloat(row.daily_sales_avg) || 0,
weekly_sales_avg: parseFloat(row.weekly_sales_avg) || 0,
monthly_sales_avg: parseFloat(row.monthly_sales_avg) || 0,
avg_quantity_per_order: parseFloat(row.avg_quantity_per_order) || 0,
number_of_orders: parseInt(row.number_of_orders) || 0,
first_sale_date: row.first_sale_date || null,
last_sale_date: row.last_sale_date || null,
days_of_inventory: parseFloat(row.days_of_inventory) || 0,
weeks_of_inventory: parseFloat(row.weeks_of_inventory) || 0,
reorder_point: parseFloat(row.reorder_point) || 0,
safety_stock: parseFloat(row.safety_stock) || 0,
avg_margin_percent: parseFloat(row.avg_margin_percent) || 0,
total_revenue: parseFloat(row.total_revenue) || 0,
inventory_value: parseFloat(row.inventory_value) || 0,
cost_of_goods_sold: parseFloat(row.cost_of_goods_sold) || 0,
gross_profit: parseFloat(row.gross_profit) || 0,
gmroi: parseFloat(row.gmroi) || 0,
avg_lead_time_days: parseFloat(row.avg_lead_time_days) || 0,
last_purchase_date: row.last_purchase_date || null,
last_received_date: row.last_received_date || null,
abc_class: row.abc_class || null,
stock_status: row.stock_status || null,
turnover_rate: parseFloat(row.turnover_rate) || 0,
current_lead_time: parseFloat(row.current_lead_time) || 0,
target_lead_time: parseFloat(row.target_lead_time) || 0,
lead_time_status: row.lead_time_status || null,
stock_coverage_ratio: parseFloat(row.stock_coverage_ratio) || 0,
reorder_qty: parseInt(row.reorder_qty) || 0,
overstocked_amt: parseInt(row.overstocked_amt) || 0
}));
res.json({ res.json({
products, products,
pagination: { pagination: {
total, total,
currentPage: page,
pages: Math.ceil(total / limit), pages: Math.ceil(total / limit),
currentPage: page,
limit limit
}, },
filters: { filters: {
categories: categories.map(category => category.name), categories: categories.map(c => c.name),
vendors: vendors.map(vendor => vendor.vendor), vendors: vendors.map(v => v.vendor),
brands: brands.map(brand => brand.brand) brands: brands.map(b => b.brand)
} }
}); });
} catch (error) { } catch (error) {

View File

@@ -29,40 +29,46 @@ router.get('/', async (req, res) => {
let whereClause = '1=1'; let whereClause = '1=1';
const params = []; const params = [];
let paramCounter = 1;
if (search) { if (search) {
whereClause += ' AND (po.po_id LIKE ? OR po.vendor LIKE ?)'; whereClause += ` AND (po.po_id ILIKE $${paramCounter} OR po.vendor ILIKE $${paramCounter})`;
params.push(`%${search}%`, `%${search}%`); params.push(`%${search}%`);
paramCounter++;
} }
if (status && status !== 'all') { if (status && status !== 'all') {
whereClause += ' AND po.status = ?'; whereClause += ` AND po.status = $${paramCounter}`;
params.push(Number(status)); params.push(Number(status));
paramCounter++;
} }
if (vendor && vendor !== 'all') { if (vendor && vendor !== 'all') {
whereClause += ' AND po.vendor = ?'; whereClause += ` AND po.vendor = $${paramCounter}`;
params.push(vendor); params.push(vendor);
paramCounter++;
} }
if (startDate) { if (startDate) {
whereClause += ' AND po.date >= ?'; whereClause += ` AND po.date >= $${paramCounter}`;
params.push(startDate); params.push(startDate);
paramCounter++;
} }
if (endDate) { if (endDate) {
whereClause += ' AND po.date <= ?'; whereClause += ` AND po.date <= $${paramCounter}`;
params.push(endDate); params.push(endDate);
paramCounter++;
} }
// Get filtered summary metrics // Get filtered summary metrics
const [summary] = await pool.query(` const { rows: [summary] } = await pool.query(`
WITH po_totals AS ( WITH po_totals AS (
SELECT SELECT
po_id, po_id,
SUM(ordered) as total_ordered, SUM(ordered) as total_ordered,
SUM(received) as total_received, SUM(received) as total_received,
CAST(SUM(ordered * cost_price) AS DECIMAL(15,3)) as total_cost ROUND(SUM(ordered * cost_price)::numeric, 3) as total_cost
FROM purchase_orders po FROM purchase_orders po
WHERE ${whereClause} WHERE ${whereClause}
GROUP BY po_id GROUP BY po_id
@@ -72,26 +78,26 @@ router.get('/', async (req, res) => {
SUM(total_ordered) as total_ordered, SUM(total_ordered) as total_ordered,
SUM(total_received) as total_received, SUM(total_received) as total_received,
ROUND( ROUND(
SUM(total_received) / NULLIF(SUM(total_ordered), 0), 3 (SUM(total_received)::numeric / NULLIF(SUM(total_ordered), 0)), 3
) as fulfillment_rate, ) as fulfillment_rate,
CAST(SUM(total_cost) AS DECIMAL(15,3)) as total_value, ROUND(SUM(total_cost)::numeric, 3) as total_value,
CAST(AVG(total_cost) AS DECIMAL(15,3)) as avg_cost ROUND(AVG(total_cost)::numeric, 3) as avg_cost
FROM po_totals FROM po_totals
`, params); `, params);
// Get total count for pagination // Get total count for pagination
const [countResult] = await pool.query(` const { rows: [countResult] } = await pool.query(`
SELECT COUNT(DISTINCT po_id) as total SELECT COUNT(DISTINCT po_id) as total
FROM purchase_orders po FROM purchase_orders po
WHERE ${whereClause} WHERE ${whereClause}
`, params); `, params);
const total = countResult[0].total; const total = countResult.total;
const offset = (page - 1) * limit; const offset = (page - 1) * limit;
const pages = Math.ceil(total / limit); const pages = Math.ceil(total / limit);
// Get recent purchase orders // Get recent purchase orders
const [orders] = await pool.query(` const { rows: orders } = await pool.query(`
WITH po_totals AS ( WITH po_totals AS (
SELECT SELECT
po_id, po_id,
@@ -101,10 +107,10 @@ router.get('/', async (req, res) => {
receiving_status, receiving_status,
COUNT(DISTINCT pid) as total_items, COUNT(DISTINCT pid) as total_items,
SUM(ordered) as total_quantity, SUM(ordered) as total_quantity,
CAST(SUM(ordered * cost_price) AS DECIMAL(15,3)) as total_cost, ROUND(SUM(ordered * cost_price)::numeric, 3) as total_cost,
SUM(received) as total_received, SUM(received) as total_received,
ROUND( ROUND(
SUM(received) / NULLIF(SUM(ordered), 0), 3 (SUM(received)::numeric / NULLIF(SUM(ordered), 0)), 3
) as fulfillment_rate ) as fulfillment_rate
FROM purchase_orders po FROM purchase_orders po
WHERE ${whereClause} WHERE ${whereClause}
@@ -113,7 +119,7 @@ router.get('/', async (req, res) => {
SELECT SELECT
po_id as id, po_id as id,
vendor as vendor_name, vendor as vendor_name,
DATE_FORMAT(date, '%Y-%m-%d') as order_date, to_char(date, 'YYYY-MM-DD') as order_date,
status, status,
receiving_status, receiving_status,
total_items, total_items,
@@ -124,21 +130,21 @@ router.get('/', async (req, res) => {
FROM po_totals FROM po_totals
ORDER BY ORDER BY
CASE CASE
WHEN ? = 'order_date' THEN date WHEN $${paramCounter} = 'order_date' THEN date
WHEN ? = 'vendor_name' THEN vendor WHEN $${paramCounter} = 'vendor_name' THEN vendor
WHEN ? = 'total_cost' THEN CAST(total_cost AS DECIMAL(15,3)) WHEN $${paramCounter} = 'total_cost' THEN total_cost
WHEN ? = 'total_received' THEN CAST(total_received AS DECIMAL(15,3)) WHEN $${paramCounter} = 'total_received' THEN total_received
WHEN ? = 'total_items' THEN CAST(total_items AS SIGNED) WHEN $${paramCounter} = 'total_items' THEN total_items
WHEN ? = 'total_quantity' THEN CAST(total_quantity AS SIGNED) WHEN $${paramCounter} = 'total_quantity' THEN total_quantity
WHEN ? = 'fulfillment_rate' THEN CAST(fulfillment_rate AS DECIMAL(5,3)) WHEN $${paramCounter} = 'fulfillment_rate' THEN fulfillment_rate
WHEN ? = 'status' THEN status WHEN $${paramCounter} = 'status' THEN status
ELSE date ELSE date
END ${sortDirection === 'desc' ? 'DESC' : 'ASC'} END ${sortDirection === 'desc' ? 'DESC' : 'ASC'}
LIMIT ? OFFSET ? LIMIT $${paramCounter + 1} OFFSET $${paramCounter + 2}
`, [...params, sortColumn, sortColumn, sortColumn, sortColumn, sortColumn, sortColumn, sortColumn, sortColumn, Number(limit), offset]); `, [...params, sortColumn, Number(limit), offset]);
// Get unique vendors for filter options // Get unique vendors for filter options
const [vendors] = await pool.query(` const { rows: vendors } = await pool.query(`
SELECT DISTINCT vendor SELECT DISTINCT vendor
FROM purchase_orders FROM purchase_orders
WHERE vendor IS NOT NULL AND vendor != '' WHERE vendor IS NOT NULL AND vendor != ''
@@ -146,7 +152,7 @@ router.get('/', async (req, res) => {
`); `);
// Get unique statuses for filter options // Get unique statuses for filter options
const [statuses] = await pool.query(` const { rows: statuses } = await pool.query(`
SELECT DISTINCT status SELECT DISTINCT status
FROM purchase_orders FROM purchase_orders
WHERE status IS NOT NULL WHERE status IS NOT NULL
@@ -169,12 +175,12 @@ router.get('/', async (req, res) => {
// Parse summary metrics // Parse summary metrics
const parsedSummary = { const parsedSummary = {
order_count: Number(summary[0].order_count) || 0, order_count: Number(summary.order_count) || 0,
total_ordered: Number(summary[0].total_ordered) || 0, total_ordered: Number(summary.total_ordered) || 0,
total_received: Number(summary[0].total_received) || 0, total_received: Number(summary.total_received) || 0,
fulfillment_rate: Number(summary[0].fulfillment_rate) || 0, fulfillment_rate: Number(summary.fulfillment_rate) || 0,
total_value: Number(summary[0].total_value) || 0, total_value: Number(summary.total_value) || 0,
avg_cost: Number(summary[0].avg_cost) || 0 avg_cost: Number(summary.avg_cost) || 0
}; };
res.json({ res.json({
@@ -202,7 +208,7 @@ router.get('/vendor-metrics', async (req, res) => {
try { try {
const pool = req.app.locals.pool; const pool = req.app.locals.pool;
const [metrics] = await pool.query(` const { rows: metrics } = await pool.query(`
WITH delivery_metrics AS ( WITH delivery_metrics AS (
SELECT SELECT
vendor, vendor,
@@ -213,7 +219,7 @@ router.get('/vendor-metrics', async (req, res) => {
CASE CASE
WHEN status >= ${STATUS.RECEIVING_STARTED} AND receiving_status >= ${RECEIVING_STATUS.PARTIAL_RECEIVED} WHEN status >= ${STATUS.RECEIVING_STARTED} AND receiving_status >= ${RECEIVING_STATUS.PARTIAL_RECEIVED}
AND received_date IS NOT NULL AND date IS NOT NULL AND received_date IS NOT NULL AND date IS NOT NULL
THEN DATEDIFF(received_date, date) THEN (received_date - date)::integer
ELSE NULL ELSE NULL
END as delivery_days END as delivery_days
FROM purchase_orders FROM purchase_orders
@@ -226,18 +232,18 @@ router.get('/vendor-metrics', async (req, res) => {
SUM(ordered) as total_ordered, SUM(ordered) as total_ordered,
SUM(received) as total_received, SUM(received) as total_received,
ROUND( ROUND(
SUM(received) / NULLIF(SUM(ordered), 0), 3 (SUM(received)::numeric / NULLIF(SUM(ordered), 0)), 3
) as fulfillment_rate, ) as fulfillment_rate,
CAST(ROUND(
SUM(ordered * cost_price) / NULLIF(SUM(ordered), 0), 2
) AS DECIMAL(15,3)) as avg_unit_cost,
CAST(SUM(ordered * cost_price) AS DECIMAL(15,3)) as total_spend,
ROUND( ROUND(
AVG(NULLIF(delivery_days, 0)), 1 (SUM(ordered * cost_price)::numeric / NULLIF(SUM(ordered), 0)), 2
) as avg_unit_cost,
ROUND(SUM(ordered * cost_price)::numeric, 3) as total_spend,
ROUND(
AVG(NULLIF(delivery_days, 0))::numeric, 1
) as avg_delivery_days ) as avg_delivery_days
FROM delivery_metrics FROM delivery_metrics
GROUP BY vendor GROUP BY vendor
HAVING total_orders > 0 HAVING COUNT(DISTINCT po_id) > 0
ORDER BY total_spend DESC ORDER BY total_spend DESC
`); `);
@@ -251,7 +257,7 @@ router.get('/vendor-metrics', async (req, res) => {
fulfillment_rate: Number(vendor.fulfillment_rate) || 0, fulfillment_rate: Number(vendor.fulfillment_rate) || 0,
avg_unit_cost: Number(vendor.avg_unit_cost) || 0, avg_unit_cost: Number(vendor.avg_unit_cost) || 0,
total_spend: Number(vendor.total_spend) || 0, total_spend: Number(vendor.total_spend) || 0,
avg_delivery_days: vendor.avg_delivery_days === null ? null : Number(vendor.avg_delivery_days) avg_delivery_days: Number(vendor.avg_delivery_days) || 0
})); }));
res.json(parsedMetrics); res.json(parsedMetrics);

View File

@@ -6,7 +6,7 @@ router.get('/', async (req, res) => {
const pool = req.app.locals.pool; const pool = req.app.locals.pool;
try { try {
// Get all vendors with metrics // Get all vendors with metrics
const [vendors] = await pool.query(` const { rows: vendors } = await pool.query(`
SELECT DISTINCT SELECT DISTINCT
p.vendor as name, p.vendor as name,
COALESCE(vm.active_products, 0) as active_products, COALESCE(vm.active_products, 0) as active_products,
@@ -26,16 +26,16 @@ router.get('/', async (req, res) => {
// Get cost metrics for all vendors // Get cost metrics for all vendors
const vendorNames = vendors.map(v => v.name); const vendorNames = vendors.map(v => v.name);
const [costMetrics] = await pool.query(` const { rows: costMetrics } = await pool.query(`
SELECT SELECT
vendor, vendor,
CAST(ROUND(SUM(ordered * cost_price) / NULLIF(SUM(ordered), 0), 2) AS DECIMAL(15,3)) as avg_unit_cost, ROUND((SUM(ordered * cost_price)::numeric / NULLIF(SUM(ordered), 0)), 2) as avg_unit_cost,
CAST(SUM(ordered * cost_price) AS DECIMAL(15,3)) as total_spend ROUND(SUM(ordered * cost_price)::numeric, 3) as total_spend
FROM purchase_orders FROM purchase_orders
WHERE status = 'closed' WHERE status = 'closed'
AND cost_price IS NOT NULL AND cost_price IS NOT NULL
AND ordered > 0 AND ordered > 0
AND vendor IN (?) AND vendor = ANY($1)
GROUP BY vendor GROUP BY vendor
`, [vendorNames]); `, [vendorNames]);
@@ -49,26 +49,26 @@ router.get('/', async (req, res) => {
}, {}); }, {});
// Get overall stats // Get overall stats
const [stats] = await pool.query(` const { rows: [stats] } = await pool.query(`
SELECT SELECT
COUNT(DISTINCT p.vendor) as totalVendors, COUNT(DISTINCT p.vendor) as totalVendors,
COUNT(DISTINCT CASE COUNT(DISTINCT CASE
WHEN COALESCE(vm.total_orders, 0) > 0 AND COALESCE(vm.order_fill_rate, 0) >= 75 WHEN COALESCE(vm.total_orders, 0) > 0 AND COALESCE(vm.order_fill_rate, 0) >= 75
THEN p.vendor THEN p.vendor
END) as activeVendors, END) as activeVendors,
COALESCE(ROUND(AVG(NULLIF(vm.avg_lead_time_days, 0)), 1), 0) as avgLeadTime, COALESCE(ROUND(AVG(NULLIF(vm.avg_lead_time_days, 0))::numeric, 1), 0) as avgLeadTime,
COALESCE(ROUND(AVG(NULLIF(vm.order_fill_rate, 0)), 1), 0) as avgFillRate, COALESCE(ROUND(AVG(NULLIF(vm.order_fill_rate, 0))::numeric, 1), 0) as avgFillRate,
COALESCE(ROUND(AVG(NULLIF(vm.on_time_delivery_rate, 0)), 1), 0) as avgOnTimeDelivery COALESCE(ROUND(AVG(NULLIF(vm.on_time_delivery_rate, 0))::numeric, 1), 0) as avgOnTimeDelivery
FROM products p FROM products p
LEFT JOIN vendor_metrics vm ON p.vendor = vm.vendor LEFT JOIN vendor_metrics vm ON p.vendor = vm.vendor
WHERE p.vendor IS NOT NULL AND p.vendor != '' WHERE p.vendor IS NOT NULL AND p.vendor != ''
`); `);
// Get overall cost metrics // Get overall cost metrics
const [overallCostMetrics] = await pool.query(` const { rows: [overallCostMetrics] } = await pool.query(`
SELECT SELECT
CAST(ROUND(SUM(ordered * cost_price) / NULLIF(SUM(ordered), 0), 2) AS DECIMAL(15,3)) as avg_unit_cost, ROUND((SUM(ordered * cost_price)::numeric / NULLIF(SUM(ordered), 0)), 2) as avg_unit_cost,
CAST(SUM(ordered * cost_price) AS DECIMAL(15,3)) as total_spend ROUND(SUM(ordered * cost_price)::numeric, 3) as total_spend
FROM purchase_orders FROM purchase_orders
WHERE status = 'closed' WHERE status = 'closed'
AND cost_price IS NOT NULL AND cost_price IS NOT NULL
@@ -90,13 +90,13 @@ router.get('/', async (req, res) => {
total_spend: parseFloat(costMetricsMap[vendor.name]?.total_spend || 0) total_spend: parseFloat(costMetricsMap[vendor.name]?.total_spend || 0)
})), })),
stats: { stats: {
totalVendors: parseInt(stats[0].totalVendors), totalVendors: parseInt(stats.totalvendors),
activeVendors: parseInt(stats[0].activeVendors), activeVendors: parseInt(stats.activevendors),
avgLeadTime: parseFloat(stats[0].avgLeadTime), avgLeadTime: parseFloat(stats.avgleadtime),
avgFillRate: parseFloat(stats[0].avgFillRate), avgFillRate: parseFloat(stats.avgfillrate),
avgOnTimeDelivery: parseFloat(stats[0].avgOnTimeDelivery), avgOnTimeDelivery: parseFloat(stats.avgontimedelivery),
avgUnitCost: parseFloat(overallCostMetrics[0].avg_unit_cost), avgUnitCost: parseFloat(overallCostMetrics.avg_unit_cost),
totalSpend: parseFloat(overallCostMetrics[0].total_spend) totalSpend: parseFloat(overallCostMetrics.total_spend)
} }
}); });
} catch (error) { } catch (error) {

View File

@@ -3,7 +3,6 @@ const cors = require('cors');
const { spawn } = require('child_process'); const { spawn } = require('child_process');
const path = require('path'); const path = require('path');
const fs = require('fs'); const fs = require('fs');
const mysql = require('mysql2/promise');
const { corsMiddleware, corsErrorHandler } = require('./middleware/cors'); const { corsMiddleware, corsErrorHandler } = require('./middleware/cors');
const { initPool } = require('./utils/db'); const { initPool } = require('./utils/db');
const productsRouter = require('./routes/products'); const productsRouter = require('./routes/products');
@@ -16,11 +15,9 @@ const configRouter = require('./routes/config');
const metricsRouter = require('./routes/metrics'); const metricsRouter = require('./routes/metrics');
const vendorsRouter = require('./routes/vendors'); const vendorsRouter = require('./routes/vendors');
const categoriesRouter = require('./routes/categories'); const categoriesRouter = require('./routes/categories');
const testConnectionRouter = require('./routes/test-connection');
// Get the absolute path to the .env file // Get the absolute path to the .env file
const envPath = path.resolve(process.cwd(), '.env'); const envPath = '/var/www/html/inventory/.env';
console.log('Current working directory:', process.cwd());
console.log('Looking for .env file at:', envPath); console.log('Looking for .env file at:', envPath);
console.log('.env file exists:', fs.existsSync(envPath)); console.log('.env file exists:', fs.existsSync(envPath));
@@ -33,6 +30,9 @@ try {
DB_HOST: process.env.DB_HOST || 'not set', DB_HOST: process.env.DB_HOST || 'not set',
DB_USER: process.env.DB_USER || 'not set', DB_USER: process.env.DB_USER || 'not set',
DB_NAME: process.env.DB_NAME || 'not set', DB_NAME: process.env.DB_NAME || 'not set',
DB_PASSWORD: process.env.DB_PASSWORD ? '[password set]' : 'not set',
DB_PORT: process.env.DB_PORT || 'not set',
DB_SSL: process.env.DB_SSL || 'not set'
}); });
} catch (error) { } catch (error) {
console.error('Error loading .env file:', error); console.error('Error loading .env file:', error);
@@ -66,20 +66,27 @@ app.use(express.json());
app.use(express.urlencoded({ extended: true })); app.use(express.urlencoded({ extended: true }));
// Initialize database pool // Initialize database pool
const pool = initPool({ const poolPromise = initPool({
host: process.env.DB_HOST, host: process.env.DB_HOST,
user: process.env.DB_USER, user: process.env.DB_USER,
password: process.env.DB_PASSWORD, password: process.env.DB_PASSWORD,
database: process.env.DB_NAME, database: process.env.DB_NAME,
waitForConnections: true, port: process.env.DB_PORT || 5432,
connectionLimit: process.env.NODE_ENV === 'production' ? 20 : 10, max: process.env.NODE_ENV === 'production' ? 20 : 10,
queueLimit: 0, idleTimeoutMillis: 30000,
enableKeepAlive: true, connectionTimeoutMillis: 2000,
keepAliveInitialDelay: 0 ssl: process.env.DB_SSL === 'true' ? {
rejectUnauthorized: false
} : false
}); });
// Make pool available to routes // Make pool available to routes once initialized
app.locals.pool = pool; poolPromise.then(pool => {
app.locals.pool = pool;
}).catch(err => {
console.error('[Database] Failed to initialize pool:', err);
process.exit(1);
});
// Routes // Routes
app.use('/api/products', productsRouter); app.use('/api/products', productsRouter);
@@ -92,7 +99,6 @@ app.use('/api/config', configRouter);
app.use('/api/metrics', metricsRouter); app.use('/api/metrics', metricsRouter);
app.use('/api/vendors', vendorsRouter); app.use('/api/vendors', vendorsRouter);
app.use('/api/categories', categoriesRouter); app.use('/api/categories', categoriesRouter);
app.use('/api', testConnectionRouter);
// Basic health check route // Basic health check route
app.get('/health', (req, res) => { app.get('/health', (req, res) => {
@@ -128,17 +134,6 @@ process.on('unhandledRejection', (reason, promise) => {
console.error(`[${new Date().toISOString()}] Unhandled Rejection at:`, promise, 'reason:', reason); console.error(`[${new Date().toISOString()}] Unhandled Rejection at:`, promise, 'reason:', reason);
}); });
// Test database connection
pool.getConnection()
.then(connection => {
console.log('[Database] Connected successfully');
connection.release();
})
.catch(err => {
console.error('[Database] Error connecting:', err);
process.exit(1);
});
// Initialize client sets for SSE // Initialize client sets for SSE
const importClients = new Set(); const importClients = new Set();
const updateClients = new Set(); const updateClients = new Set();

View File

@@ -1,17 +1,70 @@
const mysql = require('mysql2/promise'); const { Pool, Client } = require('pg');
let pool; let pool;
function initPool(config) { function initPool(config) {
pool = mysql.createPool(config); // Log config without sensitive data
return pool; const safeConfig = {
host: config.host,
user: config.user,
database: config.database,
port: config.port,
max: config.max,
idleTimeoutMillis: config.idleTimeoutMillis,
connectionTimeoutMillis: config.connectionTimeoutMillis,
ssl: config.ssl,
password: config.password ? '[password set]' : '[no password]'
};
console.log('[Database] Initializing pool with config:', safeConfig);
// Try creating a client first to test the connection
const testClient = new Client({
host: config.host,
user: config.user,
password: config.password,
database: config.database,
port: config.port,
ssl: config.ssl
});
console.log('[Database] Testing connection with Client...');
return testClient.connect()
.then(() => {
console.log('[Database] Test connection with Client successful');
return testClient.end();
})
.then(() => {
// If client connection worked, create the pool
console.log('[Database] Creating pool...');
pool = new Pool({
host: config.host,
user: config.user,
password: config.password,
database: config.database,
port: config.port,
max: config.max,
idleTimeoutMillis: config.idleTimeoutMillis,
connectionTimeoutMillis: config.connectionTimeoutMillis,
ssl: config.ssl
});
return pool.connect();
})
.then(poolClient => {
console.log('[Database] Pool connection successful');
poolClient.release();
return pool;
})
.catch(err => {
console.error('[Database] Connection failed:', err);
throw err;
});
} }
async function getConnection() { async function getConnection() {
if (!pool) { if (!pool) {
throw new Error('Database pool not initialized'); throw new Error('Database pool not initialized');
} }
return pool.getConnection(); return pool.connect();
} }
module.exports = { module.exports = {

View File

@@ -27,7 +27,7 @@ import {
import { Loader2, X, RefreshCw, AlertTriangle, RefreshCcw, Hourglass } from "lucide-react"; import { Loader2, X, RefreshCw, AlertTriangle, RefreshCcw, Hourglass } from "lucide-react";
import config from "../../config"; import config from "../../config";
import { toast } from "sonner"; import { toast } from "sonner";
import { Table, TableBody, TableCell, TableRow, TableHeader, TableHead } from "@/components/ui/table"; import { Table, TableBody, TableCell, TableRow } from "@/components/ui/table";
interface ImportProgress { interface ImportProgress {
status: "running" | "error" | "complete" | "cancelled"; status: "running" | "error" | "complete" | "cancelled";
@@ -85,9 +85,7 @@ export function DataManagement() {
const [] = useState<ImportProgress | null>(null); const [] = useState<ImportProgress | null>(null);
const [eventSource, setEventSource] = useState<EventSource | null>(null); const [eventSource, setEventSource] = useState<EventSource | null>(null);
const [importHistory, setImportHistory] = useState<ImportHistoryRecord[]>([]); const [importHistory, setImportHistory] = useState<ImportHistoryRecord[]>([]);
const [calculateHistory, setCalculateHistory] = useState< const [calculateHistory, setCalculateHistory] = useState<CalculateHistoryRecord[]>([]);
CalculateHistoryRecord[]
>([]);
const [moduleStatus, setModuleStatus] = useState<ModuleStatus[]>([]); const [moduleStatus, setModuleStatus] = useState<ModuleStatus[]>([]);
const [tableStatus, setTableStatus] = useState<TableStatus[]>([]); const [tableStatus, setTableStatus] = useState<TableStatus[]>([]);
const [scriptOutput, setScriptOutput] = useState<string[]>([]); const [scriptOutput, setScriptOutput] = useState<string[]>([]);
@@ -368,6 +366,10 @@ export function DataManagement() {
fetch(`${config.apiUrl}/csv/status/tables`), fetch(`${config.apiUrl}/csv/status/tables`),
]); ]);
if (!importRes.ok || !calcRes.ok || !moduleRes.ok || !tableRes.ok) {
throw new Error('One or more requests failed');
}
const [importData, calcData, moduleData, tableData] = await Promise.all([ const [importData, calcData, moduleData, tableData] = await Promise.all([
importRes.json(), importRes.json(),
calcRes.json(), calcRes.json(),
@@ -375,52 +377,66 @@ export function DataManagement() {
tableRes.json(), tableRes.json(),
]); ]);
setImportHistory(importData); // Ensure we're setting arrays even if the response is empty or invalid
setCalculateHistory(calcData); setImportHistory(Array.isArray(importData) ? importData : []);
setModuleStatus(moduleData); setCalculateHistory(Array.isArray(calcData) ? calcData : []);
setTableStatus(tableData); setModuleStatus(Array.isArray(moduleData) ? moduleData : []);
setTableStatus(Array.isArray(tableData) ? tableData : []);
} catch (error) { } catch (error) {
console.error("Error fetching history:", error); console.error("Error fetching history:", error);
// Set empty arrays as fallback
setImportHistory([]);
setCalculateHistory([]);
setModuleStatus([]);
setTableStatus([]);
} }
}; };
const refreshTableStatus = async () => { const refreshTableStatus = async () => {
try { try {
const response = await fetch(`${config.apiUrl}/csv/status/tables`); const response = await fetch(`${config.apiUrl}/csv/status/tables`);
if (!response.ok) throw new Error('Failed to fetch table status');
const data = await response.json(); const data = await response.json();
setTableStatus(data); setTableStatus(Array.isArray(data) ? data : []);
} catch (error) { } catch (error) {
toast.error("Failed to refresh table status"); toast.error("Failed to refresh table status");
setTableStatus([]);
} }
}; };
const refreshModuleStatus = async () => { const refreshModuleStatus = async () => {
try { try {
const response = await fetch(`${config.apiUrl}/csv/status/modules`); const response = await fetch(`${config.apiUrl}/csv/status/modules`);
if (!response.ok) throw new Error('Failed to fetch module status');
const data = await response.json(); const data = await response.json();
setModuleStatus(data); setModuleStatus(Array.isArray(data) ? data : []);
} catch (error) { } catch (error) {
toast.error("Failed to refresh module status"); toast.error("Failed to refresh module status");
setModuleStatus([]);
} }
}; };
const refreshImportHistory = async () => { const refreshImportHistory = async () => {
try { try {
const response = await fetch(`${config.apiUrl}/csv/history/import`); const response = await fetch(`${config.apiUrl}/csv/history/import`);
if (!response.ok) throw new Error('Failed to fetch import history');
const data = await response.json(); const data = await response.json();
setImportHistory(data); setImportHistory(Array.isArray(data) ? data : []);
} catch (error) { } catch (error) {
toast.error("Failed to refresh import history"); toast.error("Failed to refresh import history");
setImportHistory([]);
} }
}; };
const refreshCalculateHistory = async () => { const refreshCalculateHistory = async () => {
try { try {
const response = await fetch(`${config.apiUrl}/csv/history/calculate`); const response = await fetch(`${config.apiUrl}/csv/history/calculate`);
if (!response.ok) throw new Error('Failed to fetch calculate history');
const data = await response.json(); const data = await response.json();
setCalculateHistory(data); setCalculateHistory(Array.isArray(data) ? data : []);
} catch (error) { } catch (error) {
toast.error("Failed to refresh calculate history"); toast.error("Failed to refresh calculate history");
setCalculateHistory([]);
} }
}; };

View File

@@ -22,7 +22,7 @@ export function Login() {
setIsLoading(true); setIsLoading(true);
try { try {
const url = isDev ? "/auth-inv/login" : `${config.authUrl}/login`; const url = `${config.authUrl}/login`;
console.log("Making login request:", { console.log("Making login request:", {
url, url,
method: "POST", method: "POST",

View File

@@ -22,7 +22,6 @@ import {
import { motion } from 'motion/react'; import { motion } from 'motion/react';
import { import {
PurchaseOrderStatus, PurchaseOrderStatus,
ReceivingStatus as ReceivingStatusCode,
getPurchaseOrderStatusLabel, getPurchaseOrderStatusLabel,
getReceivingStatusLabel, getReceivingStatusLabel,
getPurchaseOrderStatusVariant, getPurchaseOrderStatusVariant,
@@ -113,7 +112,7 @@ export default function PurchaseOrders() {
statuses: string[]; statuses: string[];
}>({ }>({
vendors: [], vendors: [],
statuses: [], statuses: []
}); });
const [pagination, setPagination] = useState({ const [pagination, setPagination] = useState({
total: 0, total: 0,
@@ -154,15 +153,57 @@ export default function PurchaseOrders() {
fetch('/api/purchase-orders/cost-analysis') fetch('/api/purchase-orders/cost-analysis')
]); ]);
const [ // Initialize default data
purchaseOrdersData, let purchaseOrdersData: PurchaseOrdersResponse = {
vendorMetricsData, orders: [],
costAnalysisData summary: {
] = await Promise.all([ order_count: 0,
purchaseOrdersRes.json() as Promise<PurchaseOrdersResponse>, total_ordered: 0,
vendorMetricsRes.json(), total_received: 0,
costAnalysisRes.json() fulfillment_rate: 0,
]); total_value: 0,
avg_cost: 0
},
pagination: {
total: 0,
pages: 0,
page: 1,
limit: 100
},
filters: {
vendors: [],
statuses: []
}
};
let vendorMetricsData: VendorMetrics[] = [];
let costAnalysisData: CostAnalysis = {
unique_products: 0,
avg_cost: 0,
min_cost: 0,
max_cost: 0,
cost_variance: 0,
total_spend_by_category: []
};
// Only try to parse responses if they were successful
if (purchaseOrdersRes.ok) {
purchaseOrdersData = await purchaseOrdersRes.json();
} else {
console.error('Failed to fetch purchase orders:', await purchaseOrdersRes.text());
}
if (vendorMetricsRes.ok) {
vendorMetricsData = await vendorMetricsRes.json();
} else {
console.error('Failed to fetch vendor metrics:', await vendorMetricsRes.text());
}
if (costAnalysisRes.ok) {
costAnalysisData = await costAnalysisRes.json();
} else {
console.error('Failed to fetch cost analysis:', await costAnalysisRes.text());
}
setPurchaseOrders(purchaseOrdersData.orders); setPurchaseOrders(purchaseOrdersData.orders);
setPagination(purchaseOrdersData.pagination); setPagination(purchaseOrdersData.pagination);
@@ -172,6 +213,27 @@ export default function PurchaseOrders() {
setCostAnalysis(costAnalysisData); setCostAnalysis(costAnalysisData);
} catch (error) { } catch (error) {
console.error('Error fetching data:', error); console.error('Error fetching data:', error);
// Set default values in case of error
setPurchaseOrders([]);
setPagination({ total: 0, pages: 0, page: 1, limit: 100 });
setFilterOptions({ vendors: [], statuses: [] });
setSummary({
order_count: 0,
total_ordered: 0,
total_received: 0,
fulfillment_rate: 0,
total_value: 0,
avg_cost: 0
});
setVendorMetrics([]);
setCostAnalysis({
unique_products: 0,
avg_cost: 0,
min_cost: 0,
max_cost: 0,
cost_variance: 0,
total_spend_by_category: []
});
} finally { } finally {
setLoading(false); setLoading(false);
} }
@@ -311,7 +373,7 @@ export default function PurchaseOrders() {
</SelectTrigger> </SelectTrigger>
<SelectContent> <SelectContent>
<SelectItem value="all">All Vendors</SelectItem> <SelectItem value="all">All Vendors</SelectItem>
{filterOptions.vendors.map(vendor => ( {filterOptions?.vendors?.map(vendor => (
<SelectItem key={vendor} value={vendor}> <SelectItem key={vendor} value={vendor}>
{vendor} {vendor}
</SelectItem> </SelectItem>

View File

@@ -3,10 +3,10 @@ export interface Product {
title: string; title: string;
SKU: string; SKU: string;
stock_quantity: number; stock_quantity: number;
price: string; // DECIMAL(15,3) price: string; // numeric(15,3)
regular_price: string; // DECIMAL(15,3) regular_price: string; // numeric(15,3)
cost_price: string; // DECIMAL(15,3) cost_price: string; // numeric(15,3)
landing_cost_price: string | null; // DECIMAL(15,3) landing_cost_price: string | null; // numeric(15,3)
barcode: string; barcode: string;
vendor: string; vendor: string;
vendor_reference: string; vendor_reference: string;
@@ -24,32 +24,32 @@ export interface Product {
updated_at: string; updated_at: string;
// Metrics // Metrics
daily_sales_avg?: string; // DECIMAL(15,3) daily_sales_avg?: string; // numeric(15,3)
weekly_sales_avg?: string; // DECIMAL(15,3) weekly_sales_avg?: string; // numeric(15,3)
monthly_sales_avg?: string; // DECIMAL(15,3) monthly_sales_avg?: string; // numeric(15,3)
avg_quantity_per_order?: string; // DECIMAL(15,3) avg_quantity_per_order?: string; // numeric(15,3)
number_of_orders?: number; number_of_orders?: number;
first_sale_date?: string; first_sale_date?: string;
last_sale_date?: string; last_sale_date?: string;
last_purchase_date?: string; last_purchase_date?: string;
days_of_inventory?: string; // DECIMAL(15,3) days_of_inventory?: string; // numeric(15,3)
weeks_of_inventory?: string; // DECIMAL(15,3) weeks_of_inventory?: string; // numeric(15,3)
reorder_point?: string; // DECIMAL(15,3) reorder_point?: string; // numeric(15,3)
safety_stock?: string; // DECIMAL(15,3) safety_stock?: string; // numeric(15,3)
avg_margin_percent?: string; // DECIMAL(15,3) avg_margin_percent?: string; // numeric(15,3)
total_revenue?: string; // DECIMAL(15,3) total_revenue?: string; // numeric(15,3)
inventory_value?: string; // DECIMAL(15,3) inventory_value?: string; // numeric(15,3)
cost_of_goods_sold?: string; // DECIMAL(15,3) cost_of_goods_sold?: string; // numeric(15,3)
gross_profit?: string; // DECIMAL(15,3) gross_profit?: string; // numeric(15,3)
gmroi?: string; // DECIMAL(15,3) gmroi?: string; // numeric(15,3)
avg_lead_time_days?: string; // DECIMAL(15,3) avg_lead_time_days?: string; // numeric(15,3)
last_received_date?: string; last_received_date?: string;
abc_class?: string; abc_class?: string;
stock_status?: string; stock_status?: string;
turnover_rate?: string; // DECIMAL(15,3) turnover_rate?: string; // numeric(15,3)
current_lead_time?: string; // DECIMAL(15,3) current_lead_time?: string; // numeric(15,3)
target_lead_time?: string; // DECIMAL(15,3) target_lead_time?: string; // numeric(15,3)
lead_time_status?: string; lead_time_status?: string;
reorder_qty?: number; reorder_qty?: number;
overstocked_amt?: string; // DECIMAL(15,3) overstocked_amt?: string; // numeric(15,3)
} }

View File

@@ -1 +1 @@
{"root":["./src/app.tsx","./src/config.ts","./src/main.tsx","./src/vite-env.d.ts","./src/components/analytics/categoryperformance.tsx","./src/components/analytics/priceanalysis.tsx","./src/components/analytics/profitanalysis.tsx","./src/components/analytics/stockanalysis.tsx","./src/components/analytics/vendorperformance.tsx","./src/components/auth/requireauth.tsx","./src/components/dashboard/bestsellers.tsx","./src/components/dashboard/forecastmetrics.tsx","./src/components/dashboard/inventoryhealthsummary.tsx","./src/components/dashboard/inventorystats.tsx","./src/components/dashboard/keymetricscharts.tsx","./src/components/dashboard/lowstockalerts.tsx","./src/components/dashboard/overstockmetrics.tsx","./src/components/dashboard/overview.tsx","./src/components/dashboard/purchasemetrics.tsx","./src/components/dashboard/recentsales.tsx","./src/components/dashboard/replenishmentmetrics.tsx","./src/components/dashboard/salesbycategory.tsx","./src/components/dashboard/salesmetrics.tsx","./src/components/dashboard/stockmetrics.tsx","./src/components/dashboard/topoverstockedproducts.tsx","./src/components/dashboard/topreplenishproducts.tsx","./src/components/dashboard/trendingproducts.tsx","./src/components/dashboard/vendorperformance.tsx","./src/components/forecasting/columns.tsx","./src/components/layout/appsidebar.tsx","./src/components/layout/mainlayout.tsx","./src/components/products/productdetail.tsx","./src/components/products/productfilters.tsx","./src/components/products/producttable.tsx","./src/components/products/producttableskeleton.tsx","./src/components/products/productviews.tsx","./src/components/settings/calculationsettings.tsx","./src/components/settings/configuration.tsx","./src/components/settings/datamanagement.tsx","./src/components/settings/performancemetrics.tsx","./src/components/settings/stockmanagement.tsx","./src/components/ui/accordion.tsx","./src/components/ui/alert-dialog.tsx","./src/components/ui/alert.tsx","./src/components/ui/avatar.tsx"
,"./src/components/ui/badge.tsx","./src/components/ui/button.tsx","./src/components/ui/calendar.tsx","./src/components/ui/card.tsx","./src/components/ui/command.tsx","./src/components/ui/date-range-picker-narrow.tsx","./src/components/ui/date-range-picker.tsx","./src/components/ui/dialog.tsx","./src/components/ui/drawer.tsx","./src/components/ui/dropdown-menu.tsx","./src/components/ui/input.tsx","./src/components/ui/label.tsx","./src/components/ui/pagination.tsx","./src/components/ui/popover.tsx","./src/components/ui/progress.tsx","./src/components/ui/scroll-area.tsx","./src/components/ui/select.tsx","./src/components/ui/separator.tsx","./src/components/ui/sheet.tsx","./src/components/ui/sidebar.tsx","./src/components/ui/skeleton.tsx","./src/components/ui/sonner.tsx","./src/components/ui/switch.tsx","./src/components/ui/table.tsx","./src/components/ui/tabs.tsx","./src/components/ui/toggle-group.tsx","./src/components/ui/toggle.tsx","./src/components/ui/tooltip.tsx","./src/hooks/use-mobile.tsx","./src/lib/utils.ts","./src/pages/analytics.tsx","./src/pages/categories.tsx","./src/pages/dashboard.tsx","./src/pages/forecasting.tsx","./src/pages/login.tsx","./src/pages/orders.tsx","./src/pages/products.tsx","./src/pages/purchaseorders.tsx","./src/pages/settings.tsx","./src/pages/vendors.tsx","./src/routes/forecasting.tsx","./src/types/products.ts"],"version":"5.6.3"} 
{"root":["./src/app.tsx","./src/config.ts","./src/main.tsx","./src/vite-env.d.ts","./src/components/analytics/categoryperformance.tsx","./src/components/analytics/priceanalysis.tsx","./src/components/analytics/profitanalysis.tsx","./src/components/analytics/stockanalysis.tsx","./src/components/analytics/vendorperformance.tsx","./src/components/auth/requireauth.tsx","./src/components/dashboard/bestsellers.tsx","./src/components/dashboard/forecastmetrics.tsx","./src/components/dashboard/inventoryhealthsummary.tsx","./src/components/dashboard/inventorystats.tsx","./src/components/dashboard/keymetricscharts.tsx","./src/components/dashboard/lowstockalerts.tsx","./src/components/dashboard/overstockmetrics.tsx","./src/components/dashboard/overview.tsx","./src/components/dashboard/purchasemetrics.tsx","./src/components/dashboard/recentsales.tsx","./src/components/dashboard/replenishmentmetrics.tsx","./src/components/dashboard/salesbycategory.tsx","./src/components/dashboard/salesmetrics.tsx","./src/components/dashboard/stockmetrics.tsx","./src/components/dashboard/topoverstockedproducts.tsx","./src/components/dashboard/topreplenishproducts.tsx","./src/components/dashboard/trendingproducts.tsx","./src/components/dashboard/vendorperformance.tsx","./src/components/forecasting/columns.tsx","./src/components/layout/appsidebar.tsx","./src/components/layout/mainlayout.tsx","./src/components/products/productdetail.tsx","./src/components/products/productfilters.tsx","./src/components/products/producttable.tsx","./src/components/products/producttableskeleton.tsx","./src/components/products/productviews.tsx","./src/components/settings/calculationsettings.tsx","./src/components/settings/configuration.tsx","./src/components/settings/datamanagement.tsx","./src/components/settings/performancemetrics.tsx","./src/components/settings/stockmanagement.tsx","./src/components/ui/accordion.tsx","./src/components/ui/alert-dialog.tsx","./src/components/ui/alert.tsx","./src/components/ui/avatar.tsx"
,"./src/components/ui/badge.tsx","./src/components/ui/button.tsx","./src/components/ui/calendar.tsx","./src/components/ui/card.tsx","./src/components/ui/command.tsx","./src/components/ui/date-range-picker-narrow.tsx","./src/components/ui/date-range-picker.tsx","./src/components/ui/dialog.tsx","./src/components/ui/drawer.tsx","./src/components/ui/dropdown-menu.tsx","./src/components/ui/input.tsx","./src/components/ui/label.tsx","./src/components/ui/pagination.tsx","./src/components/ui/popover.tsx","./src/components/ui/progress.tsx","./src/components/ui/scroll-area.tsx","./src/components/ui/select.tsx","./src/components/ui/separator.tsx","./src/components/ui/sheet.tsx","./src/components/ui/sidebar.tsx","./src/components/ui/skeleton.tsx","./src/components/ui/sonner.tsx","./src/components/ui/switch.tsx","./src/components/ui/table.tsx","./src/components/ui/tabs.tsx","./src/components/ui/toggle-group.tsx","./src/components/ui/toggle.tsx","./src/components/ui/tooltip.tsx","./src/hooks/use-mobile.tsx","./src/lib/utils.ts","./src/pages/analytics.tsx","./src/pages/categories.tsx","./src/pages/dashboard.tsx","./src/pages/forecasting.tsx","./src/pages/login.tsx","./src/pages/orders.tsx","./src/pages/products.tsx","./src/pages/purchaseorders.tsx","./src/pages/settings.tsx","./src/pages/vendors.tsx","./src/types/products.ts","./src/types/status-codes.ts"],"version":"5.6.3"}

View File

@@ -1,7 +1,5 @@
#!/bin/zsh #!/bin/zsh
#Clear previous mount in case it's still there
#Clear previous mount in case its still there umount '/Users/matt/Library/Mobile Documents/com~apple~CloudDocs/Dev/inventory/inventory-server'
umount /Users/matt/Library/Mobile Documents/com~apple~CloudDocs/Dev/inventory/inventory-server
#Mount #Mount
sshfs matt@dashboard.kent.pw:/var/www/html/inventory -p 22122 /Users/matt/Library/Mobile Documents/com~apple~CloudDocs/Dev/inventory/inventory-server/ sshfs matt@dashboard.kent.pw:/var/www/html/inventory -p 22122 '/Users/matt/Library/Mobile Documents/com~apple~CloudDocs/Dev/inventory/inventory-server/'