Put back files
This commit is contained in:
599
inventory-server/scripts/reset-db.js
Normal file
599
inventory-server/scripts/reset-db.js
Normal file
@@ -0,0 +1,599 @@
|
||||
const { Client } = require('pg');
|
||||
const path = require('path');
|
||||
const dotenv = require('dotenv');
|
||||
const fs = require('fs');
|
||||
|
||||
dotenv.config({ path: path.join(__dirname, '../.env') });
|
||||
|
||||
// Connection settings for the target PostgreSQL database, read from the
// environment loaded above via dotenv.
const dbConfig = {
  host: process.env.DB_HOST,
  user: process.env.DB_USER,
  password: process.env.DB_PASSWORD,
  database: process.env.DB_NAME,
  // Environment variables are strings; parse the port explicitly and fall
  // back to the PostgreSQL default when unset or not a valid number.
  port: Number.parseInt(process.env.DB_PORT, 10) || 5432
};
|
||||
|
||||
// Tables to always protect from being dropped: auth/permissions, audit
// history, AI assets, templates, and imported raw data.
// Frozen so no caller can accidentally mutate the protection list.
const PROTECTED_TABLES = Object.freeze([
  'users',
  'permissions',
  'user_permissions',
  'calculate_history',
  'import_history',
  'ai_prompts',
  'ai_validation_performance',
  'templates',
  'reusable_images',
  'imported_daily_inventory',
  'imported_product_stat_history',
  'imported_product_current_prices'
]);
|
||||
|
||||
// Emit a progress record as a single JSON line on stdout so a parent
// process can stream-parse status updates. Records without an explicit
// status default to 'running'; the parameter itself is never mutated.
function outputProgress(data) {
  // Note: spread the default AFTER copying data would lose it, and the
  // original `{ status: 'running', ...data }` let a present-but-falsy
  // data.status (e.g. '') overwrite the default — keep data's own status
  // only when it is truthy.
  const payload = data.status ? data : { ...data, status: 'running' };
  console.log(JSON.stringify(payload));
}
|
||||
|
||||
// Core tables that must exist after the main schema runs; used to verify
// the reset actually succeeded. Frozen to prevent accidental mutation.
const CORE_TABLES = Object.freeze([
  'products',
  'orders',
  'purchase_orders',
  'categories',
  'product_categories'
]);
|
||||
|
||||
// Split a SQL script into individual executable statements.
// Semicolons inside single/double-quoted strings, dollar-quoted bodies
// ($tag$ ... $tag$), line comments (--) and block comments (/* */) do not
// terminate a statement; comments are stripped from the output.
function splitSQLStatements(sql) {
  // Normalize line endings first.
  sql = sql.replace(/\r\n/g, '\n');

  const statements = [];
  let currentStatement = '';
  let inString = false;
  let stringChar = '';
  let inDollarQuote = false;
  let dollarQuoteTag = '';

  for (let i = 0; i < sql.length; i++) {
    const char = sql[i];
    const nextChar = sql[i + 1] || '';

    // Dollar quotes: $tag$ ... $tag$ (the tag may be empty, e.g. $$).
    if (char === '$' && !inString) {
      // Look ahead for the closing '$' of the tag.
      let tag = '$';
      let j = i + 1;
      while (j < sql.length && sql[j] !== '$') {
        tag += sql[j];
        j++;
      }
      tag += '$';

      if (j < sql.length) { // Found closing $
        if (!inDollarQuote) {
          inDollarQuote = true;
          dollarQuoteTag = tag;
          currentStatement += tag;
          i = j;
          continue;
        } else if (sql.substring(i, j + 1) === dollarQuoteTag) {
          inDollarQuote = false;
          dollarQuoteTag = '';
          currentStatement += tag;
          i = j;
          continue;
        }
        // A '$' inside a dollar-quoted body with a non-matching tag falls
        // through and is appended as ordinary text.
      }
    }

    // String literals (ignored inside dollar quotes); a backslash-escaped
    // quote does not toggle string state.
    if (!inDollarQuote && (char === "'" || char === '"') && sql[i - 1] !== '\\') {
      if (!inString) {
        inString = true;
        stringChar = char;
      } else if (char === stringChar) {
        inString = false;
      }
    }

    // Comments (only outside strings and dollar quotes).
    if (!inString && !inDollarQuote) {
      if (char === '-' && nextChar === '-') {
        // Skip to end of line, but keep a newline so tokens adjacent to
        // the comment are not glued to the next line's tokens.
        while (i < sql.length && sql[i] !== '\n') i++;
        currentStatement += '\n';
        continue;
      }

      if (char === '/' && nextChar === '*') {
        // Skip until the closing */.
        i += 2;
        while (i < sql.length && (sql[i] !== '*' || sql[i + 1] !== '/')) i++;
        i++; // Skip the closing /
        continue;
      }
    }

    // Statement boundary.
    if (!inString && !inDollarQuote && char === ';') {
      if (currentStatement.trim()) {
        statements.push(currentStatement.trim());
      }
      currentStatement = '';
    } else {
      currentStatement += char;
    }
  }

  // Flush a trailing statement that has no terminating semicolon.
  if (currentStatement.trim()) {
    statements.push(currentStatement.trim());
  }

  return statements;
}
|
||||
|
||||
// For a CREATE TABLE statement, check information_schema to confirm the
// table now exists and report the result as a progress record.
async function verifyTableCreated(client, stmt) {
  const tableName = stmt.match(/create\s+table\s+(?:if\s+not\s+exists\s+)?["]?(\w+)["]?/i)?.[1];
  if (!tableName) return;

  const tableExists = await client.query(`
    SELECT COUNT(*) as count
    FROM information_schema.tables
    WHERE table_schema = 'public'
    AND table_name = $1
  `, [tableName]);

  outputProgress({
    operation: 'Table Creation Verification',
    message: {
      table: tableName,
      // NOTE(review): pg returns COUNT(*) as a string; '1' > 0 coerces as
      // intended, but this relies on implicit numeric coercion.
      exists: tableExists.rows[0].count > 0
    }
  });
}

/**
 * Execute every statement of one SQL schema file against the connected
 * client, emitting JSON progress records along the way.
 *
 * Runs inside a transaction, committing every 10 statements to avoid one
 * long-running transaction. On any statement failure the current chunk is
 * rolled back, an error record is emitted, and the error is rethrown.
 *
 * @param {object} client - already-connected pg Client
 * @param {string} filePath - absolute path to the .sql file
 * @param {string} opPrefix - prefix for progress operation labels
 *   ('' for the core schema, 'Config ' / 'Metrics ' for the others)
 * @param {string} notFoundLabel - label used in the file-missing error
 * @param {boolean} verifyTables - when true, verify each CREATE TABLE
 *   statement actually produced a table (core schema only)
 * @throws {Error} when the file is missing or any statement fails
 */
async function executeSchemaFile(client, filePath, opPrefix, notFoundLabel, verifyTables) {
  if (!fs.existsSync(filePath)) {
    throw new Error(`${notFoundLabel} file not found at: ${filePath}`);
  }

  const schemaSQL = fs.readFileSync(filePath, 'utf8');

  outputProgress({
    operation: `${opPrefix}Schema file`,
    message: {
      path: filePath,
      exists: fs.existsSync(filePath),
      size: fs.statSync(filePath).size,
      firstFewLines: schemaSQL.split('\n').slice(0, 5).join('\n')
    }
  });

  const statements = splitSQLStatements(schemaSQL);
  outputProgress({
    operation: `${opPrefix}SQL Execution`,
    message: {
      totalStatements: statements.length,
      statements: statements.map((stmt, i) => ({
        number: i + 1,
        preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : '')
      }))
    }
  });

  await client.query('BEGIN');
  try {
    for (let i = 0; i < statements.length; i++) {
      const stmt = statements[i];
      try {
        const result = await client.query(stmt);

        if (verifyTables && stmt.trim().toLowerCase().startsWith('create table')) {
          await verifyTableCreated(client, stmt);
        }

        outputProgress({
          operation: `${opPrefix}SQL Progress`,
          message: {
            statement: i + 1,
            total: statements.length,
            preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : ''),
            rowCount: result.rowCount
          }
        });

        // Commit in chunks of 10 statements to avoid long-running transactions.
        if (i > 0 && i % 10 === 0) {
          await client.query('COMMIT');
          await client.query('BEGIN');
        }
      } catch (sqlError) {
        await client.query('ROLLBACK');
        outputProgress({
          status: 'error',
          operation: `${opPrefix}SQL Error`,
          error: sqlError.message,
          statement: stmt,
          statementNumber: i + 1
        });
        throw sqlError;
      }
    }
    // Commit the final (partial) chunk.
    await client.query('COMMIT');
  } catch (error) {
    // NOTE(review): when the per-statement handler already rolled back,
    // this second ROLLBACK is a harmless PostgreSQL warning; kept so a
    // failure outside that handler still cleans up the open transaction.
    await client.query('ROLLBACK');
    throw error;
  }
}

// Drop every public table that is not in PROTECTED_TABLES, temporarily
// setting session_replication_role='replica' to bypass FK/trigger checks.
// The custom enum types are dropped only when the history tables that
// reference them no longer exist.
async function dropUnprotectedTables(client) {
  outputProgress({
    operation: 'Getting table list',
    message: 'Retrieving all table names...'
  });

  const tablesResult = await client.query(`
    SELECT string_agg(tablename, ', ') as tables
    FROM pg_tables
    WHERE schemaname = 'public'
    AND tablename NOT IN (SELECT unnest($1::text[]));
  `, [PROTECTED_TABLES]);

  if (!tablesResult.rows[0].tables) {
    outputProgress({
      operation: 'No tables found',
      message: 'Database is already empty'
    });
    return;
  }

  outputProgress({
    operation: 'Dropping tables',
    message: 'Dropping all existing tables...'
  });

  // Disable triggers/foreign key checks while dropping.
  await client.query('SET session_replication_role = \'replica\';');

  const tables = tablesResult.rows[0].tables.split(', ');
  for (const table of tables) {
    if (!PROTECTED_TABLES.includes(table)) {
      await client.query(`DROP TABLE IF EXISTS "${table}" CASCADE`);
    }
  }

  // Only drop the enum types if the protected history tables are absent.
  const historyTablesExist = await client.query(`
    SELECT EXISTS (
      SELECT FROM pg_tables
      WHERE schemaname = 'public'
      AND tablename IN ('calculate_history', 'import_history')
    );
  `);

  if (!historyTablesExist.rows[0].exists) {
    await client.query('DROP TYPE IF EXISTS calculation_status CASCADE;');
    await client.query('DROP TYPE IF EXISTS module_name CASCADE;');
  }

  // Re-enable triggers/foreign key checks.
  await client.query('SET session_replication_role = \'origin\';');
}

// Create the calculation_status and module_name enum types if they do not
// already exist (they may survive a reset that preserved history tables).
async function ensureEnumTypes(client) {
  outputProgress({
    operation: 'Creating enum types',
    message: 'Setting up required enum types...'
  });

  const typesExist = await client.query(`
    SELECT EXISTS (
      SELECT 1 FROM pg_type
      WHERE typname = 'calculation_status'
    ) as calc_status_exists,
    EXISTS (
      SELECT 1 FROM pg_type
      WHERE typname = 'module_name'
    ) as module_name_exists;
  `);

  if (!typesExist.rows[0].calc_status_exists) {
    await client.query(`CREATE TYPE calculation_status AS ENUM ('running', 'completed', 'failed', 'cancelled')`);
  }

  if (!typesExist.rows[0].module_name_exists) {
    await client.query(`
      CREATE TYPE module_name AS ENUM (
        'product_metrics',
        'time_aggregates',
        'financial_metrics',
        'vendor_metrics',
        'category_metrics',
        'brand_metrics',
        'sales_forecasts',
        'abc_classification',
        'daily_snapshots',
        'periodic_metrics'
      )
    `);
  }
}

// Confirm all CORE_TABLES now exist in the public schema; throw if any
// are missing so the reset fails loudly instead of leaving a half-built DB.
async function verifyCoreTables(client) {
  const existingTables = (await client.query(`
    SELECT table_name
    FROM information_schema.tables
    WHERE table_schema = 'public'
  `)).rows.map(t => t.table_name);

  outputProgress({
    operation: 'Core tables verification',
    message: {
      found: existingTables,
      expected: CORE_TABLES
    }
  });

  const missingCoreTables = CORE_TABLES.filter(
    t => !existingTables.includes(t)
  );

  if (missingCoreTables.length > 0) {
    throw new Error(
      `Failed to create core tables: ${missingCoreTables.join(', ')}`
    );
  }

  outputProgress({
    operation: 'Core tables created',
    message: `Successfully created tables: ${CORE_TABLES.join(', ')}`
  });
}

/**
 * Reset the database: drop all non-protected tables, recreate the enum
 * types, then re-run the core, config, and metrics schema files in order.
 *
 * Emits newline-delimited JSON progress records on stdout throughout.
 * On failure, emits an error record and exits the process with code 1.
 * Always re-enables foreign key checks and closes the connection.
 */
async function resetDatabase() {
  outputProgress({
    operation: 'Starting database reset',
    message: 'Connecting to database...'
  });

  // Debug: log current directory and resolved schema path.
  outputProgress({
    operation: 'Debug paths',
    message: {
      currentDir: process.cwd(),
      __dirname: __dirname,
      schemaPath: path.join(__dirname, '../db/schema.sql')
    }
  });

  const client = new Client(dbConfig);
  await client.connect();

  try {
    // Report PostgreSQL version and the connected user/database.
    outputProgress({
      operation: 'Checking database',
      message: 'Verifying PostgreSQL version and user privileges...'
    });

    const versionResult = await client.query('SELECT version()');
    const userResult = await client.query('SELECT current_user, current_database()');

    outputProgress({
      operation: 'Database info',
      message: {
        version: versionResult.rows[0].version,
        user: userResult.rows[0].current_user,
        database: userResult.rows[0].current_database
      }
    });

    await dropUnprotectedTables(client);
    await ensureEnumTypes(client);

    // Core schema first — config and metrics schemas depend on its tables.
    outputProgress({
      operation: 'Running database setup',
      message: 'Creating core tables...'
    });
    await executeSchemaFile(
      client, path.join(__dirname, '../db/schema.sql'), '', 'Schema', true
    );

    await verifyCoreTables(client);

    outputProgress({
      operation: 'Running config setup',
      message: 'Creating configuration tables...'
    });
    await executeSchemaFile(
      client, path.join(__dirname, '../db/config-schema-new.sql'), 'Config ', 'Config schema', false
    );

    outputProgress({
      operation: 'Running metrics setup',
      message: 'Creating metrics tables...'
    });
    await executeSchemaFile(
      client, path.join(__dirname, '../db/metrics-schema-new.sql'), 'Metrics ', 'Metrics schema', false
    );

    outputProgress({
      status: 'complete',
      operation: 'Database reset complete',
      message: 'Database has been reset and all tables recreated'
    });
  } catch (error) {
    outputProgress({
      status: 'error',
      operation: 'Failed to reset database',
      error: error.message,
      stack: error.stack
    });
    process.exit(1);
  } finally {
    // Make sure foreign key checks are re-enabled even on failure paths.
    try {
      await client.query('SET session_replication_role = \'origin\'');
    } catch (e) {
      console.error('Error re-enabling foreign key checks:', e.message);
    }

    // Close the database connection.
    await client.end();
  }
}
|
||||
|
||||
// Expose resetDatabase for programmatic use (CommonJS consumers).
if (typeof module !== 'undefined' && module.exports) {
  module.exports = resetDatabase;
}

// When invoked directly from the CLI, run the reset and fail loudly.
if (require.main === module) {
  resetDatabase().catch((err) => {
    console.error('Error:', err);
    process.exit(1);
  });
}
|
||||
Reference in New Issue
Block a user