Fixes for re-running reset scripts
@@ -175,8 +175,8 @@ ORDER BY
     c.name,
     st.vendor;

--- Types are created by the reset script
-CREATE TABLE calculate_history (
+-- History and status tables
+CREATE TABLE IF NOT EXISTS calculate_history (
     id BIGSERIAL PRIMARY KEY,
     start_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
     end_time TIMESTAMP NULL,
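With `IF NOT EXISTS`, the statement is skipped when a table of that name already exists, so the schema file can be applied repeatedly; note that PostgreSQL only checks the name and does not reconcile a changed column list on the skipped statement. A minimal sketch of the re-run behaviour, assuming node-postgres (`pg`) as the reset script already uses, with a hypothetical `applySchemaTwice` helper:

```js
// Sketch: applying the same idempotent DDL twice; the second run is a
// no-op (NOTICE "relation already exists, skipping") rather than an error.
const { Client } = require('pg');

async function applySchemaTwice(ddl) {
    const client = new Client(); // connection settings taken from PG* env vars
    await client.connect();
    try {
        await client.query(ddl); // first run creates the table
        await client.query(ddl); // second run is skipped, not an error
    } finally {
        await client.end();
    }
}

applySchemaTwice(`
    CREATE TABLE IF NOT EXISTS calculate_history (
        id BIGSERIAL PRIMARY KEY,
        start_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
    );
`).catch(console.error);
```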
@@ -193,24 +193,18 @@ CREATE TABLE calculate_history (
     additional_info JSONB
 );

-CREATE INDEX idx_status_time ON calculate_history(status, start_time);
-
-CREATE TABLE calculate_status (
+CREATE TABLE IF NOT EXISTS calculate_status (
     module_name module_name PRIMARY KEY,
     last_calculation_timestamp TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
 );

-CREATE INDEX idx_last_calc ON calculate_status(last_calculation_timestamp);
-
-CREATE TABLE sync_status (
+CREATE TABLE IF NOT EXISTS sync_status (
     table_name VARCHAR(50) PRIMARY KEY,
     last_sync_timestamp TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
     last_sync_id BIGINT
 );

-CREATE INDEX idx_last_sync ON sync_status(last_sync_timestamp);
-
-CREATE TABLE import_history (
+CREATE TABLE IF NOT EXISTS import_history (
     id BIGSERIAL PRIMARY KEY,
     table_name VARCHAR(50) NOT NULL,
     start_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
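The inline `CREATE INDEX` statements are removed here and reappear, guarded, at the end of the file (see the next hunk). For checking whether a re-run found these tables already in place, `to_regclass()` is a cheap alternative to string-matching against `pg_tables`; a sketch, assuming the reset script's connected `client` and a hypothetical `tableExists` helper:

```js
// Sketch: to_regclass() returns NULL when the relation does not exist.
async function tableExists(client, qualifiedName) {
    const { rows } = await client.query(
        'SELECT to_regclass($1) AS oid', [qualifiedName]
    );
    return rows[0].oid !== null;
}

// e.g. if (await tableExists(client, 'public.sync_status')) { ... }
```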
@@ -225,5 +219,7 @@ CREATE TABLE import_history (
     additional_info JSONB
 );

-CREATE INDEX idx_table_time ON import_history(table_name, start_time);
-CREATE INDEX idx_import_history_status ON import_history(status);
+-- Create all indexes after tables are fully created
+CREATE INDEX IF NOT EXISTS idx_last_calc ON calculate_status(last_calculation_timestamp);
+CREATE INDEX IF NOT EXISTS idx_last_sync ON sync_status(last_sync_timestamp);
+CREATE INDEX IF NOT EXISTS idx_table_time ON import_history(table_name, start_time);
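`CREATE INDEX IF NOT EXISTS` matches by index name only, so consolidating index creation after all the tables keeps re-runs clean; note the hunk also stops creating `idx_status_time` and `idx_import_history_status`. A quick way to confirm a re-run left exactly one copy of each guarded index, as a sketch against `pg_indexes` (node-postgres assumed, hypothetical `listResetIndexes`):

```js
// Sketch: list the guarded indexes to verify the schema re-ran cleanly.
const { Client } = require('pg');

async function listResetIndexes() {
    const client = new Client();
    await client.connect();
    try {
        const { rows } = await client.query(`
            SELECT indexname, tablename
            FROM pg_indexes
            WHERE schemaname = 'public'
              AND indexname IN ('idx_last_calc', 'idx_last_sync', 'idx_table_time')
            ORDER BY indexname;
        `);
        console.table(rows); // one row per index, however many times the script ran
    } finally {
        await client.end();
    }
}

listResetIndexes().catch(console.error);
```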
File diff suppressed because it is too large
@@ -177,38 +177,62 @@ async function resetDatabase() {
             }
         }

-        // Drop types if they exist
-        await client.query('DROP TYPE IF EXISTS calculation_status CASCADE;');
-        await client.query('DROP TYPE IF EXISTS module_name CASCADE;');
+        // Only drop types if we're not preserving history tables
+        const historyTablesExist = await client.query(`
+            SELECT EXISTS (
+                SELECT FROM pg_tables
+                WHERE schemaname = 'public'
+                AND tablename IN ('calculate_history', 'import_history')
+            );
+        `);
+
+        if (!historyTablesExist.rows[0].exists) {
+            await client.query('DROP TYPE IF EXISTS calculation_status CASCADE;');
+            await client.query('DROP TYPE IF EXISTS module_name CASCADE;');
+        }

         // Re-enable triggers/foreign key checks
         await client.query('SET session_replication_role = \'origin\';');
     }

-    // Create enum types
+    // Create enum types if they don't exist
     outputProgress({
         operation: 'Creating enum types',
         message: 'Setting up required enum types...'
     });

-    await client.query(`
-        CREATE TYPE calculation_status AS ENUM ('running', 'completed', 'failed', 'cancelled')
-    `);
-
-    await client.query(`
-        CREATE TYPE module_name AS ENUM (
-            'product_metrics',
-            'time_aggregates',
-            'financial_metrics',
-            'vendor_metrics',
-            'category_metrics',
-            'brand_metrics',
-            'sales_forecasts',
-            'abc_classification'
-        )
-    `);
+    // Check if types exist before creating
+    const typesExist = await client.query(`
+        SELECT EXISTS (
+            SELECT 1 FROM pg_type
+            WHERE typname = 'calculation_status'
+        ) as calc_status_exists,
+        EXISTS (
+            SELECT 1 FROM pg_type
+            WHERE typname = 'module_name'
+        ) as module_name_exists;
+    `);
+
+    if (!typesExist.rows[0].calc_status_exists) {
+        await client.query(`CREATE TYPE calculation_status AS ENUM ('running', 'completed', 'failed', 'cancelled')`);
+    }
+
+    if (!typesExist.rows[0].module_name_exists) {
+        await client.query(`
+            CREATE TYPE module_name AS ENUM (
+                'product_metrics',
+                'time_aggregates',
+                'financial_metrics',
+                'vendor_metrics',
+                'category_metrics',
+                'brand_metrics',
+                'sales_forecasts',
+                'abc_classification'
+            )
+        `);
+    }

-    // Read and execute main schema (core tables)
+    // Read and execute main schema first (core tables)
     outputProgress({
         operation: 'Running database setup',
         message: 'Creating core tables...'
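Two caveats worth knowing about this hunk: the `pg_type` lookup is not schema-qualified, so a same-named type in another schema would satisfy it, and PostgreSQL has no `CREATE TYPE IF NOT EXISTS`. A common single-round-trip alternative is a `DO` block that swallows `duplicate_object`; a sketch, assuming the script's connected `client` (the helper name is hypothetical, not part of the commit):

```js
// Sketch: idempotent enum creation without a separate existence query.
async function createCalculationStatusType(client) {
    await client.query(`
        DO $$
        BEGIN
            CREATE TYPE calculation_status AS ENUM
                ('running', 'completed', 'failed', 'cancelled');
        EXCEPTION
            WHEN duplicate_object THEN NULL; -- type already exists: keep it
        END
        $$;
    `);
}
```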
@@ -292,39 +316,12 @@ async function resetDatabase() {
         }
     }

-    // List all tables in the database after schema execution
-    const allTables = await client.query(`
-        SELECT
-            table_schema,
-            table_name,
-            pg_size_pretty(pg_total_relation_size(quote_ident(table_schema) || '.' || quote_ident(table_name))) as size,
-            pg_relation_size(quote_ident(table_schema) || '.' || quote_ident(table_name)) as raw_size
-        FROM information_schema.tables
-        WHERE table_schema = 'public'
-    `);
-
-    if (allTables.rows.length === 0) {
-        outputProgress({
-            operation: 'Warning',
-            message: 'No tables found in database after schema execution'
-        });
-    } else {
-        outputProgress({
-            operation: 'Tables after schema execution',
-            message: {
-                count: allTables.rows.length,
-                tables: allTables.rows.map(t => ({
-                    schema: t.table_schema,
-                    name: t.table_name,
-                    size: t.size,
-                    rawSize: t.raw_size
-                }))
-            }
-        });
-    }
-
-    // Verify core tables were created
-    const existingTables = allTables.rows.map(t => t.table_name);
+    // Verify core tables were created
+    const existingTables = (await client.query(`
+        SELECT table_name
+        FROM information_schema.tables
+        WHERE table_schema = 'public'
+    `)).rows.map(t => t.table_name);

     outputProgress({
         operation: 'Core tables verification',
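The verbose listing with `pg_size_pretty` sizes is collapsed into a single expression that fetches just the table names. The removed size report can still be handy as a standalone diagnostic; a sketch reusing the same query outside the script (node-postgres assumed, hypothetical `reportTableSizes`):

```js
// Sketch: one-off report of public table sizes, as the deleted block did.
const { Client } = require('pg');

async function reportTableSizes() {
    const client = new Client();
    await client.connect();
    try {
        const { rows } = await client.query(`
            SELECT
                table_schema,
                table_name,
                pg_size_pretty(pg_total_relation_size(
                    quote_ident(table_schema) || '.' || quote_ident(table_name))) AS size
            FROM information_schema.tables
            WHERE table_schema = 'public'
            ORDER BY table_name;
        `);
        console.table(rows);
    } finally {
        await client.end();
    }
}

reportTableSizes().catch(console.error);
```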
@@ -349,7 +346,7 @@ async function resetDatabase() {
         message: `Successfully created tables: ${CORE_TABLES.join(', ')}`
     });

-    // Read and execute config schema
+    // Now read and execute config schema (since core tables exist)
     outputProgress({
         operation: 'Running config setup',
         message: 'Creating configuration tables...'
@@ -398,37 +395,6 @@ async function resetDatabase() {
         }
     }

-    // Verify config tables were created
-    const configTablesResult = await client.query(`
-        SELECT table_name
-        FROM information_schema.tables
-        WHERE table_schema = 'public'
-    `);
-    const existingConfigTables = configTablesResult.rows.map(t => t.table_name);
-
-    outputProgress({
-        operation: 'Config tables verification',
-        message: {
-            found: existingConfigTables,
-            expected: CONFIG_TABLES
-        }
-    });
-
-    const missingConfigTables = CONFIG_TABLES.filter(
-        t => !existingConfigTables.includes(t)
-    );
-
-    if (missingConfigTables.length > 0) {
-        throw new Error(
-            `Failed to create config tables: ${missingConfigTables.join(', ')}`
-        );
-    }
-
-    outputProgress({
-        operation: 'Config tables created',
-        message: `Successfully created tables: ${CONFIG_TABLES.join(', ')}`
-    });
-
     // Read and execute metrics schema (metrics tables)
     outputProgress({
         operation: 'Running metrics setup',
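This hunk drops the per-step config-table verification outright. If that check is wanted again, the core and config variants differ only in the expected-table list, so one shared helper would cover both; a hedged sketch built from the deleted block's logic (hypothetical `verifyTables`, not part of the commit):

```js
// Hypothetical helper: shared verification for any expected-table list.
async function verifyTables(client, expected, label) {
    const { rows } = await client.query(`
        SELECT table_name
        FROM information_schema.tables
        WHERE table_schema = 'public'
    `);
    const existing = rows.map(t => t.table_name);
    const missing = expected.filter(t => !existing.includes(t));
    if (missing.length > 0) {
        throw new Error(`Failed to create ${label} tables: ${missing.join(', ')}`);
    }
}

// e.g. await verifyTables(client, CONFIG_TABLES, 'config');
```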