117 Commits

Author SHA1 Message Date
a161f4533d Regroup sidebar, discount sim layout updates and fixes 2025-09-25 11:44:15 -04:00
6e30ba60ff Add discount simulator 2025-09-24 21:53:46 -04:00
138251cf86 Make tax cat single select, plus revert previous commit "Attempted improvements to validation to make the validation step table more responsive" 2025-09-24 09:14:58 -04:00
24aee1db90 Update product import output json 2025-09-23 11:46:46 -04:00
2fe7fd5b2f Layout tweaks for financial overview, add cogs % line 2025-09-23 11:45:01 -04:00
d8b39979cd Pull out period selection popover into its own component 2025-09-22 12:49:13 -04:00
4776a112b6 Add in natural language time period input and tweak layout for financial overview 2025-09-22 12:28:59 -04:00
2ff325a132 Fix time periods on financial overview, remove some logging 2025-09-21 23:47:05 -04:00
5d46a2a7e5 Tweak financial calculations 2025-09-20 17:40:34 -04:00
512b351429 Remove duplicate/old UI library 2025-09-18 13:13:15 -04:00
3991341376 Deal with incomplete periods in financial overview 2025-09-18 13:00:23 -04:00
5833779c10 Add ability to group by different periods to financial overview 2025-09-18 12:29:10 -04:00
c61115f665 Add in financial overview component with related routes 2025-09-18 12:04:20 -04:00
7da2b304b4 Attempted improvements to validation to make the validation step table more responsive 2025-09-17 21:43:51 -04:00
4ccda8ad49 Update vite config for new dashboard server location 2025-09-17 21:40:43 -04:00
88f703ec70 Move dashboard server into project 2025-09-17 21:09:22 -04:00
ab998fb7c4 Add mount script 2025-09-08 21:58:48 -04:00
faaa8cc47a Fix vendors page query issue 2025-09-06 23:49:59 -04:00
459c5092d2 Clean up build errors 2025-09-06 23:40:45 -04:00
6c9fd062e9 Add line breaks to ai validation prompt dialog 2025-09-06 17:00:41 -04:00
5d7d7a8671 Random import fixes/enhancements 2025-09-06 16:55:35 -04:00
54f55b06a1 More validation fixes and enhancements 2025-09-06 16:15:00 -04:00
4935cfe3bb More validation fixes, validate only cells that have changed instead of everything every time 2025-09-06 15:33:48 -04:00
5e2ee73e2d Product import speed/responsiveness fixes, particularly around validation 2025-09-06 15:08:53 -04:00
4dfe85231a Fixes and improvements for product import module 2025-09-06 14:38:47 -04:00
9e7aac836e Add in initial PO creation feature 2025-09-03 12:15:20 -04:00
d35c7dd6cf Fix/enhance forecasting page 2025-09-02 16:46:05 -04:00
ad1ebeefe1 More user form tweaks 2025-09-02 12:15:35 -04:00
a0c442d1af Tweak user management form 2025-09-01 18:55:16 -04:00
7938c50762 Add rocket chat user id field and show messages from linked user id on non-admin accounts 2025-09-01 18:46:59 -04:00
5dcd19e7f3 Clean up some permissions 2025-08-30 17:28:43 -04:00
075e7253a0 Fix some cors issues 2025-06-23 10:22:36 -04:00
763aa4f74b Tweak sidebar and header 2025-06-22 21:21:14 -04:00
520ff5bd74 Lazy loading for smaller build chunks/faster initial load 2025-06-22 21:07:17 -04:00
8496bbc4ee Merge dashboard app in 2025-06-22 19:13:35 -04:00
38f6688f10 Misc product fixes 2025-06-22 15:52:16 -04:00
fcfe7e2fab Add groups to sidebar 2025-06-20 14:55:45 -04:00
2e3e81a02b Opus corrections/fixes/additions 2025-06-19 15:49:31 -04:00
8606a90e34 Optimize imports, fix up tracking records and time overall 2025-06-19 11:15:04 -04:00
a97819f4a6 Clean up old historical data calcs/scripts, optimize calculations to not update every row every time 2025-06-18 15:13:31 -04:00
dd82c624d8 Fix issues with data management settings page 2025-06-18 11:20:34 -04:00
7999e1e64a FIx time zone calcs 2025-06-15 09:24:40 -04:00
12a0f540b3 Chat fixes and layout tweaks 2025-06-15 00:55:49 -04:00
e793cb0cc5 Build out chat more 2025-06-14 14:27:50 -04:00
b2330dee22 Add chat page and chat server 2025-06-14 13:36:31 -04:00
00501704df Switch dev port 2025-06-14 10:43:48 -04:00
4cb41a7e4c Fix PO import not removing products from edited POs 2025-04-14 14:31:03 -04:00
d05d27494d Adjust PO accordion styles, add in product and PO/receiving links 2025-04-14 14:20:30 -04:00
4ed734e5c0 Add PO details accordion to purchase orders page 2025-04-14 00:58:55 -04:00
1e3be5d4cb Refactor purchase orders page into individual components 2025-04-14 00:29:37 -04:00
8dd852dd6a Fix filtering/sorting/pagination for purchase orders 2025-04-13 23:51:09 -04:00
eeff5817ea More layout/header tweaks for purchase orders 2025-04-13 22:19:14 -04:00
1b19feb172 Tweak layout of purchase orders page and redo header cards 2025-04-13 17:16:08 -04:00
80ff8124ec Update calculate scripts and routes for PO table split 2025-04-12 17:07:43 -04:00
8508bfac93 Add receivings table, split PO import 2025-04-12 14:20:59 -04:00
ac14179bd2 PO-related fixes 2025-04-12 10:54:42 -04:00
00249f7c33 Clean up routes 2025-04-08 21:26:00 -04:00
f271f3aae4 Get frontend dashboard/analytics mostly loading data again 2025-04-08 00:02:43 -04:00
43f76e4ac0 Fix specific import calculations 2025-04-07 22:07:21 -04:00
92ff80fba2 Import and calculate tweaks and fixes 2025-04-06 17:12:36 -04:00
a4c1a19d2e Try to synchronize time zones across import 2025-04-05 16:20:43 -04:00
c9b656d34b Tweaks and fixes for products table 2025-04-05 09:52:36 -04:00
d081a60662 Change calculate metrics script to only record one entry in database per run 2025-04-04 11:33:50 -04:00
4021fe487d Create pages and routes for new settings tables, start improving product details 2025-04-03 22:12:53 -04:00
4552fa4862 Move product status calculation to database, fix up products table, more categories tweaks 2025-04-03 17:12:10 -04:00
2601a04211 Category calculation fixes 2025-04-02 15:42:20 -04:00
6051b849d6 Consolidate old/new vendor and category routes, enhance new brands route, update frontend accordingly for all three pages, improve hierarchy on categories page, fix some calculations 2025-04-02 14:28:18 -04:00
dbd0232285 Update/add frontend pages for categories, brands, vendors new routes, update products page to use new route 2025-04-01 14:34:57 -04:00
1b9f01d101 Add routes for brands, categories, vendors new implementation 2025-04-01 12:03:12 -04:00
a9dbbbf824 Add new vendors, brands, categories tables and calculate scripts 2025-04-01 01:12:03 -04:00
97296946f1 Clean up, fix file path issues with import scripts, adjust data management page for new metrics calcs 2025-04-01 00:15:06 -04:00
5035dda733 Add metrics historical backfill scripts, fix up all new metrics calc queries and add combined script to run 2025-03-31 22:15:41 -04:00
796a2e5d1f Add new metrics route 2025-03-30 11:43:29 -04:00
047122a620 Add new calculate scripts, add in historical data import 2025-03-30 10:30:13 -04:00
4c4359908c Create new metrics reset script 2025-03-29 17:17:02 -04:00
54cc4be1e3 Add new schemas and scripts for calculate 2025-03-29 17:08:30 -04:00
f4854423ab Update import tables schema with minor changes, add new metrics schema 2025-03-29 16:46:31 -04:00
0796518e26 Add some additional existing data points to products table (partly broken) 2025-03-29 10:44:13 -04:00
7aa494aaad Clean up 2025-03-28 19:35:23 -04:00
1e0be3f86e Ensure data management page grabs progress of any running scripts on load, clean up unneeded console logs, restyle login page 2025-03-28 19:26:52 -04:00
a068a253cd More data management page tweaks, ensure reusable images don't get deleted automatically 2025-03-27 19:31:11 -04:00
087ec710f6 Fix/enhance data management page 2025-03-27 17:09:06 -04:00
957c7b5eb1 Add new filter options and metrics to product filters and pages; enhance SQL schema for financial calculations 2025-03-27 16:27:13 -04:00
8b8845b423 Clean up build errors 2025-03-26 21:53:33 -04:00
e5c4f617c5 Get frontend pages loading data again, remove unused components 2025-03-26 21:47:24 -04:00
8e19e6cd74 Finish fixing calculate scripts 2025-03-26 14:22:08 -04:00
749907bd30 Start migrating and fixing calculate scripts 2025-03-26 01:19:44 -04:00
108181c63d Fix more import script bugs/missing data 2025-03-25 22:23:06 -04:00
5dd779cb4a Fix purchase orders import 2025-03-25 19:12:41 -04:00
7b0e792d03 Merge branch 'master' into move-to-postgresql 2025-03-25 12:15:07 -04:00
517bbe72f4 Add in image library feature 2025-03-25 12:14:36 -04:00
87d4b9e804 Fixes/improvements for import scripts 2025-03-24 22:27:44 -04:00
75da2c6772 Get all import scripts running again 2025-03-24 21:58:00 -04:00
00a02aa788 Enhance ai validation changes dialog 2025-03-24 14:17:02 -04:00
114018080a Enhance ai debug dialog 2025-03-24 13:25:18 -04:00
228ae8b2a9 Layout/style tweaks, remove text file prompts, integrate system prompt into database/settings 2025-03-24 12:26:21 -04:00
dd4b3f7145 Add prompts table and settings page to create/read/update/delete from it, incorporate company specific prompts into ai validation 2025-03-24 11:30:15 -04:00
7eb4077224 Clean up build errors 2025-03-23 22:15:11 -04:00
d60a8cbc6e Hide debug components without permission 2025-03-23 22:06:51 -04:00
1fcbf54989 Layout/style tweaks, fix performance metrics settings page 2025-03-23 22:01:41 -04:00
ce75496770 Clean up unused permissions, take user to first page/component they can access 2025-03-23 17:18:31 -04:00
7eae4a0b29 More permissions setup, simplify to one component 2025-03-23 16:04:32 -04:00
f421154c1d Get user management page working, add permission checking in more places 2025-03-22 22:27:50 -04:00
03dc119a15 Initial permissions framework and setup 2025-03-22 22:11:03 -04:00
1963bee00c Merge branch 'add-product-upload-page' 2025-03-22 21:11:10 -04:00
675a0fc374 Fix incorrect columns in import scripts 2025-02-18 10:46:16 -05:00
ca2653ea1a Update import scripts through POs 2025-02-17 22:17:01 -05:00
a8d3fd8033 Update import scripts through orders 2025-02-17 00:53:07 -05:00
702b956ff1 Fix main import script issue 2025-02-16 11:54:28 -05:00
9b8577f258 Update import scripts through products 2025-02-14 21:46:50 -05:00
9623681a15 Update import scripts, working through categories 2025-02-14 13:30:14 -05:00
cc22fd8c35 Update backend/frontend 2025-02-14 11:26:02 -05:00
0ef1b6100e Clean up old files 2025-02-14 09:37:05 -05:00
a519746ccb Move authentication to postgres 2025-02-14 09:10:15 -05:00
f29dd8ef8b Clean up build errors 2025-02-13 20:02:11 -05:00
f2a5c06005 Fixes for re-running reset scripts 2025-02-13 10:25:04 -05:00
fb9f959fe5 Update schemas and reset scripts 2025-02-12 16:14:25 -05:00
329 changed files with 83413 additions and 13072 deletions

8
.gitignore vendored

@@ -67,4 +67,10 @@ inventory-server/scripts/.fuse_hidden00000fa20000000a
.VSCodeCounter/
.VSCodeCounter/*
.VSCodeCounter/**/*
.VSCodeCounter/**/*
*/chat/db-convert/db/*
*/chat/db-convert/mongo_converter_env/*
# Ignore compiled Vite config to avoid duplication
vite.config.js

204
docs/PERMISSIONS.md Normal file

@@ -0,0 +1,204 @@
# Permission System Documentation
This document outlines the permission system implemented in the Inventory Manager application.
## Permission Structure
Permissions follow this naming convention:
- Page access: `access:{page_name}`
- Settings sections: `settings:{section_name}`
- Admin features: `admin:{feature}`
Examples:
- `access:products` - Can access the Products page
- `settings:user_management` - Can access User Management settings
- `admin:debug` - Can see debug information
## Permission Components
### PermissionGuard
The core component that conditionally renders content based on permissions.
```tsx
<PermissionGuard
  permission="settings:user_management"
  fallback={<p>No permission</p>}
>
  <button>Manage Users</button>
</PermissionGuard>
```
Options:
- `permission`: Single permission code
- `anyPermissions`: Array of permissions (ANY match grants access)
- `allPermissions`: Array of permissions (ALL required)
- `adminOnly`: For admin-only sections
- `page`: Page name (checks `access:{page}` permission)
- `fallback`: Content to show if permission check fails
### PermissionProtectedRoute
Protects entire pages based on page access permissions.
```tsx
<Route path="/products" element={
  <PermissionProtectedRoute page="products">
    <Products />
  </PermissionProtectedRoute>
} />
```
### ProtectedSection
Protects sections within a page based on action permissions.
```tsx
<ProtectedSection page="products" action="create">
  <button>Add Product</button>
</ProtectedSection>
```
### PermissionButton
Button that automatically handles permissions.
```tsx
<PermissionButton
  page="products"
  action="create"
  onClick={handleCreateProduct}
>
  Add Product
</PermissionButton>
```
### SettingsSection
Specific component for settings with built-in permission checks.
```tsx
<SettingsSection
  title="System Settings"
  description="Configure global settings"
  permission="settings:global"
>
  {/* Settings content */}
</SettingsSection>
```
## Permission Hooks
### usePermissions
Core hook for checking any permission.
```tsx
const { hasPermission, hasPageAccess, isAdmin } = usePermissions();

if (hasPermission('settings:user_management')) {
  // Can access user management
}
```
### usePagePermission
Specialized hook for page-level permissions.
```tsx
const { canView, canCreate, canEdit, canDelete } = usePagePermission('products');

if (canView()) {
  // Can view products
}
```
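The guard components above can be implemented as thin wrappers around these hooks. A minimal sketch of what `PermissionGuard` might look like, assuming `usePermissions` exposes `isAdmin` as a boolean and that the import path shown is purely illustrative:

```tsx
import { ReactNode } from 'react';
// Illustrative path; the real hook location may differ
import { usePermissions } from '@/hooks/usePermissions';

interface PermissionGuardProps {
  permission?: string;
  anyPermissions?: string[];
  allPermissions?: string[];
  adminOnly?: boolean;
  page?: string;
  fallback?: ReactNode;
  children: ReactNode;
}

export function PermissionGuard({
  permission,
  anyPermissions,
  allPermissions,
  adminOnly,
  page,
  fallback = null,
  children,
}: PermissionGuardProps) {
  const { hasPermission, hasPageAccess, isAdmin } = usePermissions();

  // Admins bypass every other check
  if (isAdmin) return <>{children}</>;

  const allowed =
    !adminOnly &&
    (!permission || hasPermission(permission)) &&
    (!page || hasPageAccess(page)) &&
    (!anyPermissions || anyPermissions.some((p) => hasPermission(p))) &&
    (!allPermissions || allPermissions.every((p) => hasPermission(p)));

  return <>{allowed ? children : fallback}</>;
}
```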
## Database Schema
Permissions are stored in the database:
- `permissions` table: Stores all available permissions
- `user_permissions` junction table: Maps permissions to users
Admin users automatically have all permissions.
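On the server side, resolving a user's permission codes from these two tables can be a single join. A hedged sketch with node-postgres (column names such as `is_admin`, `code`, `user_id`, and `permission_id` are assumptions, not the confirmed schema):

```javascript
// Sketch: load a user's permission codes from the junction table.
async function getUserPermissionCodes(pool, userId) {
  const admin = await pool.query('SELECT is_admin FROM users WHERE id = $1', [userId]);
  if (admin.rows[0]?.is_admin) {
    // Admin users automatically have all permissions
    const all = await pool.query('SELECT code FROM permissions');
    return all.rows.map((r) => r.code);
  }

  const result = await pool.query(
    `SELECT p.code
       FROM user_permissions up
       JOIN permissions p ON p.id = up.permission_id
      WHERE up.user_id = $1`,
    [userId]
  );
  return result.rows.map((r) => r.code);
}
```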
## Implemented Permission Codes
### Page Access Permissions
| Code | Description |
|------|-------------|
| `access:dashboard` | Access to Dashboard page |
| `access:overview` | Access to Overview page |
| `access:products` | Access to Products page |
| `access:categories` | Access to Categories page |
| `access:brands` | Access to Brands page |
| `access:vendors` | Access to Vendors page |
| `access:purchase_orders` | Access to Purchase Orders page |
| `access:analytics` | Access to Analytics page |
| `access:forecasting` | Access to Forecasting page |
| `access:import` | Access to Import page |
| `access:settings` | Access to Settings page |
| `access:chat` | Access to Chat Archive page |
### Settings Permissions
| Code | Description |
|------|-------------|
| `settings:global` | Access to Global Settings section |
| `settings:products` | Access to Product Settings section |
| `settings:vendors` | Access to Vendor Settings section |
| `settings:data_management` | Access to Data Management settings |
| `settings:calculation_settings` | Access to Calculation Settings |
| `settings:library_management` | Access to Image Library Management |
| `settings:performance_metrics` | Access to Performance Metrics |
| `settings:prompt_management` | Access to AI Prompt Management |
| `settings:stock_management` | Access to Stock Management |
| `settings:templates` | Access to Template Management |
| `settings:user_management` | Access to User Management |
### Admin Permissions
| Code | Description |
|------|-------------|
| `admin:debug` | Can see debug information and features |
## Implementation Examples
### Page Protection
In `App.tsx`:
```tsx
<Route path="/products" element={
  <PermissionProtectedRoute page="products">
    <Products />
  </PermissionProtectedRoute>
} />
```
### Component Level Protection
```tsx
const { hasPermission } = usePermissions();

function handleAction() {
  if (!hasPermission('settings:user_management')) {
    toast.error("You don't have permission");
    return;
  }
  // Action logic
}
```
### UI Element Protection
```tsx
<PermissionGuard permission="settings:user_management">
  <button onClick={handleManageUsers}>
    Manage Users
  </button>
</PermissionGuard>
```
## Notes
- **Page Access**: These permissions control which pages a user can navigate to
- **Settings Access**: These permissions control access to different sections within the Settings page
- **Admin Features**: Special permissions for administrative functions
- **CRUD Operations**: The application currently focuses on viewing and managing data rather than creating/editing/deleting individual records
- **User Management**: User CRUD operations are handled through the settings interface rather than dedicated user management pages


@@ -0,0 +1,342 @@
# MySQL to PostgreSQL Import Process Documentation
This document outlines the data import process from the production MySQL database to the local PostgreSQL database, focusing on column mappings, data transformations, and the overall import architecture.
## Table of Contents
1. [Overview](#overview)
2. [Import Architecture](#import-architecture)
3. [Column Mappings](#column-mappings)
- [Categories](#categories)
- [Products](#products)
- [Product Categories (Relationship)](#product-categories-relationship)
- [Orders](#orders)
- [Purchase Orders](#purchase-orders)
- [Metadata Tables](#metadata-tables)
4. [Special Calculations](#special-calculations)
5. [Implementation Notes](#implementation-notes)
## Overview
The import process extracts data from a MySQL 5.7 production database and imports it into a PostgreSQL database. It can operate in two modes:
- **Full Import**: Imports all data regardless of last sync time
- **Incremental Import**: Only imports data that has changed since the last import
The process handles four main data types:
- Categories (product categorization hierarchy)
- Products (inventory items)
- Orders (sales records)
- Purchase Orders (vendor orders)
## Import Architecture
The import process follows these steps:
1. **Establish Connection**: Creates an SSH tunnel to the production server and establishes database connections
2. **Setup Import History**: Creates a record of the current import operation
3. **Import Categories**: Processes product categories in hierarchical order
4. **Import Products**: Processes products with their attributes and category relationships
5. **Import Orders**: Processes customer orders with line items, taxes, and discounts
6. **Import Purchase Orders**: Processes vendor purchase orders with line items
7. **Record Results**: Updates the import history with results
8. **Close Connections**: Cleans up connections and resources
Each import step uses temporary tables for processing and wraps operations in transactions to ensure data consistency.
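A rough sketch of that per-step pattern with node-postgres (the staging table and columns here are illustrative, not the actual import code):

```javascript
// Sketch of one import step: stage rows in a temp table, then upsert,
// all inside a single transaction. Table/column names are illustrative.
async function importStep(pgClient, rows) {
  await pgClient.query('BEGIN');
  try {
    await pgClient.query(
      'CREATE TEMP TABLE staging_products (pid INT, title TEXT) ON COMMIT DROP'
    );

    // Insert in batches to keep memory and parameter counts bounded
    const BATCH_SIZE = 1000;
    for (let i = 0; i < rows.length; i += BATCH_SIZE) {
      const batch = rows.slice(i, i + BATCH_SIZE);
      const placeholders = [];
      const params = [];
      batch.forEach((r, j) => {
        placeholders.push(`($${j * 2 + 1}, $${j * 2 + 2})`);
        params.push(r.pid, r.title);
      });
      await pgClient.query(
        `INSERT INTO staging_products (pid, title) VALUES ${placeholders.join(', ')}`,
        params
      );
    }

    // Upsert from the temp table into the real table
    await pgClient.query(`
      INSERT INTO products (pid, title)
      SELECT pid, title FROM staging_products
      ON CONFLICT (pid) DO UPDATE SET title = EXCLUDED.title
    `);

    await pgClient.query('COMMIT');
  } catch (err) {
    await pgClient.query('ROLLBACK');
    throw err;
  }
}
```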
## Column Mappings
### Categories
| PostgreSQL Column | MySQL Source | Transformation |
|-------------------|---------------------------------|----------------------------------------------|
| cat_id | product_categories.cat_id | Direct mapping |
| name | product_categories.name | Direct mapping |
| type | product_categories.type | Direct mapping |
| parent_id | product_categories.master_cat_id| NULL for top-level categories (types 10, 20) |
| description | product_categories.combined_name| Direct mapping |
| status | N/A | Hard-coded 'active' |
| created_at | N/A | Current timestamp |
| updated_at | N/A | Current timestamp |
**Notes:**
- Categories are processed in hierarchical order by type: [10, 20, 11, 21, 12, 13]
- Type 10/20 are top-level categories with no parent
- Types 11/21/12/13 are child categories that reference parent categories
### Products
| PostgreSQL Column | MySQL Source | Transformation |
|----------------------|----------------------------------|---------------------------------------------------------------|
| pid | products.pid | Direct mapping |
| title | products.description | Direct mapping |
| description | products.notes | Direct mapping |
| sku | products.itemnumber | Fallback to 'NO-SKU' if empty |
| stock_quantity | shop_inventory.available_local | Capped at 5000, minimum 0 |
| preorder_count | current_inventory.onpreorder | Default 0 |
| notions_inv_count | product_notions_b2b.inventory | Default 0 |
| price | product_current_prices.price_each| Default 0, filtered on active=1 |
| regular_price | products.sellingprice | Default 0 |
| cost_price | product_inventory | Weighted average: SUM(costeach * count) / SUM(count) when count > 0, or latest costeach |
| vendor | suppliers.companyname | Via supplier_item_data.supplier_id |
| vendor_reference | supplier_item_data | supplier_itemnumber or notions_itemnumber based on vendor |
| notions_reference | supplier_item_data.notions_itemnumber | Direct mapping |
| brand | product_categories.name | Linked via products.company |
| line | product_categories.name | Linked via products.line |
| subline | product_categories.name | Linked via products.subline |
| artist | product_categories.name | Linked via products.artist |
| categories | product_category_index | Comma-separated list of category IDs |
| created_at | products.date_created | Validated date, NULL if invalid |
| first_received | products.datein | Validated date, NULL if invalid |
| landing_cost_price | NULL | Not set |
| barcode | products.upc | Direct mapping |
| harmonized_tariff_code| products.harmonized_tariff_code | Direct mapping |
| updated_at | products.stamp | Validated date, NULL if invalid |
| visible | shop_inventory | Calculated from show + buyable > 0 |
| managing_stock | N/A | Hard-coded true |
| replenishable | Multiple fields | Complex calculation based on reorder, dates, etc. |
| permalink | N/A | Constructed URL with product ID |
| moq | supplier_item_data | notions_qty_per_unit or supplier_qty_per_unit, minimum 1 |
| uom | N/A | Hard-coded 1 |
| rating | products.rating | Direct mapping |
| reviews | products.rating_votes | Direct mapping |
| weight | products.weight | Direct mapping |
| length | products.length | Direct mapping |
| width | products.width | Direct mapping |
| height | products.height | Direct mapping |
| country_of_origin | products.country_of_origin | Direct mapping |
| location | products.location | Direct mapping |
| total_sold | order_items | SUM(qty_ordered) for all order_items where prod_pid = pid |
| baskets | mybasket | COUNT of records where mb.item = pid and qty > 0 |
| notifies | product_notify | COUNT of records where pn.pid = pid |
| date_last_sold | product_last_sold.date_sold | Validated date, NULL if invalid |
| image | N/A | Constructed from pid and image URL pattern |
| image_175 | N/A | Constructed from pid and image URL pattern |
| image_full | N/A | Constructed from pid and image URL pattern |
| options | NULL | Not set |
| tags | NULL | Not set |
**Notes:**
- Replenishable calculation:
```sql
CASE
  WHEN p.reorder < 0 THEN 0
  WHEN (
    (COALESCE(pls.date_sold, '0000-00-00') = '0000-00-00' OR pls.date_sold <= DATE_SUB(CURRENT_DATE, INTERVAL 5 YEAR))
    AND (p.datein = '0000-00-00 00:00:00' OR p.datein <= DATE_SUB(CURRENT_TIMESTAMP, INTERVAL 5 YEAR))
    AND (p.date_refill = '0000-00-00 00:00:00' OR p.date_refill <= DATE_SUB(CURRENT_TIMESTAMP, INTERVAL 5 YEAR))
  ) THEN 0
  ELSE 1
END
```
In business terms, a product is considered NOT replenishable only if:
- It was manually flagged as not replenishable (negative reorder value)
- OR it shows no activity across ALL metrics (no sales AND no receipts AND no refills in the past 5 years)
- Image URLs are constructed using this pattern:
```javascript
const paddedPid = pid.toString().padStart(6, '0');
const prefix = paddedPid.slice(0, 3);
const basePath = `${imageUrlBase}${prefix}/${pid}`;
return {
  image: `${basePath}-t-${iid}.jpg`,
  image_175: `${basePath}-175x175-${iid}.jpg`,
  image_full: `${basePath}-o-${iid}.jpg`
};
```
### Product Categories (Relationship)
| PostgreSQL Column | MySQL Source | Transformation |
|-------------------|-----------------------------------|---------------------------------------------------------------|
| pid | products.pid | Direct mapping |
| cat_id | product_category_index.cat_id | Direct mapping, filtered by category types |
**Notes:**
- Only categories of types 10, 20, 11, 21, 12, 13 are imported
- Categories 16 and 17 are explicitly excluded
### Orders
| PostgreSQL Column | MySQL Source | Transformation |
|-------------------|-----------------------------------|---------------------------------------------------------------|
| order_number | order_items.order_id | Direct mapping |
| pid | order_items.prod_pid | Direct mapping |
| sku | order_items.prod_itemnumber | Fallback to 'NO-SKU' if empty |
| date | _order.date_placed_onlydate | Via join to _order table |
| price | order_items.prod_price | Direct mapping |
| quantity | order_items.qty_ordered | Direct mapping |
| discount | Multiple sources | Complex calculation (see notes) |
| tax | order_tax_info_products.item_taxes_to_collect | Via latest order_tax_info record |
| tax_included | N/A | Hard-coded false |
| shipping | N/A | Hard-coded 0 |
| customer | _order.order_cid | Direct mapping |
| customer_name | users | CONCAT(users.firstname, ' ', users.lastname) |
| status | _order.order_status | Direct mapping |
| canceled | _order.date_cancelled | Boolean: true if date_cancelled is not '0000-00-00 00:00:00' |
| costeach | order_costs | From latest record or fallback to price * 0.5 |
**Notes:**
- Only orders with order_status >= 15 and with a valid date_placed are processed
- For incremental imports, only orders modified since last sync are processed
- Discount calculation combines three sources:
1. Base discount: order_items.prod_price_reg - order_items.prod_price
2. Promo discount: SUM of order_discount_items.amount
3. Proportional order discount: Calculation based on order subtotal proportion
```javascript
(oi.base_discount +
COALESCE(ot.promo_discount, 0) +
CASE
WHEN om.summary_discount > 0 AND om.summary_subtotal > 0 THEN
ROUND((om.summary_discount * (oi.price * oi.quantity)) / NULLIF(om.summary_subtotal, 0), 2)
ELSE 0
END)::DECIMAL(10,2)
```
- Taxes are taken from the latest tax record for an order
- Cost data is taken from the latest non-pending cost record
### Purchase Orders
| PostgreSQL Column | MySQL Source | Transformation |
|-------------------|-----------------------------------|---------------------------------------------------------------|
| po_id | po.po_id | Default 0 if NULL |
| pid | po_products.pid | Direct mapping |
| sku | products.itemnumber | Fallback to 'NO-SKU' if empty |
| name | products.description | Fallback to 'Unknown Product' |
| cost_price | po_products.cost_each | Direct mapping |
| po_cost_price | po_products.cost_each | Duplicate of cost_price |
| vendor | suppliers.companyname | Fallback to 'Unknown Vendor' if empty |
| date | po.date_ordered | Fallback to po.date_created if NULL |
| expected_date | po.date_estin | Direct mapping |
| status | po.status | Default 1 if NULL |
| notes | po.short_note | Fallback to po.notes if NULL |
| ordered | po_products.qty_each | Direct mapping |
| received | N/A | Hard-coded 0 |
| receiving_status | N/A | Hard-coded 1 |
**Notes:**
- Only POs created within last 1 year (incremental) or 5 years (full) are processed
- For incremental imports, only POs modified since last sync are processed
### Metadata Tables
#### import_history
| PostgreSQL Column | Source | Notes |
|-------------------|-----------------------------------|---------------------------------------------------------------|
| id | Auto-increment | Primary key |
| table_name | Code | 'all_tables' for overall import |
| start_time | NOW() | Import start time |
| end_time | NOW() | Import completion time |
| duration_seconds | Calculation | Elapsed seconds |
| is_incremental | INCREMENTAL_UPDATE | Flag from config |
| records_added | Calculation | Sum from all imports |
| records_updated | Calculation | Sum from all imports |
| status | Code | 'running', 'completed', 'failed', or 'cancelled' |
| error_message | Exception | Error message if failed |
| additional_info | JSON | Configuration and results |
#### sync_status
| PostgreSQL Column | Source | Notes |
|----------------------|--------------------------------|---------------------------------------------------------------|
| table_name | Code | Name of imported table |
| last_sync_timestamp | NOW() | Timestamp of successful sync |
| last_sync_id | NULL | Not used currently |
## Special Calculations
### Date Validation
MySQL dates are validated before insertion into PostgreSQL:
```javascript
function validateDate(mysqlDate) {
  if (!mysqlDate || mysqlDate === '0000-00-00' || mysqlDate === '0000-00-00 00:00:00') {
    return null;
  }
  // Check if the date is valid
  const date = new Date(mysqlDate);
  return isNaN(date.getTime()) ? null : mysqlDate;
}
```
### Retry Mechanism
Operations that might fail temporarily are retried with exponential backoff:
```javascript
async function withRetry(operation, errorMessage) {
  let lastError;
  for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
    try {
      return await operation();
    } catch (error) {
      lastError = error;
      console.error(`${errorMessage} (Attempt ${attempt}/${MAX_RETRIES}):`, error);
      if (attempt < MAX_RETRIES) {
        const backoffTime = RETRY_DELAY * Math.pow(2, attempt - 1);
        await new Promise(resolve => setTimeout(resolve, backoffTime));
      }
    }
  }
  throw lastError;
}
```
### Progress Tracking
Progress is tracked with estimated time remaining:
```javascript
function estimateRemaining(startTime, current, total) {
  if (current === 0) return "Calculating...";
  const elapsedSeconds = (Date.now() - startTime) / 1000;
  const itemsPerSecond = current / elapsedSeconds;
  const remainingItems = total - current;
  const remainingSeconds = remainingItems / itemsPerSecond;
  return formatElapsedTime(remainingSeconds);
}
```
## Implementation Notes
### Transaction Management
All imports use transactions to ensure data consistency:
- **Categories**: Uses savepoints for each category type
- **Products**: Uses a single transaction for the entire import
- **Orders**: Uses a single transaction with temporary tables
- **Purchase Orders**: Uses a single transaction with temporary tables
### Memory Usage Optimization
To minimize memory usage when processing large datasets:
1. Data is processed in batches (100-5000 records per batch)
2. Temporary tables are used for intermediate data
3. Some queries use cursors to avoid loading all results at once
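A sketch of the cursor-based reading mentioned in point 3, assuming the `pg-cursor` package is the mechanism in use (the actual scripts may stream rows differently):

```javascript
const Cursor = require('pg-cursor');

// Sketch: consume a large result set in fixed-size chunks instead of
// loading every row into memory at once.
async function processInChunks(pgClient, sql, params, handleBatch) {
  const cursor = pgClient.query(new Cursor(sql, params));
  try {
    let rows;
    do {
      rows = await cursor.read(500); // up to 500 rows per round trip
      if (rows.length > 0) await handleBatch(rows);
    } while (rows.length > 0);
  } finally {
    await cursor.close();
  }
}
```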
### MySQL vs PostgreSQL Compatibility
The scripts handle differences between MySQL and PostgreSQL:
1. MySQL-specific syntax like `USE INDEX` is removed for PostgreSQL
2. `GROUP_CONCAT` in MySQL becomes string operations in PostgreSQL
3. Transaction syntax differences are abstracted in the connection wrapper
4. PostgreSQL's `ON CONFLICT` replaces MySQL's `ON DUPLICATE KEY UPDATE`
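For example, an upsert that MySQL would express with `ON DUPLICATE KEY UPDATE` becomes the following in the PostgreSQL-side import code (column names are illustrative):

```javascript
// MySQL:      INSERT ... ON DUPLICATE KEY UPDATE name = VALUES(name)
// PostgreSQL: INSERT ... ON CONFLICT (...) DO UPDATE SET ...
async function upsertCategory(pgClient, catId, name) {
  await pgClient.query(
    `INSERT INTO categories (cat_id, name, updated_at)
     VALUES ($1, $2, NOW())
     ON CONFLICT (cat_id) DO UPDATE SET
       name = EXCLUDED.name,
       updated_at = NOW()`,
    [catId, name]
  );
}
```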
### SSH Tunnel
Database connections go through an SSH tunnel for security:
```javascript
ssh.forwardOut(
  "127.0.0.1",
  0,
  sshConfig.prodDbConfig.host,
  sshConfig.prodDbConfig.port,
  (err, stream) => {
    if (err) return reject(err);
    resolve({ ssh, stream });
  }
);
```

File diff suppressed because it is too large

File diff suppressed because it is too large

271
docs/routes-cleanup.md Normal file

@@ -0,0 +1,271 @@
**Analysis of Potential Issues**
1. **Obsolete Functionality:**
* **`config.js` Legacy Endpoints:** The endpoints `GET /config/`, `PUT /config/stock-thresholds/:id`, `PUT /config/lead-time-thresholds/:id`, `PUT /config/sales-velocity/:id`, `PUT /config/abc-classification/:id`, `PUT /config/safety-stock/:id`, and `PUT /config/turnover/:id` appear **highly likely to be obsolete**. They reference older, single-row config tables (`stock_thresholds`, etc.) while newer endpoints (`/config/global`, `/config/products`, `/config/vendors`) manage settings in more structured tables (`settings_global`, `settings_product`, `settings_vendor`). Unless specifically required for backward compatibility, these legacy endpoints should be removed to avoid confusion and potential data conflicts.
* **`analytics.js` Forecast Endpoint (`GET /analytics/forecast`):** This endpoint uses **MySQL syntax** (`DATEDIFF`, `DATE_FORMAT`, `JSON_OBJECT`, `?` placeholders) but seems intended to run within the analytics module which otherwise uses PostgreSQL (`req.app.locals.pool`, `date_trunc`, `::text`, `$1` placeholders). This endpoint is likely **obsolete or misplaced** and will not function correctly against the PostgreSQL database.
* **`csv.js` Redundant Actions:**
* `POST /csv/update` seems redundant with `POST /csv/full-update`. The latter uses the `runScript` helper and dedicated state (`activeFullUpdate`), appearing more robust. `/csv/update` might be older or incomplete.
* `POST /csv/reset` seems redundant with `POST /csv/full-reset`. Similar reasoning applies; `/csv/full-reset` appears preferred.
* **`products.js` Import Endpoint (`POST /products/import`):** This is **dangerous duplication**. The `/csv` module handles imports (`/csv/import`, `/csv/import-from-prod`) with locking (`activeImport`) to prevent concurrent operations. This endpoint lacks such locking and could corrupt data if run simultaneously with other CSV/reset operations. It should likely be removed.
* **`products.js` Metrics Endpoint (`GET /products/:id/metrics`):** This is redundant. The `/metrics/:pid` endpoint provides the same, possibly more comprehensive, data directly from the `product_metrics` table. Clients should use `/metrics/:pid` instead.
2. **Overlap or Inappropriate Duplication of Effort:**
* **AI Prompt Getters:** `GET /ai-prompts/type/general` and `GET /ai-prompts/type/system` could potentially be handled by adding a query parameter filter to `GET /ai-prompts/` (e.g., `GET /ai-prompts?prompt_type=general`). However, dedicated endpoints for single, specific items can sometimes be simpler. This is more of a design choice than a major issue.
* **Vendor Performance/Metrics:** There are multiple ways to get vendor performance data:
* `GET /analytics/vendors` (uses `vendor_metrics`)
* `GET /dashboard/vendor/performance` (uses `purchase_orders`)
* `GET /purchase-orders/vendor-metrics` (uses `purchase_orders`)
* `GET /vendors-aggregate/` (uses `vendor_metrics`, augmented with `purchase_orders`)
This suggests significant overlap. The `/vendors-aggregate` endpoint seems the most comprehensive, combining pre-aggregated data with some real-time info. The others, especially `/dashboard/vendor/performance` and `/purchase-orders/vendor-metrics` which calculate directly from `purchase_orders`, might be redundant or less performant.
* **Product Listing:**
* `GET /products/` lists products joining `products`, `product_metrics`, and `categories`.
* `GET /metrics/` lists products primarily from `product_metrics`.
They offer similar filtering/sorting. If `product_metrics` contains all necessary display fields, `GET /products/` might be partly redundant for simple listing views, although it does provide aggregated category names. Evaluate if both full list endpoints are necessary.
* **Image Uploads/Management:** Image handling is split:
* `products-import.js`: Uploads temporary images for product import to `/uploads/products/`, schedules deletion.
* `reusable-images.js`: Uploads persistent images to `/uploads/reusable/`, stores metadata in DB.
* `products-import.js` has `/check-file` and `/list-uploads` that can see *both* directories, while `reusable-images.js` has a `/check-file` that only sees its own. This separation could be confusing. Clarify the purpose and lifecycle of images in each directory.
* **Background Task Management (`csv.js`):** The use of `activeImport` for multiple unrelated tasks (import, reset, metrics calc) prevents concurrency, which might be too restrictive. The cancellation logic (`/cancel`) only targets `full-update`/`full-reset`, not tasks locked by `activeImport`. This needs unification.
* **Analytics/Dashboard Base Table Queries:** Several endpoints in `analytics.js` (`/pricing`, `/categories`) and `dashboard.js` (`/best-sellers`, `/sales/metrics`, `/trending/products`, `/key-metrics`, `/inventory-health`, `/sales-overview`) query base tables (`orders`, `products`, `purchase_orders`) directly, while many others leverage pre-aggregated `_metrics` tables. This inconsistency can lead to performance differences and suggests potential for optimization by using aggregates where possible.
3. **Obvious Mistakes / Data Issues:**
* **AI Prompt Fetching:** `GET /ai-prompts/company/:companyId`, `/type/general`, `/type/system` return `result.rows[0]`. This assumes uniqueness. If the underlying DB constraints (`unique_company_prompt`, etc.) fail or aren't present, this could silently hide data if multiple rows match. The use of unique constraint handling in POST/PUT suggests this is likely intended and safe *if* DB constraints are solid.
* **Mixed Databases & SSH Tunnels:** The heavy reliance in `ai_validation.js` and `products-import.js` on connecting to a production MySQL DB via SSH tunnel while also using a local PostgreSQL DB adds significant architectural complexity.
* **Inefficiency:** In `ai_validation.js` (`generateDebugResponse`), an SSH tunnel and MySQL connection (`promptTunnel`, `promptConnection`) are established but seem unused when fetching prompts (which correctly come from the PG pool `res.app.locals.pool`). This is wasted effort.
* **Improvement:** The `getDbConnection` function in `products-import.js` implements caching/pooling for the SSH/MySQL connection; this is much better and should ideally be used consistently wherever the production DB is accessed (e.g., in `ai_validation.js`).
* **`products.js` Brand Filtering:** `GET /products/brands` filters brands based on having associated purchase orders with a cost >= 500. This seems arbitrary for a general list of brands and might return incomplete results depending on the use case.
* **Type Handling:** Ensure `parseValue` handles all required types and edge cases correctly, especially for filtering complex queries in `*-aggregate` and `metrics` routes. Explicit type casting in SQL (`::numeric`, `::text`, etc.) is generally good practice in PostgreSQL.
* **Dummy Data:** Several `dashboard.js` endpoints return hardcoded dummy data on errors or when no data is found. While this prevents UI crashes, it can mask real issues. Ensure logging is robust when fallbacks are used.
**Summary of Endpoints**
Here's a summary of the available endpoints, grouped by their likely file/module:
**1. AI Prompts (`ai_prompts.js`)**
* `GET /`: Get all AI prompts.
* `GET /:id`: Get a specific AI prompt by its ID.
* `GET /company/:companyId`: Get the AI prompt for a specific company (expects one). **(Deprecated)**
* `GET /type/general`: Get the general AI prompt (expects one). **(Deprecated)**
* `GET /type/system`: Get the system AI prompt (expects one). **(Deprecated)**
* `GET /by-type`: Get AI prompt by type (general, system, company_specific) with optional company parameter. **(New Consolidated Endpoint)**
* `POST /`: Create a new AI prompt.
* `PUT /:id`: Update an existing AI prompt.
* `DELETE /:id`: Delete an AI prompt.
**2. AI Validation (`ai_validation.js`)**
* `POST /debug`: Generate and view the structure of prompts and taxonomy data (for debugging, doesn't call OpenAI). Connects to Prod MySQL (taxonomy) and Local PG (prompts, performance).
* `POST /validate`: Validate product data using OpenAI. Connects to Prod MySQL (taxonomy) and Local PG (prompts, performance).
* `GET /test-taxonomy`: Test endpoint to query sample taxonomy data from Prod MySQL.
**3. Analytics (`analytics.js`)**
* `GET /stats`: Get overall business statistics from metrics tables.
* `GET /profit`: Get profit analysis data (by category, over time, top products) from metrics tables.
* `GET /vendors`: Get vendor performance analysis from `vendor_metrics`.
* `GET /stock`: Get stock analysis data (turnover, levels, critical items) from metrics tables.
* `GET /pricing`: Get pricing analysis (price points, elasticity, recommendations) - **uses `orders` table**.
* `GET /categories`: Get category performance analysis (revenue, profit, growth, distribution, trends) - **uses `orders` and `products` tables**.
* `GET /forecast`: (**Likely Obsolete/Broken**) Attempts to get forecast data using MySQL syntax.
**4. Brands Aggregate (`brands-aggregate.js`)**
* `GET /filter-options`: Get distinct brand names and statuses for UI filters (from `brand_metrics`).
* `GET /stats`: Get overall statistics related to brands (from `brand_metrics`).
* `GET /`: List brands with aggregated metrics, supporting filtering, sorting, pagination (from `brand_metrics`).
**5. Categories Aggregate (`categories-aggregate.js`)**
* `GET /filter-options`: Get distinct category types, statuses, and counts for UI filters (from `category_metrics` & `categories`).
* `GET /stats`: Get overall statistics related to categories (from `category_metrics` & `categories`).
* `GET /`: List categories with aggregated metrics, supporting filtering, sorting (incl. hierarchy), pagination (from `category_metrics` & `categories`).
**6. Configuration (`config.js`)**
* **(New)** `GET /global`: Get all global settings.
* **(New)** `PUT /global`: Update global settings.
* **(New)** `GET /products`: List product-specific settings with pagination/search.
* **(New)** `PUT /products/:pid`: Update/Create product-specific settings.
* **(New)** `POST /products/:pid/reset`: Reset product settings to defaults.
* **(New)** `GET /vendors`: List vendor-specific settings with pagination/search.
* **(New)** `PUT /vendors/:vendor`: Update/Create vendor-specific settings.
* **(New)** `POST /vendors/:vendor/reset`: Reset vendor settings to defaults.
* **(Legacy/Obsolete)** `GET /`: Get all config from old single-row tables.
* **(Legacy/Obsolete)** `PUT /stock-thresholds/:id`: Update old stock thresholds.
* **(Legacy/Obsolete)** `PUT /lead-time-thresholds/:id`: Update old lead time thresholds.
* **(Legacy/Obsolete)** `PUT /sales-velocity/:id`: Update old sales velocity config.
* **(Legacy/Obsolete)** `PUT /abc-classification/:id`: Update old ABC config.
* **(Legacy/Obsolete)** `PUT /safety-stock/:id`: Update old safety stock config.
* **(Legacy/Obsolete)** `PUT /turnover/:id`: Update old turnover config.
**7. CSV Operations & Background Tasks (`csv.js`)**
* `GET /:type/progress`: SSE endpoint for full update/reset progress.
* `GET /test`: Simple test endpoint.
* `GET /status`: Check status of the generic background task lock (`activeImport`).
* `GET /calculate-metrics/status`: Check status of metrics calculation.
* `GET /history/import`: Get recent import history.
* `GET /history/calculate`: Get recent metrics calculation history.
* `GET /status/modules`: Get last calculation time per module.
* `GET /status/tables`: Get last sync time per table.
* `GET /status/table-counts`: Get record counts for key tables.
* `POST /update`: (**Potentially Obsolete**) Trigger `update-csv.js` script.
* `POST /import`: Trigger `import-csv.js` script.
* `POST /cancel`: Cancel `/full-update` or `/full-reset` task.
* `POST /reset`: (**Potentially Obsolete**) Trigger `reset-db.js` script.
* `POST /reset-metrics`: Trigger `reset-metrics.js` script.
* `POST /calculate-metrics`: Trigger `calculate-metrics.js` script.
* `POST /import-from-prod`: Trigger `import-from-prod.js` script.
* `POST /full-update`: Trigger `full-update.js` script (preferred update).
* `POST /full-reset`: Trigger `full-reset.js` script (preferred reset).
**8. Dashboard (`dashboard.js`)**
* `GET /stock/metrics`: Get dashboard stock summary metrics & brand breakdown.
* `GET /purchase/metrics`: Get dashboard purchase order summary metrics & vendor breakdown.
* `GET /replenishment/metrics`: Get dashboard replenishment summary & top variants.
* `GET /forecast/metrics`: Get dashboard forecast summary, daily, and category breakdown.
* `GET /overstock/metrics`: Get dashboard overstock summary & category breakdown.
* `GET /overstock/products`: Get list of top overstocked products.
* `GET /best-sellers`: Get dashboard best-selling products, brands, categories - **uses `orders`, `products`**.
* `GET /sales/metrics`: Get dashboard sales summary for a period - **uses `orders`**.
* `GET /low-stock/products`: Get list of top low stock/critical products.
* `GET /trending/products`: Get list of trending products - **uses `orders`, `products`**.
* `GET /vendor/performance`: Get dashboard vendor performance details - **uses `purchase_orders`**.
* `GET /key-metrics`: Get dashboard summary KPIs - **uses multiple base tables**.
* `GET /inventory-health`: Get dashboard inventory health overview - **uses `products`, `product_metrics`**.
* `GET /replenish/products`: Get list of products needing replenishment (overlaps `/low-stock/products`).
* `GET /sales-overview`: Get daily sales totals for chart - **uses `orders`**.
**9. Product Import Utilities (`products-import.js`)**
* `POST /upload-image`: Upload temporary product image, schedule deletion.
* `DELETE /delete-image`: Delete temporary product image.
* `GET /field-options`: Get dropdown options for product fields from Prod MySQL (cached).
* `GET /product-lines/:companyId`: Get product lines for a company from Prod MySQL (cached).
* `GET /sublines/:lineId`: Get sublines for a line from Prod MySQL (cached).
* `GET /check-file/:filename`: Check existence/permissions of uploaded file (temp or reusable).
* `GET /list-uploads`: List files in upload directories.
* `GET /search-products`: Search products in Prod MySQL DB.
* `GET /check-upc-and-generate-sku`: Check UPC existence and generate SKU suggestion based on Prod MySQL data.
* `GET /product-categories/:pid`: Get assigned categories for a product from Prod MySQL.
**10. Product Metrics (`product-metrics.js`)**
* `GET /filter-options`: Get distinct filter values (vendor, brand, abcClass) from `product_metrics`.
* `GET /`: List detailed product metrics with filtering, sorting, pagination (primary data access).
* `GET /:pid`: Get full metrics record for a single product.
**11. Orders (`orders.js`)**
* `GET /`: List orders with summary info, filtering, sorting, pagination, and stats.
* `GET /:orderNumber`: Get details for a single order, including items.
**12. Products (`products.js`)**
* `GET /brands`: Get distinct brands (filtered by PO value).
* `GET /`: List products with core data + metrics, filtering, sorting, pagination.
* `GET /trending`: Get trending products based on `product_metrics`.
* `GET /:id`: Get details for a single product (core data + metrics).
* `POST /import`: (**Likely Obsolete/Dangerous**) Import products from CSV.
* `PUT /:id`: Update core product data.
* `GET /:id/metrics`: (**Redundant**) Get metrics for a single product.
* `GET /:id/time-series`: Get sales/PO history for a single product.
**13. Purchase Orders (`purchase-orders.js`)**
* `GET /`: List purchase orders with summary info, filtering, sorting, pagination, and summary stats.
* `GET /vendor-metrics`: Calculate vendor performance metrics from `purchase_orders`.
* `GET /cost-analysis`: Calculate cost analysis by category from `purchase_orders`.
* `GET /receiving-status`: Get summary counts based on PO receiving status.
* `GET /order-vs-received`: List product ordered vs. received quantities.
**14. Reusable Images (`reusable-images.js`)**
* `GET /`: List all reusable images.
* `GET /by-company/:companyId`: List global and company-specific images.
* `GET /global`: List only global images.
* `GET /:id`: Get a single reusable image record.
* `POST /upload`: Upload a new reusable image and create DB record.
* `PUT /:id`: Update reusable image metadata (name, global, company).
* `DELETE /:id`: Delete reusable image record and file.
* `GET /check-file/:filename`: Check existence/permissions of a reusable image file.
**15. Templates (`templates.js`)**
* `GET /`: List all product data templates.
* `GET /:company/:productType`: Get a specific template.
* `POST /`: Create a new template.
* `PUT /:id`: Update an existing template.
* `DELETE /:id`: Delete a template.
**16. Vendors Aggregate (`vendors-aggregate.js`)**
* `GET /filter-options`: Get distinct vendor names and statuses for UI filters (from `vendor_metrics`).
* `GET /stats`: Get overall statistics related to vendors (from `vendor_metrics` & `purchase_orders`).
* `GET /`: List vendors with aggregated metrics, supporting filtering, sorting, pagination (from `vendor_metrics` & `purchase_orders`).
**Recommendations:**
1. **Address Obsolete Endpoints:** Prioritize removing or confirming the necessity of the endpoints marked as obsolete/redundant (legacy config, `/analytics/forecast`, `/csv/update`, `/csv/reset`, `/products/import`, `/products/:id/metrics`).
2. **Consolidate Overlapping Functionality:** Review the multiple vendor performance and product listing endpoints. Decide on the primary method (e.g., using aggregate tables via `/vendors-aggregate` and `/metrics`) and refactor or remove the others. Clarify the image upload strategies.
3. **Standardize Data Access:** Decide whether `dashboard` and `analytics` endpoints should primarily use aggregate tables (like `/metrics`, `/brands-aggregate`, etc.) or if direct access to base tables is sometimes necessary. Aim for consistency and document the reasoning. Optimize queries hitting base tables if they must remain.
4. **Improve Background Task Management:** Refactor `csv.js` to use a unified locking mechanism (maybe separate locks per task type?) and a consistent cancellation strategy for all spawned/managed processes. Clarify the purpose of `update` vs `full-update` and `reset` vs `full-reset`.
5. **Optimize DB Connections:** Ensure the `getDbConnection` pooling/caching helper from `products-import.js` is used *consistently* across all modules interacting with the production MySQL database (especially `ai_validation.js`). Remove unnecessary tunnel creations.
6. **Review Data Integrity:** Double-check the assumptions made (e.g., uniqueness of AI prompts) and ensure database constraints enforce them. Review the `GET /products/brands` filtering logic.
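For item 4, one possible shape for a unified lock is sketched below. This is illustrative only, not the current `csv.js` implementation:

```javascript
// Sketch: one lock entry per task type, with uniform status and cancellation.
const activeTasks = new Map(); // taskType -> { child, startedAt }

function startTask(taskType, scriptPath, spawnFn) {
  if (activeTasks.has(taskType)) {
    throw new Error(`${taskType} is already running`);
  }
  const child = spawnFn(scriptPath);
  activeTasks.set(taskType, { child, startedAt: Date.now() });
  child.on('exit', () => activeTasks.delete(taskType));
  return child;
}

function cancelTask(taskType) {
  const entry = activeTasks.get(taskType);
  if (!entry) return false;
  entry.child.kill('SIGTERM');
  return true;
}
```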
## Changes Made
1. **Removed Obsolete Legacy Endpoints in `config.js`**:
- Removed `GET /config/` endpoint
- Removed `PUT /config/stock-thresholds/:id` endpoint
- Removed `PUT /config/lead-time-thresholds/:id` endpoint
- Removed `PUT /config/sales-velocity/:id` endpoint
- Removed `PUT /config/abc-classification/:id` endpoint
- Removed `PUT /config/safety-stock/:id` endpoint
- Removed `PUT /config/turnover/:id` endpoint
These endpoints were obsolete as they referenced older, single-row config tables that have been replaced by newer endpoints using the structured tables `settings_global`, `settings_product`, and `settings_vendor`.
2. **Removed MySQL Syntax `/forecast` Endpoint in `analytics.js`**:
- Removed `GET /analytics/forecast` endpoint that was using MySQL-specific syntax incompatible with the PostgreSQL database used elsewhere in the application.
3. **Renamed and Removed Redundant Endpoints**:
- Renamed `csv.js` to `data-management.js` while maintaining the same `/csv/*` endpoint paths for consistency
- Removed deprecated `/csv/update` endpoint (now fully replaced by `/csv/full-update`)
- Removed deprecated `/csv/reset` endpoint (now fully replaced by `/csv/full-reset`)
- Removed deprecated `/products/import` endpoint (now handled by `/csv/import`)
- Removed deprecated `/products/:id/metrics` endpoint (now handled by `/metrics/:pid`)
4. **Fixed Data Integrity Issues**:
- Improved `GET /products/brands` endpoint by removing the arbitrary filtering logic that was only showing brands with purchase orders that had a total cost of at least $500
- The updated endpoint now returns all distinct brands from visible products, providing more complete data
5. **Optimized Database Connections**:
- Created a new `dbConnection.js` utility file that encapsulates the optimized database connection management logic
- Improved the `ai-validation.js` file to use this shared connection management, eliminating unnecessary repeated tunnel creation
- Added proper connection pooling with timeout-based connection reuse, reducing the overhead of repeatedly creating SSH tunnels
- Added query result caching for frequently accessed data to improve performance
These changes improve maintainability by removing duplicate code, enhance consistency by standardizing on the newer endpoint patterns, and optimize performance by reducing redundant database connections.
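For context, a cached tunnel/connection helper along these lines might look like the sketch below (assuming `ssh2` and `mysql2/promise`; the actual `dbConnection.js` may differ):

```javascript
const { Client } = require('ssh2');
const mysql = require('mysql2/promise');

let cached = null; // { connection, lastUsed }
const REUSE_TIMEOUT_MS = 60 * 1000;

// Sketch: reuse an existing tunnel + MySQL connection if it was used recently.
async function getDbConnection(sshConfig, mysqlConfig) {
  if (cached && Date.now() - cached.lastUsed < REUSE_TIMEOUT_MS) {
    cached.lastUsed = Date.now();
    return cached.connection;
  }

  const stream = await new Promise((resolve, reject) => {
    const ssh = new Client();
    ssh
      .on('ready', () => {
        ssh.forwardOut('127.0.0.1', 0, mysqlConfig.host, mysqlConfig.port,
          (err, s) => (err ? reject(err) : resolve(s)));
      })
      .on('error', reject)
      .connect(sshConfig);
  });

  const connection = await mysql.createConnection({ ...mysqlConfig, stream });
  cached = { connection, lastUsed: Date.now() };
  return connection;
}
```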
## Additional Improvements
1. **Further Database Connection Optimizations**:
- Extended the use of the optimized database connection utility to additional endpoints in `ai-validation.js`
- Updated the `/validate` endpoint and `/test-taxonomy` endpoint to use `getDbConnection`
- Ensured consistent connection management across all routes that access the production database
2. **AI Prompts Data Integrity Verification**:
- Confirmed proper uniqueness constraints are in place in the database schema for AI prompts
- The schema includes:
- `unique_company_prompt` constraint ensuring only one prompt per company
- `idx_unique_general_prompt` index ensuring only one general prompt in the system
- `idx_unique_system_prompt` index ensuring only one system prompt in the system
- Endpoint handlers properly handle uniqueness constraint violations with appropriate 409 Conflict responses
- Validation ensures company-specific prompts have company IDs, while general/system prompts do not
3. **AI Prompts Endpoint Consolidation**:
- Added a new consolidated `/by-type` endpoint that handles all types of prompts (general, system, company_specific)
- Marked the existing separate endpoints as deprecated with console warnings
- Maintained backward compatibility while providing a cleaner API moving forward
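A hedged sketch of what the consolidated `/by-type` handler might look like (Express + node-postgres; the table and column names are assumptions):

```javascript
const express = require('express');
const router = express.Router();

// GET /ai-prompts/by-type?prompt_type=company_specific&company=123
router.get('/by-type', async (req, res) => {
  const { prompt_type, company } = req.query;
  const pool = req.app.locals.pool;

  try {
    let result;
    if (prompt_type === 'company_specific') {
      if (!company) {
        return res.status(400).json({ error: 'company is required for company_specific prompts' });
      }
      result = await pool.query(
        'SELECT * FROM ai_prompts WHERE prompt_type = $1 AND company_id = $2',
        [prompt_type, company]
      );
    } else {
      // general / system prompts are unique system-wide
      result = await pool.query(
        'SELECT * FROM ai_prompts WHERE prompt_type = $1',
        [prompt_type]
      );
    }
    if (result.rows.length === 0) {
      return res.status(404).json({ error: 'Prompt not found' });
    }
    return res.json(result.rows[0]);
  } catch (err) {
    console.error('Error fetching prompt by type:', err);
    return res.status(500).json({ error: 'Failed to fetch prompt' });
  }
});
```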
## Completed Items
✅ Removed obsolete legacy endpoints in `config.js`
✅ Removed MySQL syntax `/forecast` endpoint in `analytics.js`
✅ Fixed `GET /products/brands` endpoint filtering logic
✅ Created reusable database connection utility (`dbConnection.js`)
✅ Optimized database connections in `ai-validation.js`
✅ Verified data integrity in AI prompts handling
✅ Consolidated AI prompts endpoints with a unified `/by-type` endpoint
## Remaining Items
- Consider adding additional error handling and logging for database connections
- Perform load testing on the optimized database connections to ensure they handle high traffic properly

23
docs/setup-chat.md Normal file

@@ -0,0 +1,23 @@
This portion of the application is going to be a read-only chat archive. It will pull data from a rocketchat export converted to PostgreSQL. This is a separate database from the one the rest of the inventory application uses, but it will still use users and permissions from the inventory database. Both databases are on the same Postgres instance.
For now, let's add a select to the top of the page that allows me to "view as" any of the users in the rocketchat database. We'll connect this to the authorization in the main application later.
The db connection info is stored in the .env file in the inventory-server root. It contains these variables
DB_HOST=localhost
DB_USER=rocketchat_user
DB_PASSWORD=password
DB_NAME=rocketchat_converted
DB_PORT=5432
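A minimal connection sketch using these variables, assuming the chat-server uses node-postgres like the rest of the backend (the dotenv path is illustrative):

```javascript
// Illustrative: point dotenv at the inventory-server root .env
require('dotenv').config({ path: '../.env' });
const { Pool } = require('pg');

const chatPool = new Pool({
  host: process.env.DB_HOST,
  user: process.env.DB_USER,
  password: process.env.DB_PASSWORD,
  database: process.env.DB_NAME,
  port: Number(process.env.DB_PORT) || 5432,
});

module.exports = { chatPool };
```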
Not all of the information in this database is relevant as it's a direct export from another app with more features. You can use the query tool to examine the structure and data available.
Server-side files should use similar conventions and the same technologies as the inventory-server (inventory-server root) and auth-server (inventory-server/auth). I will provide my current pm2 ecosystem file upon request for you to add the configuration for the new "chat-server". I use Caddy on the server and can provide my Caddyfile to assist with configuring the API routes. All configuration and routes for the chat-server should go in the inventory-server/chat folder or subfolders you create.
The folder you see as inventory-server is actually a direct mount of the /var/www/html/inventory folder on the server. You can read and write files from there like usual, but any terminal commands for the server I will have to run myself.
The "Chat" page should be added to the main application sidebar and a similar page to the others should be created in inventory/src/pages. All other frontend pages should go in inventory/src/components/chat.
The application uses shadcn components and those should be used for all ui elements where possible (located in inventory/src/components/ui). The UI should match existing pages and components.

112
docs/split-up-pos.md Normal file

@@ -0,0 +1,112 @@
Okay, I understand completely now. The core issue is that the previous approaches tried too hard to reconcile every receipt back to a specific PO line within the `purchase_orders` table structure, which doesn't reflect the reality where receipts can be independent events. Your downstream scripts, especially `daily_snapshots` and `product_metrics`, rely on having a complete picture of *all* receivings.
Let's pivot to a model that respects both distinct data streams: **Orders (Intent)** and **Receivings (Actuals)**.
**Proposed Solution: Separate `purchase_orders` and `receivings` Tables**
This is the cleanest way to model the reality you've described.
1. **`purchase_orders` Table:**
* **Purpose:** Tracks the status and details of purchase *orders* placed. Represents the *intent* to receive goods.
* **Key Columns:** `po_id`, `pid`, `ordered` (quantity ordered), `po_cost_price`, `date` (order/created date), `expected_date`, `status` (PO lifecycle: 'ordered', 'canceled', 'done'), `vendor`, `notes`, etc.
* **Crucially:** This table *does not* need a `received` column or a `receiving_history` column derived from complex allocations. It focuses solely on the PO itself.
2. **`receivings` Table (New or Refined):**
* **Purpose:** Tracks every single line item received, regardless of whether it was linked to a PO during the receiving process. Represents the *actual* goods that arrived.
* **Key Columns:**
* `receiving_id` (Identifier for the overall receiving document/batch)
* `pid` (Product ID received)
* `received_qty` (Quantity received for this specific line)
* `cost_each` (Actual cost paid for this item on this receiving)
* `received_date` (Actual date the item was received)
* `received_by` (Employee ID/Name)
* `source_po_id` (The `po_id` entered on the receiving screen, *nullable*. Stores the original link attempt, even if it was wrong or missing)
* `source_receiving_status` (The status from the source `receivings` table: 'partial_received', 'full_received', 'paid', 'canceled')
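A rough sketch of what that table could look like; the column types and the composite key are assumptions to be adjusted against the source data:
```sql
-- Illustrative shape of the new receivings table (one row per received line item)
CREATE TABLE IF NOT EXISTS public.receivings (
    receiving_id            INTEGER   NOT NULL,  -- receiving document/batch
    pid                     INTEGER   NOT NULL,  -- product received
    received_qty            NUMERIC   NOT NULL,
    cost_each               NUMERIC,
    received_date           TIMESTAMP,
    received_by             TEXT,
    source_po_id            INTEGER,             -- nullable: PO id entered on the receiving screen, if any
    source_receiving_status TEXT,                -- status carried over from the source system
    PRIMARY KEY (receiving_id, pid)              -- matches the ON CONFLICT key suggested below
);
```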
**How the Import Script Changes:**
1. **Fetch POs:** Fetch data from `po` and `po_products`.
2. **Populate `purchase_orders`:**
* Insert/Update rows into `purchase_orders` based directly on the fetched PO data.
* Set `po_id`, `pid`, `ordered`, `po_cost_price`, `date` (`COALESCE(date_ordered, date_created)`), `expected_date`.
* Set `status` by mapping the source `po.status` code directly ('ordered', 'canceled', 'done', etc.).
* **No complex allocation needed here.**
3. **Fetch Receivings:** Fetch data from `receivings` and `receivings_products`.
4. **Populate `receivings`:**
* For *every* line item fetched from `receivings_products`:
* Perform necessary data validation (dates, numbers).
* Insert a new row into `receivings` with all the relevant details (`receiving_id`, `pid`, `received_qty`, `cost_each`, `received_date`, `received_by`, `source_po_id`, `source_receiving_status`).
* Use `ON CONFLICT (receiving_id, pid)` (or similar unique key based on your source data) `DO UPDATE SET ...` for incremental updates if necessary, or simply delete/re-insert based on `receiving_id` for simplicity if performance allows.
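For step 4, each line-item upsert could look roughly like this (the parameter placeholders and conflict key are assumptions to adapt to the source data):
```sql
-- Illustrative upsert for one receiving line
INSERT INTO public.receivings
    (receiving_id, pid, received_qty, cost_each, received_date,
     received_by, source_po_id, source_receiving_status)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
ON CONFLICT (receiving_id, pid) DO UPDATE SET
    received_qty            = EXCLUDED.received_qty,
    cost_each               = EXCLUDED.cost_each,
    received_date           = EXCLUDED.received_date,
    received_by             = EXCLUDED.received_by,
    source_po_id            = EXCLUDED.source_po_id,
    source_receiving_status = EXCLUDED.source_receiving_status;
```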
**Impact on Downstream Scripts (and how to adapt):**
* **Initial Query (Active POs):**
* `SELECT ... FROM purchase_orders po WHERE po.status NOT IN ('canceled', 'done', 'paid_equivalent_status?') AND po.date >= ...`
* `active_pos`: `COUNT(DISTINCT po.po_id)` based on the filtered POs.
* `overdue_pos`: Add `AND po.expected_date < CURRENT_DATE`.
* `total_units`: `SUM(po.ordered)`. Represents total units *ordered* on active POs.
* `total_cost`: `SUM(po.ordered * po.po_cost_price)`. Cost of units *ordered*.
* `total_retail`: `SUM(po.ordered * pm.current_price)`. Retail value of units *ordered*.
* **Result:** This query now cleanly reports on the status of *orders* placed, which seems closer to its original intent. The filter `po.receiving_status NOT IN ('partial_received', 'full_received', 'paid')` is replaced by `po.status NOT IN ('canceled', 'done', 'paid_equivalent?')`. The 90% received check is removed as `received` is not reliably tracked *on the PO* anymore.
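Putting those bullets together, the active-PO summary might be shaped like this (the status list, date cutoff, and the `product_metrics` join are assumptions):
```sql
-- Illustrative rewrite of the active-PO summary against purchase_orders only
SELECT
    COUNT(DISTINCT po.po_id)                                                AS active_pos,
    COUNT(DISTINCT po.po_id) FILTER (WHERE po.expected_date < CURRENT_DATE) AS overdue_pos,
    SUM(po.ordered)                                                         AS total_units,
    SUM(po.ordered * po.po_cost_price)                                      AS total_cost,
    SUM(po.ordered * pm.current_price)                                      AS total_retail
FROM public.purchase_orders po
JOIN public.product_metrics pm ON pm.pid = po.pid
WHERE po.status NOT IN ('canceled', 'done')          -- plus any "paid"-equivalent statuses
  AND po.date >= CURRENT_DATE - INTERVAL '1 year';   -- example cutoff for the existing date filter
```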
* **`daily_product_snapshots`:**
* **`SalesData` CTE:** No change needed.
* **`ReceivingData` CTE:** **Must be changed.** Query the **`receivings`** table instead of `purchase_orders`.
```sql
ReceivingData AS (
SELECT
rl.pid,
COUNT(DISTINCT rl.receiving_id) as receiving_doc_count,
SUM(rl.received_qty) AS units_received,
SUM(rl.received_qty * rl.cost_each) AS cost_received
FROM public.receivings rl
WHERE rl.received_date::date = _date
-- Optional: Filter out canceled receivings if needed
-- AND rl.source_receiving_status <> 'canceled'
GROUP BY rl.pid
),
```
* **Result:** This now accurately reflects *all* units received on a given day from the definitive source.
* **`update_product_metrics`:**
* **`CurrentInfo` CTE:** No change needed (pulls from `products`).
* **`OnOrderInfo` CTE:** Needs re-evaluation. How do you want to define "On Order"?
* **Option A (Strict PO View):** `SUM(po.ordered)` from `purchase_orders po WHERE po.status NOT IN ('canceled', 'done', 'paid_equivalent?')`. This is quantity on *open orders*, ignoring fulfillment state. Simple, but might overestimate if items arrived unlinked.
* **Option B (Approximate Fulfillment):** `SUM(po.ordered)` from open POs MINUS `SUM(rl.received_qty)` from `receivings rl` where `rl.source_po_id = po.po_id` (summing only directly linked receivings). Better, but still misses fulfillment via unlinked receivings.
* **Option C (Heuristic):** `SUM(po.ordered)` from open POs MINUS `SUM(rl.received_qty)` from `receivings rl` where `rl.pid = po.pid` and `rl.received_date >= po.date`. This *tries* to account for unlinked receivings but is imprecise.
* **Recommendation:** Start with **Option A** for simplicity, clearly labeling it "Quantity on Open POs". You might need a separate process or metric for a more nuanced view of expected vs. actual pipeline.
```sql
-- Example for Option A
OnOrderInfo AS (
SELECT
pid,
SUM(ordered) AS on_order_qty, -- Total qty on open POs
SUM(ordered * po_cost_price) AS on_order_cost -- Cost of qty on open POs
FROM public.purchase_orders
WHERE status NOT IN ('canceled', 'done', 'paid_equivalent?') -- Define your open statuses
GROUP BY pid
),
```
* **`HistoricalDates` CTE:**
* `date_first_sold`, `max_order_date`: No change (queries `orders`).
* `date_first_received_calc`, `date_last_received_calc`: **Must be changed.** Query `MIN(rl.received_date)` and `MAX(rl.received_date)` from the **`receivings`** table grouped by `pid` (see the sketch after this list).
* **`SnapshotAggregates` CTE:**
* `received_qty_30d`, `received_cost_30d`: These are calculated from `daily_product_snapshots`, which are now correctly sourced from `receivings`, so this part is fine.
* **Forecasting Calculations:** Will use the chosen definition of `on_order_qty`. Be aware of the implications of Option A (potentially inflated if unlinked receivings fulfill orders).
* **Result:** Metrics are calculated based on distinct order data and complete receiving data. The definition of "on order" needs careful consideration.
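For the `HistoricalDates` change flagged above, the received-date bounds can come straight from the new table; a minimal CTE sketch:
```sql
-- Illustrative source for date_first_received_calc / date_last_received_calc per product
ReceivingDates AS (
    SELECT
        rl.pid,
        MIN(rl.received_date) AS date_first_received_calc,
        MAX(rl.received_date) AS date_last_received_calc
    FROM public.receivings rl
    GROUP BY rl.pid
),
```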
**Summary of this Approach:**
* **Pros:**
* Accurately models distinct order and receiving events.
* Provides a definitive source (`receivings`) for all received inventory.
* Simplifies the `purchase_orders` table and its import logic.
* Avoids complex/potentially inaccurate allocation logic for unlinked receivings within the main tables.
* Avoids synthetic records.
* Fixes downstream reporting (`daily_snapshots` receiving data).
* **Cons:**
* Requires creating/managing the `receivings` table.
* Requires modifying downstream queries (`ReceivingData`, `OnOrderInfo`, `HistoricalDates`).
* Calculating a precise "net quantity still expected to arrive" (true on-order minus all relevant fulfillment) becomes more complex and may require specific business rules or heuristics outside the basic table structure if Option A for `OnOrderInfo` isn't sufficient.
This two-table approach (`purchase_orders` + `receivings`) seems the most robust and accurate way to handle your requirement for complete receiving records independent of potentially flawed PO linking. It directly addresses the shortcomings of the previous attempts.

View File

@@ -1,222 +0,0 @@
// ecosystem.config.js
const path = require('path');
const dotenv = require('dotenv');
// Load environment variables safely with error handling
const loadEnvFile = (envPath) => {
try {
console.log('Loading env from:', envPath);
const result = dotenv.config({ path: envPath });
if (result.error) {
console.warn(`Warning: .env file not found or invalid at ${envPath}:`, result.error.message);
return {};
}
console.log('Env variables loaded from', envPath, ':', Object.keys(result.parsed || {}));
return result.parsed || {};
} catch (error) {
console.warn(`Warning: Error loading .env file at ${envPath}:`, error.message);
return {};
}
}
// Load environment variables for each server
const authEnv = loadEnvFile(path.resolve(__dirname, 'dashboard/auth-server/.env'));
const aircallEnv = loadEnvFile(path.resolve(__dirname, 'dashboard/aircall-server/.env'));
const klaviyoEnv = loadEnvFile(path.resolve(__dirname, 'dashboard/klaviyo-server/.env'));
const metaEnv = loadEnvFile(path.resolve(__dirname, 'dashboard/meta-server/.env'));
const googleAnalyticsEnv = require('dotenv').config({
path: path.resolve(__dirname, 'dashboard/google-server/.env')
}).parsed || {};
const typeformEnv = loadEnvFile(path.resolve(__dirname, 'dashboard/typeform-server/.env'));
const inventoryEnv = loadEnvFile(path.resolve(__dirname, 'inventory/.env'));
// Common log settings for all apps
const logSettings = {
log_rotate: true,
max_size: '10M',
retain: '10',
log_date_format: 'YYYY-MM-DD HH:mm:ss'
};
// Common app settings
const commonSettings = {
instances: 1,
exec_mode: 'fork',
autorestart: true,
watch: false,
max_memory_restart: '1G',
time: true,
...logSettings,
ignore_watch: [
'node_modules',
'logs',
'.git',
'*.log'
],
min_uptime: 5000,
max_restarts: 5,
restart_delay: 4000,
listen_timeout: 50000,
kill_timeout: 5000,
node_args: '--max-old-space-size=1536'
};
module.exports = {
apps: [
{
...commonSettings,
name: 'auth-server',
script: './dashboard/auth-server/index.js',
env: {
NODE_ENV: 'production',
PORT: 3003,
...authEnv
},
error_file: 'dashboard/auth-server/logs/pm2/err.log',
out_file: 'dashboard/auth-server/logs/pm2/out.log',
log_file: 'dashboard/auth-server/logs/pm2/combined.log',
env_production: {
NODE_ENV: 'production',
PORT: 3003
},
env_development: {
NODE_ENV: 'development',
PORT: 3003
}
},
{
...commonSettings,
name: 'aircall-server',
script: './dashboard/aircall-server/server.js',
env: {
NODE_ENV: 'production',
AIRCALL_PORT: 3002,
...aircallEnv
},
error_file: 'dashboard/aircall-server/logs/pm2/err.log',
out_file: 'dashboard/aircall-server/logs/pm2/out.log',
log_file: 'dashboard/aircall-server/logs/pm2/combined.log',
env_production: {
NODE_ENV: 'production',
AIRCALL_PORT: 3002
}
},
{
...commonSettings,
name: 'klaviyo-server',
script: './dashboard/klaviyo-server/server.js',
env: {
NODE_ENV: 'production',
KLAVIYO_PORT: 3004,
...klaviyoEnv
},
error_file: 'dashboard/klaviyo-server/logs/pm2/err.log',
out_file: 'dashboard/klaviyo-server/logs/pm2/out.log',
log_file: 'dashboard/klaviyo-server/logs/pm2/combined.log',
env_production: {
NODE_ENV: 'production',
KLAVIYO_PORT: 3004
}
},
{
...commonSettings,
name: 'meta-server',
script: './dashboard/meta-server/server.js',
env: {
NODE_ENV: 'production',
PORT: 3005,
...metaEnv
},
error_file: 'dashboard/meta-server/logs/pm2/err.log',
out_file: 'dashboard/meta-server/logs/pm2/out.log',
log_file: 'dashboard/meta-server/logs/pm2/combined.log',
env_production: {
NODE_ENV: 'production',
PORT: 3005
}
},
{
name: "gorgias-server",
script: "./dashboard/gorgias-server/server.js",
env: {
NODE_ENV: "development",
PORT: 3006
},
env_production: {
NODE_ENV: "production",
PORT: 3006
},
error_file: "dashboard/logs/gorgias-server-error.log",
out_file: "dashboard/logs/gorgias-server-out.log",
log_file: "dashboard/logs/gorgias-server-combined.log",
time: true
},
{
...commonSettings,
name: 'google-server',
script: path.resolve(__dirname, 'dashboard/google-server/server.js'),
watch: false,
env: {
NODE_ENV: 'production',
GOOGLE_ANALYTICS_PORT: 3007,
...googleAnalyticsEnv
},
error_file: path.resolve(__dirname, 'dashboard/google-server/logs/pm2/err.log'),
out_file: path.resolve(__dirname, 'dashboard/google-server/logs/pm2/out.log'),
log_file: path.resolve(__dirname, 'dashboard/google-server/logs/pm2/combined.log'),
env_production: {
NODE_ENV: 'production',
GOOGLE_ANALYTICS_PORT: 3007
}
},
{
...commonSettings,
name: 'typeform-server',
script: './dashboard/typeform-server/server.js',
env: {
NODE_ENV: 'production',
TYPEFORM_PORT: 3008,
...typeformEnv
},
error_file: 'dashboard/typeform-server/logs/pm2/err.log',
out_file: 'dashboard/typeform-server/logs/pm2/out.log',
log_file: 'dashboard/typeform-server/logs/pm2/combined.log',
env_production: {
NODE_ENV: 'production',
TYPEFORM_PORT: 3008
}
},
{
...commonSettings,
name: 'inventory-server',
script: './inventory/src/server.js',
env: {
NODE_ENV: 'production',
PORT: 3010,
...inventoryEnv
},
error_file: 'inventory/logs/pm2/err.log',
out_file: 'inventory/logs/pm2/out.log',
log_file: 'inventory/logs/pm2/combined.log',
env_production: {
NODE_ENV: 'production',
PORT: 3010,
...inventoryEnv
}
},
{
...commonSettings,
name: 'new-auth-server',
script: './inventory-server/auth/server.js',
env: {
NODE_ENV: 'production',
AUTH_PORT: 3011,
...inventoryEnv,
JWT_SECRET: process.env.JWT_SECRET
},
error_file: 'inventory-server/auth/logs/pm2/err.log',
out_file: 'inventory-server/auth/logs/pm2/out.log',
log_file: 'inventory-server/auth/logs/pm2/combined.log'
}
]
};

View File

@@ -0,0 +1,128 @@
// Get pool from global or create a new one if not available
let pool;
if (typeof global.pool !== 'undefined') {
pool = global.pool;
} else {
// If global pool is not available, create a new connection
const { Pool } = require('pg');
pool = new Pool({
host: process.env.DB_HOST,
user: process.env.DB_USER,
password: process.env.DB_PASSWORD,
database: process.env.DB_NAME,
port: process.env.DB_PORT,
});
console.log('Created new database pool in permissions.js');
}
/**
* Check if a user has a specific permission
* @param {number} userId - The user ID to check
* @param {string} permissionCode - The permission code to check
* @returns {Promise<boolean>} - Whether the user has the permission
*/
async function checkPermission(userId, permissionCode) {
try {
// First check if the user is an admin
const adminResult = await pool.query(
'SELECT is_admin FROM users WHERE id = $1',
[userId]
);
// If user is admin, automatically grant permission
if (adminResult.rows.length > 0 && adminResult.rows[0].is_admin) {
return true;
}
// Otherwise check for specific permission
const result = await pool.query(
`SELECT COUNT(*) AS has_permission
FROM user_permissions up
JOIN permissions p ON up.permission_id = p.id
WHERE up.user_id = $1 AND p.code = $2`,
[userId, permissionCode]
);
return result.rows[0].has_permission > 0;
} catch (error) {
console.error('Error checking permission:', error);
return false;
}
}
/**
* Middleware to require a specific permission
* @param {string} permissionCode - The permission code required
* @returns {Function} - Express middleware function
*/
function requirePermission(permissionCode) {
return async (req, res, next) => {
try {
// Check if user is authenticated
if (!req.user || !req.user.id) {
return res.status(401).json({ error: 'Authentication required' });
}
const hasPermission = await checkPermission(req.user.id, permissionCode);
if (!hasPermission) {
return res.status(403).json({
error: 'Insufficient permissions',
requiredPermission: permissionCode
});
}
next();
} catch (error) {
console.error('Permission middleware error:', error);
res.status(500).json({ error: 'Server error checking permissions' });
}
};
}
/**
* Get all permissions for a user
* @param {number} userId - The user ID
* @returns {Promise<string[]>} - Array of permission codes
*/
async function getUserPermissions(userId) {
try {
// Check if user is admin
const adminResult = await pool.query(
'SELECT is_admin FROM users WHERE id = $1',
[userId]
);
if (adminResult.rows.length === 0) {
return [];
}
const isAdmin = adminResult.rows[0].is_admin;
if (isAdmin) {
// Admin gets all permissions
const allPermissions = await pool.query('SELECT code FROM permissions');
return allPermissions.rows.map(p => p.code);
} else {
// Get assigned permissions
const permissions = await pool.query(
`SELECT p.code
FROM permissions p
JOIN user_permissions up ON p.id = up.permission_id
WHERE up.user_id = $1`,
[userId]
);
return permissions.rows.map(p => p.code);
}
} catch (error) {
console.error('Error getting user permissions:', error);
return [];
}
}
module.exports = {
checkPermission,
requirePermission,
getUserPermissions
};

View File

@@ -0,0 +1,528 @@
const express = require('express');
const router = express.Router();
const bcrypt = require('bcrypt');
const jwt = require('jsonwebtoken');
const { requirePermission, getUserPermissions } = require('./permissions');
// Get pool from global or create a new one if not available
let pool;
if (typeof global.pool !== 'undefined') {
pool = global.pool;
} else {
// If global pool is not available, create a new connection
const { Pool } = require('pg');
pool = new Pool({
host: process.env.DB_HOST,
user: process.env.DB_USER,
password: process.env.DB_PASSWORD,
database: process.env.DB_NAME,
port: process.env.DB_PORT,
});
console.log('Created new database pool in routes.js');
}
// Authentication middleware
const authenticate = async (req, res, next) => {
try {
const authHeader = req.headers.authorization;
if (!authHeader || !authHeader.startsWith('Bearer ')) {
return res.status(401).json({ error: 'Authentication required' });
}
const token = authHeader.split(' ')[1];
const decoded = jwt.verify(token, process.env.JWT_SECRET);
// Get user from database
const result = await pool.query(
'SELECT id, username, email, is_admin, rocket_chat_user_id FROM users WHERE id = $1',
[decoded.userId]
);
console.log('Database query result for user', decoded.userId, ':', result.rows[0]);
if (result.rows.length === 0) {
return res.status(401).json({ error: 'User not found' });
}
// Attach user to request
req.user = result.rows[0];
next();
} catch (error) {
console.error('Authentication error:', error);
res.status(401).json({ error: 'Invalid token' });
}
};
// Login route
router.post('/login', async (req, res) => {
try {
const { username, password } = req.body;
// Get user from database
const result = await pool.query(
'SELECT id, username, password, is_admin, is_active, rocket_chat_user_id FROM users WHERE username = $1',
[username]
);
if (result.rows.length === 0) {
return res.status(401).json({ error: 'Invalid username or password' });
}
const user = result.rows[0];
// Check if user is active
if (!user.is_active) {
return res.status(403).json({ error: 'Account is inactive' });
}
// Verify password
const validPassword = await bcrypt.compare(password, user.password);
if (!validPassword) {
return res.status(401).json({ error: 'Invalid username or password' });
}
// Update last login
await pool.query(
'UPDATE users SET last_login = CURRENT_TIMESTAMP WHERE id = $1',
[user.id]
);
// Generate JWT
const token = jwt.sign(
{ userId: user.id, username: user.username },
process.env.JWT_SECRET,
{ expiresIn: '8h' }
);
// Get user permissions
const permissions = await getUserPermissions(user.id);
res.json({
token,
user: {
id: user.id,
username: user.username,
is_admin: user.is_admin,
rocket_chat_user_id: user.rocket_chat_user_id,
permissions
}
});
} catch (error) {
console.error('Login error:', error);
res.status(500).json({ error: 'Server error' });
}
});
// Get current user
router.get('/me', authenticate, async (req, res) => {
try {
// Get user permissions
const permissions = await getUserPermissions(req.user.id);
res.json({
id: req.user.id,
username: req.user.username,
email: req.user.email,
is_admin: req.user.is_admin,
rocket_chat_user_id: req.user.rocket_chat_user_id,
permissions,
// Debug info
_debug_raw_user: req.user,
_server_identifier: "INVENTORY_AUTH_SERVER_MODIFIED"
});
} catch (error) {
console.error('Error getting current user:', error);
res.status(500).json({ error: 'Server error' });
}
});
// Get all users
router.get('/users', authenticate, requirePermission('view:users'), async (req, res) => {
try {
const result = await pool.query(`
SELECT id, username, email, is_admin, is_active, rocket_chat_user_id, created_at, last_login
FROM users
ORDER BY username
`);
res.json(result.rows);
} catch (error) {
console.error('Error getting users:', error);
res.status(500).json({ error: 'Server error' });
}
});
// Get user with permissions
router.get('/users/:id', authenticate, requirePermission('view:users'), async (req, res) => {
try {
const userId = req.params.id;
// Get user details
const userResult = await pool.query(`
SELECT id, username, email, is_admin, is_active, rocket_chat_user_id, created_at, last_login
FROM users
WHERE id = $1
`, [userId]);
if (userResult.rows.length === 0) {
return res.status(404).json({ error: 'User not found' });
}
// Get user permissions
const permissionsResult = await pool.query(`
SELECT p.id, p.name, p.code, p.category, p.description
FROM permissions p
JOIN user_permissions up ON p.id = up.permission_id
WHERE up.user_id = $1
ORDER BY p.category, p.name
`, [userId]);
// Combine user and permissions
const user = {
...userResult.rows[0],
permissions: permissionsResult.rows
};
res.json(user);
} catch (error) {
console.error('Error getting user:', error);
res.status(500).json({ error: 'Server error' });
}
});
// Create new user
router.post('/users', authenticate, requirePermission('create:users'), async (req, res) => {
const client = await pool.connect();
try {
const { username, email, password, is_admin, is_active, rocket_chat_user_id, permissions } = req.body;
console.log("Create user request:", {
username,
email,
is_admin,
is_active,
rocket_chat_user_id,
permissions: permissions || []
});
// Validate required fields
if (!username || !password) {
return res.status(400).json({ error: 'Username and password are required' });
}
// Check if username is taken
const existingUser = await client.query(
'SELECT id FROM users WHERE username = $1',
[username]
);
if (existingUser.rows.length > 0) {
return res.status(400).json({ error: 'Username already exists' });
}
// Start transaction
await client.query('BEGIN');
// Hash password
const saltRounds = 10;
const hashedPassword = await bcrypt.hash(password, saltRounds);
// Insert new user
const userResult = await client.query(`
INSERT INTO users (username, email, password, is_admin, is_active, rocket_chat_user_id, created_at)
VALUES ($1, $2, $3, $4, $5, $6, CURRENT_TIMESTAMP)
RETURNING id
`, [username, email || null, hashedPassword, !!is_admin, is_active !== false, rocket_chat_user_id || null]);
const userId = userResult.rows[0].id;
// Assign permissions if provided and not admin
if (!is_admin && Array.isArray(permissions) && permissions.length > 0) {
console.log("Adding permissions for new user:", userId);
console.log("Permissions received:", permissions);
// Check permission format
const permissionIds = permissions.map(p => {
if (typeof p === 'object' && p.id) {
console.log("Permission is an object with ID:", p.id);
return parseInt(p.id, 10);
} else if (typeof p === 'number') {
console.log("Permission is a number:", p);
return p;
} else if (typeof p === 'string' && !isNaN(parseInt(p, 10))) {
console.log("Permission is a string that can be parsed as a number:", p);
return parseInt(p, 10);
} else {
console.log("Unknown permission format:", typeof p, p);
// If it's a permission code, we need to look up the ID
return null;
}
}).filter(id => id !== null);
console.log("Filtered permission IDs:", permissionIds);
if (permissionIds.length > 0) {
const permissionValues = permissionIds
.map(permId => `(${userId}, ${permId})`)
.join(',');
console.log("Inserting permission values:", permissionValues);
try {
await client.query(`
INSERT INTO user_permissions (user_id, permission_id)
VALUES ${permissionValues}
ON CONFLICT DO NOTHING
`);
console.log("Successfully inserted permissions for new user:", userId);
} catch (err) {
console.error("Error inserting permissions for new user:", err);
throw err;
}
} else {
console.log("No valid permission IDs found to insert for new user");
}
} else {
console.log("Not adding permissions: is_admin =", is_admin, "permissions array:", Array.isArray(permissions), "length:", permissions ? permissions.length : 0);
}
await client.query('COMMIT');
res.status(201).json({
id: userId,
message: 'User created successfully'
});
} catch (error) {
await client.query('ROLLBACK');
console.error('Error creating user:', error);
res.status(500).json({ error: 'Server error' });
} finally {
client.release();
}
});
// Update user
router.put('/users/:id', authenticate, requirePermission('edit:users'), async (req, res) => {
const client = await pool.connect();
try {
const userId = req.params.id;
const { username, email, password, is_admin, is_active, rocket_chat_user_id, permissions } = req.body;
console.log("Update user request:", {
userId,
username,
email,
is_admin,
is_active,
rocket_chat_user_id,
permissions: permissions || []
});
// Check if user exists
const userExists = await client.query(
'SELECT id FROM users WHERE id = $1',
[userId]
);
if (userExists.rows.length === 0) {
return res.status(404).json({ error: 'User not found' });
}
// Start transaction
await client.query('BEGIN');
// Build update fields
const updateFields = [];
const updateValues = [userId]; // First parameter is the user ID
let paramIndex = 2;
if (username !== undefined) {
updateFields.push(`username = $${paramIndex++}`);
updateValues.push(username);
}
if (email !== undefined) {
updateFields.push(`email = $${paramIndex++}`);
updateValues.push(email || null);
}
if (is_admin !== undefined) {
updateFields.push(`is_admin = $${paramIndex++}`);
updateValues.push(!!is_admin);
}
if (is_active !== undefined) {
updateFields.push(`is_active = $${paramIndex++}`);
updateValues.push(!!is_active);
}
if (rocket_chat_user_id !== undefined) {
updateFields.push(`rocket_chat_user_id = $${paramIndex++}`);
updateValues.push(rocket_chat_user_id || null);
}
// Update password if provided
if (password) {
const saltRounds = 10;
const hashedPassword = await bcrypt.hash(password, saltRounds);
updateFields.push(`password = $${paramIndex++}`);
updateValues.push(hashedPassword);
}
// Update user if there are fields to update
if (updateFields.length > 0) {
updateFields.push(`updated_at = CURRENT_TIMESTAMP`);
await client.query(`
UPDATE users
SET ${updateFields.join(', ')}
WHERE id = $1
`, updateValues);
}
// Update permissions if provided
if (Array.isArray(permissions)) {
console.log("Updating permissions for user:", userId);
console.log("Permissions received:", permissions);
// First remove existing permissions
await client.query(
'DELETE FROM user_permissions WHERE user_id = $1',
[userId]
);
console.log("Deleted existing permissions for user:", userId);
// Add new permissions if any and not admin
const newIsAdmin = is_admin !== undefined ? is_admin : (await client.query('SELECT is_admin FROM users WHERE id = $1', [userId])).rows[0].is_admin;
console.log("User is admin:", newIsAdmin);
if (!newIsAdmin && permissions.length > 0) {
console.log("Adding permissions:", permissions);
// Check permission format
const permissionIds = permissions.map(p => {
if (typeof p === 'object' && p.id) {
console.log("Permission is an object with ID:", p.id);
return parseInt(p.id, 10);
} else if (typeof p === 'number') {
console.log("Permission is a number:", p);
return p;
} else if (typeof p === 'string' && !isNaN(parseInt(p, 10))) {
console.log("Permission is a string that can be parsed as a number:", p);
return parseInt(p, 10);
} else {
console.log("Unknown permission format:", typeof p, p);
// If it's a permission code, we need to look up the ID
return null;
}
}).filter(id => id !== null);
console.log("Filtered permission IDs:", permissionIds);
if (permissionIds.length > 0) {
const permissionValues = permissionIds
.map(permId => `(${userId}, ${permId})`)
.join(',');
console.log("Inserting permission values:", permissionValues);
try {
await client.query(`
INSERT INTO user_permissions (user_id, permission_id)
VALUES ${permissionValues}
ON CONFLICT DO NOTHING
`);
console.log("Successfully inserted permissions for user:", userId);
} catch (err) {
console.error("Error inserting permissions:", err);
throw err;
}
} else {
console.log("No valid permission IDs found to insert");
}
}
}
await client.query('COMMIT');
res.json({ message: 'User updated successfully' });
} catch (error) {
await client.query('ROLLBACK');
console.error('Error updating user:', error);
res.status(500).json({ error: 'Server error' });
} finally {
client.release();
}
});
// Delete user
router.delete('/users/:id', authenticate, requirePermission('delete:users'), async (req, res) => {
try {
const userId = req.params.id;
// Check that user is not deleting themselves
if (req.user.id === parseInt(userId, 10)) {
return res.status(400).json({ error: 'Cannot delete your own account' });
}
// Delete user (this will cascade to user_permissions due to FK constraints)
const result = await pool.query(
'DELETE FROM users WHERE id = $1 RETURNING id',
[userId]
);
if (result.rows.length === 0) {
return res.status(404).json({ error: 'User not found' });
}
res.json({ message: 'User deleted successfully' });
} catch (error) {
console.error('Error deleting user:', error);
res.status(500).json({ error: 'Server error' });
}
});
// Get all permissions grouped by category
router.get('/permissions/categories', authenticate, requirePermission('view:users'), async (req, res) => {
try {
const result = await pool.query(`
SELECT category, json_agg(
json_build_object(
'id', id,
'name', name,
'code', code,
'description', description
) ORDER BY name
) as permissions
FROM permissions
GROUP BY category
ORDER BY category
`);
res.json(result.rows);
} catch (error) {
console.error('Error getting permissions:', error);
res.status(500).json({ error: 'Server error' });
}
});
// Get all permissions
router.get('/permissions', authenticate, requirePermission('view:users'), async (req, res) => {
try {
const result = await pool.query(`
SELECT *
FROM permissions
ORDER BY category, name
`);
res.json(result.rows);
} catch (error) {
console.error('Error getting permissions:', error);
res.status(500).json({ error: 'Server error' });
}
});
module.exports = router;

View File

@@ -2,5 +2,88 @@ CREATE TABLE users (
id SERIAL PRIMARY KEY,
username VARCHAR(255) NOT NULL UNIQUE,
password VARCHAR(255) NOT NULL,
email VARCHAR UNIQUE,
is_admin BOOLEAN DEFAULT FALSE,
is_active BOOLEAN DEFAULT TRUE,
last_login TIMESTAMP WITH TIME ZONE,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
-- Function to update the updated_at timestamp
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
-- Sequence and defined type for users table if not exists
CREATE SEQUENCE IF NOT EXISTS users_id_seq;
-- Create permissions table
CREATE TABLE IF NOT EXISTS "public"."permissions" (
"id" SERIAL PRIMARY KEY,
"name" varchar NOT NULL UNIQUE,
"code" varchar NOT NULL UNIQUE,
"description" text,
"category" varchar NOT NULL,
"created_at" timestamp with time zone DEFAULT CURRENT_TIMESTAMP,
"updated_at" timestamp with time zone DEFAULT CURRENT_TIMESTAMP
);
-- Create user_permissions junction table
CREATE TABLE IF NOT EXISTS "public"."user_permissions" (
"user_id" int4 NOT NULL REFERENCES "public"."users"("id") ON DELETE CASCADE,
"permission_id" int4 NOT NULL REFERENCES "public"."permissions"("id") ON DELETE CASCADE,
"created_at" timestamp with time zone DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY ("user_id", "permission_id")
);
-- Add triggers for updated_at on users and permissions
DROP TRIGGER IF EXISTS update_users_updated_at ON users;
CREATE TRIGGER update_users_updated_at
BEFORE UPDATE ON users
FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column();
DROP TRIGGER IF EXISTS update_permissions_updated_at ON permissions;
CREATE TRIGGER update_permissions_updated_at
BEFORE UPDATE ON permissions
FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column();
-- Insert default permissions by page - only the ones used in application
INSERT INTO permissions (name, code, description, category) VALUES
('Dashboard Access', 'access:dashboard', 'Can access the Dashboard page', 'Pages'),
('Products Access', 'access:products', 'Can access the Products page', 'Pages'),
('Categories Access', 'access:categories', 'Can access the Categories page', 'Pages'),
('Vendors Access', 'access:vendors', 'Can access the Vendors page', 'Pages'),
('Analytics Access', 'access:analytics', 'Can access the Analytics page', 'Pages'),
('Forecasting Access', 'access:forecasting', 'Can access the Forecasting page', 'Pages'),
('Purchase Orders Access', 'access:purchase_orders', 'Can access the Purchase Orders page', 'Pages'),
('Import Access', 'access:import', 'Can access the Import page', 'Pages'),
('Settings Access', 'access:settings', 'Can access the Settings page', 'Pages'),
('AI Validation Debug Access', 'access:ai_validation_debug', 'Can access the AI Validation Debug page', 'Pages')
ON CONFLICT (code) DO NOTHING;
-- Settings section permissions
INSERT INTO permissions (name, code, description, category) VALUES
('Data Management', 'settings:data_management', 'Access to the Data Management settings section', 'Settings'),
('Stock Management', 'settings:stock_management', 'Access to the Stock Management settings section', 'Settings'),
('Performance Metrics', 'settings:performance_metrics', 'Access to the Performance Metrics settings section', 'Settings'),
('Calculation Settings', 'settings:calculation_settings', 'Access to the Calculation Settings section', 'Settings'),
('Template Management', 'settings:templates', 'Access to the Template Management settings section', 'Settings'),
('User Management', 'settings:user_management', 'Access to the User Management settings section', 'Settings')
ON CONFLICT (code) DO NOTHING;
-- Set any existing users as admin
UPDATE users SET is_admin = TRUE WHERE is_admin IS NULL;
-- Grant all permissions to admin users
INSERT INTO user_permissions (user_id, permission_id)
SELECT u.id, p.id
FROM users u, permissions p
WHERE u.is_admin = TRUE
ON CONFLICT DO NOTHING;

View File

@@ -5,6 +5,7 @@ const bcrypt = require('bcrypt');
const jwt = require('jsonwebtoken');
const { Pool } = require('pg');
const morgan = require('morgan');
const authRoutes = require('./routes');
// Log startup configuration
console.log('Starting auth server with config:', {
@@ -27,11 +28,14 @@ const pool = new Pool({
port: process.env.DB_PORT,
});
// Make pool available globally
global.pool = pool;
// Middleware
app.use(express.json());
app.use(morgan('combined'));
app.use(cors({
origin: ['http://localhost:5173', 'https://inventory.kent.pw'],
origin: ['http://localhost:5175', 'http://localhost:5174', 'https://inventory.kent.pw'],
credentials: true
}));
@@ -42,7 +46,7 @@ app.post('/login', async (req, res) => {
try {
// Get user from database
const result = await pool.query(
'SELECT id, username, password FROM users WHERE username = $1',
'SELECT id, username, password, is_admin, is_active FROM users WHERE username = $1',
[username]
);
@@ -52,6 +56,11 @@ app.post('/login', async (req, res) => {
if (!user || !(await bcrypt.compare(password, user.password))) {
return res.status(401).json({ error: 'Invalid username or password' });
}
// Check if user is active
if (!user.is_active) {
return res.status(403).json({ error: 'Account is inactive' });
}
// Generate JWT token
const token = jwt.sign(
@@ -60,31 +69,85 @@ app.post('/login', async (req, res) => {
{ expiresIn: '24h' }
);
res.json({ token });
// Get user permissions for the response
const permissionsResult = await pool.query(`
SELECT code
FROM permissions p
JOIN user_permissions up ON p.id = up.permission_id
WHERE up.user_id = $1
`, [user.id]);
const permissions = permissionsResult.rows.map(row => row.code);
res.json({
token,
user: {
id: user.id,
username: user.username,
is_admin: user.is_admin,
permissions: user.is_admin ? [] : permissions
}
});
} catch (error) {
console.error('Login error:', error);
res.status(500).json({ error: 'Internal server error' });
}
});
// Protected route to verify token
app.get('/protected', async (req, res) => {
// User info endpoint
app.get('/me', async (req, res) => {
const authHeader = req.headers.authorization;
if (!authHeader) {
if (!authHeader || !authHeader.startsWith('Bearer ')) {
return res.status(401).json({ error: 'No token provided' });
}
try {
const token = authHeader.split(' ')[1];
const decoded = jwt.verify(token, process.env.JWT_SECRET);
res.json({ userId: decoded.userId, username: decoded.username });
// Get user details from database
const userResult = await pool.query(
'SELECT id, username, email, is_admin, rocket_chat_user_id, is_active FROM users WHERE id = $1',
[decoded.userId]
);
if (userResult.rows.length === 0) {
return res.status(404).json({ error: 'User not found' });
}
const user = userResult.rows[0];
// Get user permissions
let permissions = [];
if (!user.is_admin) {
const permissionsResult = await pool.query(`
SELECT code
FROM permissions p
JOIN user_permissions up ON p.id = up.permission_id
WHERE up.user_id = $1
`, [user.id]);
permissions = permissionsResult.rows.map(row => row.code);
}
res.json({
id: user.id,
username: user.username,
email: user.email,
rocket_chat_user_id: user.rocket_chat_user_id,
is_admin: user.is_admin,
permissions: permissions
});
} catch (error) {
console.error('Token verification error:', error);
res.status(401).json({ error: 'Invalid token' });
}
});
// Mount all routes from routes.js
app.use('/', authRoutes);
// Health check endpoint
app.get('/health', (req, res) => {
res.json({ status: 'healthy' });

View File

@@ -0,0 +1,881 @@
#!/usr/bin/env python3
"""
MongoDB to PostgreSQL Converter for Rocket.Chat
Converts MongoDB BSON export files to PostgreSQL database
Usage:
python3 mongo_to_postgres_converter.py \
--mongo-path db/database/62df06d44234d20001289144 \
--pg-database rocketchat_converted \
--pg-user rocketchat_user \
--pg-password your_password \
--debug
"""
import json
import os
import re
import subprocess
import sys
import struct
from datetime import datetime
from pathlib import Path
from typing import Dict, Any, List, Optional
import argparse
import traceback
# Auto-install dependencies if needed
try:
import bson
import psycopg2
except ImportError:
print("Installing required packages...")
subprocess.check_call([sys.executable, "-m", "pip", "install", "pymongo", "psycopg2-binary"])
import bson
import psycopg2
class MongoToPostgresConverter:
def __init__(self, mongo_db_path: str, postgres_config: Dict[str, str], debug_mode: bool = False, debug_collections: List[str] = None):
self.mongo_db_path = Path(mongo_db_path)
self.postgres_config = postgres_config
self.debug_mode = debug_mode
self.debug_collections = debug_collections or []
self.collections = {}
self.schema_info = {}
self.error_log = {}
def log_debug(self, message: str, collection: str = None):
"""Log debug messages if debug mode is enabled and collection is in debug list"""
if self.debug_mode and (not self.debug_collections or collection in self.debug_collections):
print(f"DEBUG: {message}")
def log_error(self, collection: str, error_type: str, details: str):
"""Log detailed error information"""
if collection not in self.error_log:
self.error_log[collection] = []
self.error_log[collection].append({
'type': error_type,
'details': details,
'timestamp': datetime.now().isoformat()
})
def sample_documents(self, collection_name: str, max_samples: int = 3) -> List[Dict]:
"""Sample documents from a collection for debugging"""
if not self.debug_mode or (self.debug_collections and collection_name not in self.debug_collections):
return []
print(f"\n🔍 Sampling documents from {collection_name}:")
bson_file = self.collections[collection_name]['bson_file']
if bson_file.stat().st_size == 0:
print(" Collection is empty")
return []
samples = []
try:
with open(bson_file, 'rb') as f:
sample_count = 0
while sample_count < max_samples:
try:
doc_size = int.from_bytes(f.read(4), byteorder='little')
if doc_size <= 0:
break
f.seek(-4, 1)
doc_bytes = f.read(doc_size)
if len(doc_bytes) != doc_size:
break
doc = bson.decode(doc_bytes)
samples.append(doc)
sample_count += 1
print(f" Sample {sample_count} - Keys: {list(doc.keys())}")
# Show a few key fields with their types and truncated values
for key, value in list(doc.items())[:3]:
value_preview = str(value)[:50] + "..." if len(str(value)) > 50 else str(value)
print(f" {key}: {type(value).__name__} = {value_preview}")
if len(doc) > 3:
print(f" ... and {len(doc) - 3} more fields")
print()
except (bson.InvalidBSON, struct.error, OSError) as e:
self.log_error(collection_name, 'document_parsing', str(e))
break
except Exception as e:
self.log_error(collection_name, 'file_reading', str(e))
print(f" Error reading collection: {e}")
return samples
def discover_collections(self):
"""Discover all BSON files and their metadata"""
print("Discovering MongoDB collections...")
for bson_file in self.mongo_db_path.glob("*.bson"):
collection_name = bson_file.stem
metadata_file = bson_file.with_suffix(".metadata.json")
# Read metadata if available
metadata = {}
if metadata_file.exists():
try:
with open(metadata_file, 'r', encoding='utf-8') as f:
metadata = json.load(f)
except (UnicodeDecodeError, json.JSONDecodeError) as e:
print(f"Warning: Could not read metadata for {collection_name}: {e}")
metadata = {}
# Get file size and document count estimate
file_size = bson_file.stat().st_size
doc_count = self._estimate_document_count(bson_file)
self.collections[collection_name] = {
'bson_file': bson_file,
'metadata': metadata,
'file_size': file_size,
'estimated_docs': doc_count
}
print(f"Found {len(self.collections)} collections")
for name, info in self.collections.items():
print(f" - {name}: {info['file_size']/1024/1024:.1f}MB (~{info['estimated_docs']} docs)")
def _estimate_document_count(self, bson_file: Path) -> int:
"""Estimate document count by reading first few documents"""
if bson_file.stat().st_size == 0:
return 0
try:
with open(bson_file, 'rb') as f:
docs_sampled = 0
bytes_sampled = 0
max_sample_size = min(1024 * 1024, bson_file.stat().st_size) # 1MB or file size
while bytes_sampled < max_sample_size:
try:
doc_size = int.from_bytes(f.read(4), byteorder='little')
if doc_size <= 0 or doc_size > 16 * 1024 * 1024: # MongoDB doc size limit
break
f.seek(-4, 1) # Go back
doc_bytes = f.read(doc_size)
if len(doc_bytes) != doc_size:
break
bson.decode(doc_bytes) # Validate it's a valid BSON document
docs_sampled += 1
bytes_sampled += doc_size
except (bson.InvalidBSON, struct.error, OSError):
break
if docs_sampled > 0 and bytes_sampled > 0:
avg_doc_size = bytes_sampled / docs_sampled
return int(bson_file.stat().st_size / avg_doc_size)
except Exception:
pass
return 0
def analyze_schema(self, collection_name: str, sample_size: int = 100) -> Dict[str, Any]:
"""Analyze collection schema by sampling documents"""
print(f"Analyzing schema for {collection_name}...")
bson_file = self.collections[collection_name]['bson_file']
if bson_file.stat().st_size == 0:
return {}
schema = {}
docs_analyzed = 0
try:
with open(bson_file, 'rb') as f:
while docs_analyzed < sample_size:
try:
doc_size = int.from_bytes(f.read(4), byteorder='little')
if doc_size <= 0:
break
f.seek(-4, 1)
doc_bytes = f.read(doc_size)
if len(doc_bytes) != doc_size:
break
doc = bson.decode(doc_bytes)
self._analyze_document_schema(doc, schema)
docs_analyzed += 1
except (bson.InvalidBSON, struct.error, OSError):
break
except Exception as e:
print(f"Error analyzing {collection_name}: {e}")
self.schema_info[collection_name] = schema
return schema
def _analyze_document_schema(self, doc: Dict[str, Any], schema: Dict[str, Any], prefix: str = ""):
"""Recursively analyze document structure"""
for key, value in doc.items():
full_key = f"{prefix}.{key}" if prefix else key
if full_key not in schema:
schema[full_key] = {
'types': set(),
'null_count': 0,
'total_count': 0,
'is_array': False,
'nested_schema': {}
}
schema[full_key]['total_count'] += 1
if value is None:
schema[full_key]['null_count'] += 1
schema[full_key]['types'].add('null')
elif isinstance(value, dict):
schema[full_key]['types'].add('object')
if 'nested_schema' not in schema[full_key]:
schema[full_key]['nested_schema'] = {}
self._analyze_document_schema(value, schema[full_key]['nested_schema'])
elif isinstance(value, list):
schema[full_key]['types'].add('array')
schema[full_key]['is_array'] = True
if value and isinstance(value[0], dict):
if 'array_item_schema' not in schema[full_key]:
schema[full_key]['array_item_schema'] = {}
for item in value[:5]: # Sample first 5 items
if isinstance(item, dict):
self._analyze_document_schema(item, schema[full_key]['array_item_schema'])
else:
schema[full_key]['types'].add(type(value).__name__)
def generate_postgres_schema(self) -> Dict[str, str]:
"""Generate PostgreSQL CREATE TABLE statements"""
print("Generating PostgreSQL schema...")
table_definitions = {}
for collection_name, schema in self.schema_info.items():
if not schema: # Empty collection
continue
table_name = self._sanitize_table_name(collection_name)
columns = []
# Always add an id column (PostgreSQL doesn't use _id like MongoDB)
columns.append("id SERIAL PRIMARY KEY")
for field_name, field_info in schema.items():
if field_name == '_id':
columns.append("mongo_id TEXT") # Always allow NULL for mongo_id
continue
col_name = self._sanitize_column_name(field_name)
# Handle conflicts with PostgreSQL auto-generated columns
if col_name in ['id', 'mongo_id', 'created_at', 'updated_at']:
col_name = f"field_{col_name}"
col_type = self._determine_postgres_type(field_info)
# Make all fields nullable by default to avoid constraint violations
columns.append(f"{col_name} {col_type}")
# Add metadata columns
columns.extend([
"created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP",
"updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP"
])
column_definitions = ',\n '.join(columns)
table_sql = f"""
CREATE TABLE IF NOT EXISTS {table_name} (
{column_definitions}
);
-- Create indexes based on MongoDB indexes
"""
# Get list of actual columns that will exist in the table
existing_columns = set(['id', 'mongo_id', 'created_at', 'updated_at'])
for field_name in schema.keys():
if field_name != '_id':
col_name = self._sanitize_column_name(field_name)
# Handle conflicts with PostgreSQL auto-generated columns
if col_name in ['id', 'mongo_id', 'created_at', 'updated_at']:
col_name = f"field_{col_name}"
existing_columns.add(col_name)
# Add indexes from MongoDB metadata
metadata = self.collections[collection_name].get('metadata', {})
indexes = metadata.get('indexes', [])
for index in indexes:
if index['name'] != '_id_': # Skip the default _id index
# Sanitize index name - remove special characters
sanitized_index_name = re.sub(r'[^a-zA-Z0-9_]', '_', index['name'])
index_name = f"idx_{table_name}_{sanitized_index_name}"
index_keys = list(index['key'].keys())
if index_keys:
sanitized_keys = []
for key in index_keys:
if key != '_id':
sanitized_key = self._sanitize_column_name(key)
# Handle conflicts with PostgreSQL auto-generated columns
if sanitized_key in ['id', 'mongo_id', 'created_at', 'updated_at']:
sanitized_key = f"field_{sanitized_key}"
# Only add if the column actually exists in our table
if sanitized_key in existing_columns:
sanitized_keys.append(sanitized_key)
if sanitized_keys:
table_sql += f"CREATE INDEX IF NOT EXISTS {index_name} ON {table_name} ({', '.join(sanitized_keys)});\n"
table_definitions[collection_name] = table_sql
return table_definitions
def _sanitize_table_name(self, name: str) -> str:
"""Convert MongoDB collection name to PostgreSQL table name"""
# Remove rocketchat_ prefix if present
if name.startswith('rocketchat_'):
name = name[11:]
# Replace special characters with underscores
name = re.sub(r'[^a-zA-Z0-9_]', '_', name)
# Ensure it starts with a letter
if name and name[0].isdigit():
name = 'table_' + name
return name.lower()
def _sanitize_column_name(self, name: str) -> str:
"""Convert MongoDB field name to PostgreSQL column name"""
# Handle nested field names (convert dots to underscores)
name = name.replace('.', '_')
# Replace special characters with underscores
name = re.sub(r'[^a-zA-Z0-9_]', '_', name)
# Ensure it starts with a letter or underscore
if name and name[0].isdigit():
name = 'col_' + name
# Handle PostgreSQL reserved words
reserved = {
'user', 'order', 'group', 'table', 'index', 'key', 'value', 'date', 'time', 'timestamp',
'default', 'select', 'from', 'where', 'insert', 'update', 'delete', 'create', 'drop',
'alter', 'grant', 'revoke', 'commit', 'rollback', 'begin', 'end', 'case', 'when',
'then', 'else', 'if', 'null', 'not', 'and', 'or', 'in', 'exists', 'between',
'like', 'limit', 'offset', 'union', 'join', 'inner', 'outer', 'left', 'right',
'full', 'cross', 'natural', 'on', 'using', 'distinct', 'all', 'any', 'some',
'desc', 'asc', 'primary', 'foreign', 'references', 'constraint', 'unique',
'check', 'cascade', 'restrict', 'action', 'match', 'partial', 'full'
}
if name.lower() in reserved:
name = name + '_col'
return name.lower()
def _determine_postgres_type(self, field_info: Dict[str, Any]) -> str:
"""Determine PostgreSQL column type from MongoDB field analysis with improved logic"""
types = field_info['types']
# Convert set to list for easier checking
type_list = list(types)
# If there's only one type (excluding null), use specific typing
non_null_types = [t for t in type_list if t != 'null']
if len(non_null_types) == 1:
single_type = non_null_types[0]
if single_type == 'bool':
return 'BOOLEAN'
elif single_type == 'int':
return 'INTEGER'
elif single_type == 'float':
return 'NUMERIC'
elif single_type == 'str':
return 'TEXT'
elif single_type == 'datetime':
return 'TIMESTAMP'
elif single_type == 'ObjectId':
return 'TEXT'
# Handle mixed types more conservatively
if 'array' in types or field_info.get('is_array', False):
return 'JSONB' # Arrays always go to JSONB
elif 'object' in types:
return 'JSONB' # Objects always go to JSONB
elif len(non_null_types) > 1:
# Multiple non-null types - check for common combinations
if set(non_null_types) <= {'int', 'float'}:
return 'NUMERIC' # Can handle both int and float
elif set(non_null_types) <= {'bool', 'str'}:
return 'TEXT' # Convert everything to text
elif set(non_null_types) <= {'str', 'ObjectId'}:
return 'TEXT' # Both are string-like
else:
return 'JSONB' # Complex mixed types go to JSONB
elif 'ObjectId' in types:
return 'TEXT'
elif 'datetime' in types:
return 'TIMESTAMP'
elif 'bool' in types:
return 'BOOLEAN'
elif 'int' in types:
return 'INTEGER'
elif 'float' in types:
return 'NUMERIC'
elif 'str' in types:
return 'TEXT'
else:
return 'TEXT' # Default fallback
def create_postgres_database(self, table_definitions: Dict[str, str]):
"""Create PostgreSQL database and tables"""
print("Creating PostgreSQL database schema...")
try:
# Connect to PostgreSQL
conn = psycopg2.connect(**self.postgres_config)
conn.autocommit = True
cursor = conn.cursor()
# Create tables
for collection_name, table_sql in table_definitions.items():
print(f"Creating table for {collection_name}...")
cursor.execute(table_sql)
cursor.close()
conn.close()
print("Database schema created successfully!")
except Exception as e:
print(f"Error creating database schema: {e}")
raise
def convert_and_insert_data(self, batch_size: int = 1000):
"""Convert BSON data and insert into PostgreSQL"""
print("Converting and inserting data...")
try:
conn = psycopg2.connect(**self.postgres_config)
conn.autocommit = False
for collection_name in self.collections:
print(f"Processing {collection_name}...")
self._convert_collection(conn, collection_name, batch_size)
conn.close()
print("Data conversion completed successfully!")
except Exception as e:
print(f"Error converting data: {e}")
raise
def _convert_collection(self, conn, collection_name: str, batch_size: int):
"""Convert a single collection"""
bson_file = self.collections[collection_name]['bson_file']
if bson_file.stat().st_size == 0:
print(f" Skipping empty collection {collection_name}")
return
table_name = self._sanitize_table_name(collection_name)
cursor = conn.cursor()
batch = []
total_inserted = 0
errors = 0
try:
with open(bson_file, 'rb') as f:
while True:
try:
doc_size = int.from_bytes(f.read(4), byteorder='little')
if doc_size <= 0:
break
f.seek(-4, 1)
doc_bytes = f.read(doc_size)
if len(doc_bytes) != doc_size:
break
doc = bson.decode(doc_bytes)
batch.append(doc)
if len(batch) >= batch_size:
inserted, batch_errors = self._insert_batch(cursor, table_name, batch, collection_name)
total_inserted += inserted
errors += batch_errors
batch = []
conn.commit()
if total_inserted % 5000 == 0: # Less frequent progress updates
print(f" Inserted {total_inserted} documents...")
except (bson.InvalidBSON, struct.error, OSError):
break
# Insert remaining documents
if batch:
inserted, batch_errors = self._insert_batch(cursor, table_name, batch, collection_name)
total_inserted += inserted
errors += batch_errors
conn.commit()
if errors > 0:
print(f" Completed {collection_name}: {total_inserted} documents inserted ({errors} errors)")
else:
print(f" Completed {collection_name}: {total_inserted} documents inserted")
except Exception as e:
print(f" Error processing {collection_name}: {e}")
conn.rollback()
finally:
cursor.close()
def _insert_batch(self, cursor, table_name: str, documents: List[Dict], collection_name: str):
"""Insert a batch of documents with proper transaction handling"""
if not documents:
return 0, 0
# Get schema info for this collection
schema = self.schema_info.get(collection_name, {})
# Build column list
columns = ['mongo_id']
for field_name in schema.keys():
if field_name != '_id':
col_name = self._sanitize_column_name(field_name)
# Handle conflicts with PostgreSQL auto-generated columns
if col_name in ['id', 'mongo_id', 'created_at', 'updated_at']:
col_name = f"field_{col_name}"
columns.append(col_name)
# Build INSERT statement
placeholders = ', '.join(['%s'] * len(columns))
sql = f"INSERT INTO {table_name} ({', '.join(columns)}) VALUES ({placeholders})"
self.log_debug(f"SQL: {sql}", collection_name)
# Convert documents to tuples
rows = []
errors = 0
for doc_idx, doc in enumerate(documents):
try:
row = []
# Add mongo_id
row.append(str(doc.get('_id', '')))
# Add other fields
for field_name in schema.keys():
if field_name != '_id':
try:
value = self._get_nested_value(doc, field_name)
converted_value = self._convert_value_for_postgres(value, field_name, schema)
row.append(converted_value)
except Exception as e:
self.log_error(collection_name, 'field_conversion',
f"Field '{field_name}' in doc {doc_idx}: {str(e)}")
# Only show debug for collections we're focusing on
if collection_name in self.debug_collections:
print(f" ⚠️ Error converting field '{field_name}': {e}")
row.append(None) # Use NULL for problematic fields
rows.append(tuple(row))
except Exception as e:
self.log_error(collection_name, 'document_conversion', f"Document {doc_idx}: {str(e)}")
errors += 1
continue
# Execute batch insert
if rows:
try:
cursor.executemany(sql, rows)
return len(rows), errors
except Exception as batch_error:
self.log_error(collection_name, 'batch_insert', str(batch_error))
# Only show detailed debugging for targeted collections
if collection_name in self.debug_collections:
print(f" 🔴 Batch insert failed for {collection_name}: {batch_error}")
print(" Trying individual inserts with rollback handling...")
# Rollback the failed transaction
cursor.connection.rollback()
# Try inserting one by one in individual transactions
success_count = 0
for row_idx, row in enumerate(rows):
try:
cursor.execute(sql, row)
cursor.connection.commit() # Commit each successful insert
success_count += 1
except Exception as row_error:
cursor.connection.rollback() # Rollback failed insert
self.log_error(collection_name, 'row_insert', f"Row {row_idx}: {str(row_error)}")
# Show detailed error only for the first few failures and only for targeted collections
if collection_name in self.debug_collections and errors < 3:
print(f" Row {row_idx} failed: {row_error}")
print(f" Row data: {len(row)} values, expected {len(columns)} columns")
errors += 1
continue
return success_count, errors
return 0, errors
def _get_nested_value(self, doc: Dict, field_path: str):
"""Get value from nested document using dot notation"""
keys = field_path.split('.')
value = doc
for key in keys:
if isinstance(value, dict) and key in value:
value = value[key]
else:
return None
return value
def _convert_value_for_postgres(self, value, field_name: str = None, schema: Dict = None):
"""Convert MongoDB value to PostgreSQL compatible value with schema-aware conversion"""
if value is None:
return None
# Get the expected PostgreSQL type for this field if available
expected_type = None
if schema and field_name and field_name in schema:
field_info = schema[field_name]
expected_type = self._determine_postgres_type(field_info)
# Handle conversion based on expected type
if expected_type == 'BOOLEAN':
if isinstance(value, bool):
return value
elif isinstance(value, str):
return value.lower() in ('true', '1', 'yes', 'on')
elif isinstance(value, (int, float)):
return bool(value)
else:
return None
elif expected_type == 'INTEGER':
if isinstance(value, int):
return value
elif isinstance(value, float):
return int(value)
elif isinstance(value, str) and value.isdigit():
return int(value)
elif isinstance(value, bool):
return int(value)
else:
return None
elif expected_type == 'NUMERIC':
if isinstance(value, (int, float)):
return value
elif isinstance(value, str):
try:
return float(value)
except ValueError:
return None
elif isinstance(value, bool):
return float(value)
else:
return None
elif expected_type == 'TEXT':
if isinstance(value, str):
return value
elif value is not None:
str_value = str(value)
# Handle very long strings
if len(str_value) > 65535:
return str_value[:65535]
return str_value
else:
return None
elif expected_type == 'TIMESTAMP':
if hasattr(value, 'isoformat'):
return value.isoformat()
elif isinstance(value, str):
return value
else:
return str(value) if value is not None else None
elif expected_type == 'JSONB':
if isinstance(value, (dict, list)):
return json.dumps(value, default=self._json_serializer)
elif isinstance(value, str):
# Check if it's already valid JSON
try:
json.loads(value)
return value
except (json.JSONDecodeError, TypeError):
# Not valid JSON, wrap it
return json.dumps(value)
else:
return json.dumps(value, default=self._json_serializer)
# Fallback to original logic if no expected type or type not recognized
if isinstance(value, bool):
return value
elif isinstance(value, (int, float)):
return value
elif isinstance(value, str):
return value
elif isinstance(value, (dict, list)):
return json.dumps(value, default=self._json_serializer)
elif hasattr(value, 'isoformat'): # datetime
return value.isoformat()
elif hasattr(value, '__str__'):
str_value = str(value)
if len(str_value) > 65535:
return str_value[:65535]
return str_value
else:
return str(value)
def _json_serializer(self, obj):
"""Custom JSON serializer for complex objects with better error handling"""
try:
if hasattr(obj, 'isoformat'): # datetime
return obj.isoformat()
elif hasattr(obj, '__str__'):
return str(obj)
else:
return None
except Exception as e:
self.log_debug(f"JSON serialization error: {e}")
return str(obj)
def run_conversion(self, sample_size: int = 100, batch_size: int = 1000):
"""Run the full conversion process with focused debugging"""
print("Starting MongoDB to PostgreSQL conversion...")
print("This will convert your Rocket.Chat database from MongoDB to PostgreSQL")
if self.debug_mode:
if self.debug_collections:
print(f"🐛 DEBUG MODE: Focusing on collections: {', '.join(self.debug_collections)}")
else:
print("🐛 DEBUG MODE: All collections")
print("=" * 70)
# Step 1: Discover collections
self.discover_collections()
# Step 2: Analyze schemas
print("\nAnalyzing collection schemas...")
for collection_name in self.collections:
self.analyze_schema(collection_name, sample_size)
# Sample problematic collections if debugging
if self.debug_mode and self.debug_collections:
for coll in self.debug_collections:
if coll in self.collections:
self.sample_documents(coll, 2)
# Step 3: Generate PostgreSQL schema
table_definitions = self.generate_postgres_schema()
# Step 4: Create database schema
self.create_postgres_database(table_definitions)
# Step 5: Convert and insert data
self.convert_and_insert_data(batch_size)
# Step 6: Show error summary
self._print_error_summary()
print("=" * 70)
print("✅ Conversion completed!")
print(f" Database: {self.postgres_config['database']}")
print(f" Tables created: {len(table_definitions)}")
def _print_error_summary(self):
"""Print a focused summary of errors"""
if not self.error_log:
print("\n✅ No errors encountered during conversion!")
return
print("\n⚠️ ERROR SUMMARY:")
print("=" * 50)
# Sort by error count descending
sorted_collections = sorted(self.error_log.items(),
key=lambda x: len(x[1]), reverse=True)
for collection, errors in sorted_collections:
error_types = {}
for error in errors:
error_type = error['type']
if error_type not in error_types:
error_types[error_type] = []
error_types[error_type].append(error['details'])
print(f"\n🔴 {collection} ({len(errors)} total errors):")
for error_type, details_list in error_types.items():
print(f" {error_type}: {len(details_list)} errors")
# Show sample errors for critical collections
if collection in ['rocketchat_settings', 'rocketchat_room'] and len(details_list) > 0:
print(f" Sample: {details_list[0][:100]}...")
def main():
parser = argparse.ArgumentParser(
description='Convert MongoDB BSON export to PostgreSQL',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
# Basic usage
python3 mongo_to_postgres_converter.py \\
--mongo-path db/database/62df06d44234d20001289144 \\
--pg-database rocketchat_converted \\
--pg-user rocketchat_user \\
--pg-password mypassword
# Debug specific failing collections
python3 mongo_to_postgres_converter.py \\
--mongo-path db/database/62df06d44234d20001289144 \\
--pg-database rocketchat_converted \\
--pg-user rocketchat_user \\
--pg-password mypassword \\
--debug-collections rocketchat_settings rocketchat_room
Before running this script:
1. Update the password in reset_database.sql
2. Run: sudo -u postgres psql -f reset_database.sql
"""
)
parser.add_argument('--mongo-path', required=True, help='Path to MongoDB export directory')
parser.add_argument('--pg-host', default='localhost', help='PostgreSQL host (default: localhost)')
parser.add_argument('--pg-port', default='5432', help='PostgreSQL port (default: 5432)')
parser.add_argument('--pg-database', required=True, help='PostgreSQL database name')
parser.add_argument('--pg-user', required=True, help='PostgreSQL username')
parser.add_argument('--pg-password', required=True, help='PostgreSQL password')
parser.add_argument('--sample-size', type=int, default=100, help='Number of documents to sample for schema analysis (default: 100)')
parser.add_argument('--batch-size', type=int, default=1000, help='Batch size for data insertion (default: 1000)')
parser.add_argument('--debug', action='store_true', help='Enable debug mode with detailed error logging')
parser.add_argument('--debug-collections', nargs='*', help='Specific collections to debug (e.g., rocketchat_settings rocketchat_room)')
args = parser.parse_args()
postgres_config = {
'host': args.pg_host,
'port': args.pg_port,
'database': args.pg_database,
'user': args.pg_user,
'password': args.pg_password
}
# Enable debug mode if debug collections are specified
debug_mode = args.debug or (args.debug_collections is not None)
converter = MongoToPostgresConverter(args.mongo_path, postgres_config, debug_mode, args.debug_collections)
converter.run_conversion(args.sample_size, args.batch_size)
if __name__ == '__main__':
main()


@@ -0,0 +1,41 @@
-- PostgreSQL Database Reset Script for Rocket.Chat Import
-- Run as: sudo -u postgres psql -f reset_database.sql
-- Terminate all connections to the database (force disconnect users)
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE datname = 'rocketchat_converted' AND pid <> pg_backend_pid();
-- Drop the database if it exists
DROP DATABASE IF EXISTS rocketchat_converted;
-- Create fresh database
CREATE DATABASE rocketchat_converted;
-- Create user (if not exists)
DO $$
BEGIN
IF NOT EXISTS (SELECT FROM pg_user WHERE usename = 'rocketchat_user') THEN
CREATE USER rocketchat_user WITH PASSWORD 'HKjLgt23gWuPXzEAn3rW';
END IF;
END $$;
-- Grant database privileges
GRANT CONNECT ON DATABASE rocketchat_converted TO rocketchat_user;
GRANT CREATE ON DATABASE rocketchat_converted TO rocketchat_user;
-- Connect to the new database
\c rocketchat_converted;
-- Grant schema privileges
GRANT CREATE ON SCHEMA public TO rocketchat_user;
GRANT USAGE ON SCHEMA public TO rocketchat_user;
-- Grant privileges on all future tables and sequences
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO rocketchat_user;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT USAGE, SELECT ON SEQUENCES TO rocketchat_user;
-- Display success message
\echo 'Database reset completed successfully!'
\echo 'You can now run the converter with:'
\echo 'python3 mongo_to_postgres_converter.py --mongo-path db/database/62df06d44234d20001289144 --pg-database rocketchat_converted --pg-user rocketchat_user --pg-password your_password'


@@ -0,0 +1,54 @@
#!/usr/bin/env python3
"""
Quick test script to verify the converter fixes work for problematic collections
"""
from mongo_to_postgres_converter import MongoToPostgresConverter
def test_problematic_collections():
print("🧪 Testing converter fixes for problematic collections...")
postgres_config = {
'host': 'localhost',
'port': '5432',
'database': 'rocketchat_test',
'user': 'rocketchat_user',
'password': 'password123'
}
converter = MongoToPostgresConverter(
'db/database/62df06d44234d20001289144',
postgres_config,
debug_mode=True,
debug_collections=['rocketchat_settings', 'rocketchat_room']
)
# Test just discovery and schema analysis
print("\n1. Testing collection discovery...")
converter.discover_collections()
print("\n2. Testing schema analysis...")
if 'rocketchat_settings' in converter.collections:
settings_schema = converter.analyze_schema('rocketchat_settings', 10)
print(f"Settings schema fields: {len(settings_schema)}")
# Check specific problematic fields
if 'packageValue' in settings_schema:
packagevalue_info = settings_schema['packageValue']
pg_type = converter._determine_postgres_type(packagevalue_info)
print(f"packageValue types: {packagevalue_info['types']} -> PostgreSQL: {pg_type}")
if 'rocketchat_room' in converter.collections:
room_schema = converter.analyze_schema('rocketchat_room', 10)
print(f"Room schema fields: {len(room_schema)}")
# Check specific problematic fields
if 'sysMes' in room_schema:
sysmes_info = room_schema['sysMes']
pg_type = converter._determine_postgres_type(sysmes_info)
print(f"sysMes types: {sysmes_info['types']} -> PostgreSQL: {pg_type}")
print("\n✅ Test completed - check the type mappings above!")
if __name__ == '__main__':
test_problematic_collections()

inventory-server/chat/package-lock.json (generated file, 1447 lines; diff suppressed because it is too large)


@@ -0,0 +1,20 @@
{
"name": "chat-server",
"version": "1.0.0",
"description": "Chat archive server for Rocket.Chat data",
"main": "server.js",
"scripts": {
"start": "node server.js",
"dev": "nodemon server.js"
},
"dependencies": {
"express": "^4.18.2",
"cors": "^2.8.5",
"pg": "^8.11.0",
"dotenv": "^16.0.3",
"morgan": "^1.10.0"
},
"devDependencies": {
"nodemon": "^2.0.22"
}
}


@@ -0,0 +1,649 @@
const express = require('express');
const path = require('path');
const router = express.Router();
// Serve uploaded files with proper mapping from database paths to actual file locations
router.get('/files/uploads/*', async (req, res) => {
try {
// Extract the path from the URL (everything after /files/uploads/)
const requestPath = req.params[0];
// The URL path will be like: ufs/AmazonS3:Uploads/274Mf9CyHNG72oF86/filename.jpg
// We need to extract the mongo_id (274Mf9CyHNG72oF86) from this path
const pathParts = requestPath.split('/');
let mongoId = null;
// Find the mongo_id in the path structure
for (let i = 0; i < pathParts.length; i++) {
if (pathParts[i].includes('AmazonS3:Uploads') && i + 1 < pathParts.length) {
mongoId = pathParts[i + 1];
break;
}
// Sometimes the mongo_id might be the last part of ufs/AmazonS3:Uploads/mongoId
if (pathParts[i] === 'AmazonS3:Uploads' && i + 1 < pathParts.length) {
mongoId = pathParts[i + 1];
break;
}
}
if (!mongoId) {
// Try to get mongo_id from database by matching the full path
const result = await global.pool.query(`
SELECT mongo_id, name, type
FROM uploads
WHERE path = $1 OR url = $1
LIMIT 1
`, [`/ufs/AmazonS3:Uploads/${requestPath}`]); // $1 serves both comparisons, so a single parameter is passed
if (result.rows.length > 0) {
mongoId = result.rows[0].mongo_id;
}
}
if (!mongoId) {
return res.status(404).json({ error: 'File not found' });
}
// The actual file is stored with just the mongo_id as filename
const filePath = path.join(__dirname, 'db-convert/db/files/uploads', mongoId);
// Get file info from database for proper content-type
const fileInfo = await global.pool.query(`
SELECT name, type
FROM uploads
WHERE mongo_id = $1
LIMIT 1
`, [mongoId]);
if (fileInfo.rows.length === 0) {
return res.status(404).json({ error: 'File metadata not found' });
}
const { name, type } = fileInfo.rows[0];
// Set proper content type
if (type) {
res.set('Content-Type', type);
}
// Set content disposition with original filename
if (name) {
res.set('Content-Disposition', `inline; filename="${name}"`);
}
// Send the file
res.sendFile(filePath, (err) => {
if (err) {
console.error('Error serving file:', err);
if (!res.headersSent) {
res.status(404).json({ error: 'File not found on disk' });
}
}
});
} catch (error) {
console.error('Error serving upload:', error);
res.status(500).json({ error: 'Server error' });
}
});
// Also serve files directly by mongo_id for simpler access
router.get('/files/by-id/:mongoId', async (req, res) => {
try {
const { mongoId } = req.params;
// Get file info from database
const fileInfo = await global.pool.query(`
SELECT name, type
FROM uploads
WHERE mongo_id = $1
LIMIT 1
`, [mongoId]);
if (fileInfo.rows.length === 0) {
return res.status(404).json({ error: 'File not found' });
}
const { name, type } = fileInfo.rows[0];
const filePath = path.join(__dirname, 'db-convert/db/files/uploads', mongoId);
// Set proper content type and filename
if (type) {
res.set('Content-Type', type);
}
if (name) {
res.set('Content-Disposition', `inline; filename="${name}"`);
}
// Send the file
res.sendFile(filePath, (err) => {
if (err) {
console.error('Error serving file:', err);
if (!res.headersSent) {
res.status(404).json({ error: 'File not found on disk' });
}
}
});
} catch (error) {
console.error('Error serving upload by ID:', error);
res.status(500).json({ error: 'Server error' });
}
});
// Serve user avatars by mongo_id
router.get('/avatar/:mongoId', async (req, res) => {
try {
const { mongoId } = req.params;
console.log(`[Avatar Debug] Looking up avatar for user mongo_id: ${mongoId}`);
// First try to find avatar by user's avataretag
const userResult = await global.pool.query(`
SELECT avataretag, username FROM users WHERE mongo_id = $1
`, [mongoId]);
let avatarPath = null;
if (userResult.rows.length > 0) {
const username = userResult.rows[0].username;
const avataretag = userResult.rows[0].avataretag;
// Try method 1: Look up by avataretag -> etag (for users with avataretag set)
if (avataretag) {
console.log(`[Avatar Debug] Found user ${username} with avataretag: ${avataretag}`);
const avatarResult = await global.pool.query(`
SELECT url, path FROM avatars WHERE etag = $1
`, [avataretag]);
if (avatarResult.rows.length > 0) {
const dbPath = avatarResult.rows[0].path || avatarResult.rows[0].url;
console.log(`[Avatar Debug] Found avatar record with path: ${dbPath}`);
if (dbPath) {
const pathParts = dbPath.split('/');
for (let i = 0; i < pathParts.length; i++) {
if (pathParts[i].includes('AmazonS3:Avatars') && i + 1 < pathParts.length) {
const avatarMongoId = pathParts[i + 1];
avatarPath = path.join(__dirname, 'db-convert/db/files/avatars', avatarMongoId);
console.log(`[Avatar Debug] Extracted avatar mongo_id: ${avatarMongoId}, full path: ${avatarPath}`);
break;
}
}
}
} else {
console.log(`[Avatar Debug] No avatar record found for etag: ${avataretag}`);
}
}
// Try method 2: Look up by userid directly (for users without avataretag)
if (!avatarPath) {
console.log(`[Avatar Debug] Trying direct userid lookup for user ${username} (${mongoId})`);
const avatarResult = await global.pool.query(`
SELECT url, path FROM avatars WHERE userid = $1
`, [mongoId]);
if (avatarResult.rows.length > 0) {
const dbPath = avatarResult.rows[0].path || avatarResult.rows[0].url;
console.log(`[Avatar Debug] Found avatar record by userid with path: ${dbPath}`);
if (dbPath) {
const pathParts = dbPath.split('/');
for (let i = 0; i < pathParts.length; i++) {
if (pathParts[i].includes('AmazonS3:Avatars') && i + 1 < pathParts.length) {
const avatarMongoId = pathParts[i + 1];
avatarPath = path.join(__dirname, 'db-convert/db/files/avatars', avatarMongoId);
console.log(`[Avatar Debug] Extracted avatar mongo_id: ${avatarMongoId}, full path: ${avatarPath}`);
break;
}
}
}
} else {
console.log(`[Avatar Debug] No avatar record found for userid: ${mongoId}`);
}
}
} else {
console.log(`[Avatar Debug] No user found for mongo_id: ${mongoId}`);
}
// Fallback: try direct lookup by user mongo_id
if (!avatarPath) {
avatarPath = path.join(__dirname, 'db-convert/db/files/avatars', mongoId);
console.log(`[Avatar Debug] Using fallback path: ${avatarPath}`);
}
// Set proper content type for images
res.set('Content-Type', 'image/jpeg'); // Most avatars are likely JPEG
// Send the file
res.sendFile(avatarPath, (err) => {
if (err) {
// If avatar doesn't exist, send a default 404 or generate initials
console.log(`[Avatar Debug] Avatar file not found at path: ${avatarPath}, error:`, err.message);
if (!res.headersSent) {
res.status(404).json({ error: 'Avatar not found' });
}
} else {
console.log(`[Avatar Debug] Successfully served avatar from: ${avatarPath}`);
}
});
} catch (error) {
console.error('Error serving avatar:', error);
res.status(500).json({ error: 'Server error' });
}
});
// Serve avatars statically as fallback
router.use('/files/avatars', express.static(path.join(__dirname, 'db-convert/db/files/avatars')));
// Get all users for the "view as" dropdown (active and inactive)
router.get('/users', async (req, res) => {
try {
const result = await global.pool.query(`
SELECT id, username, name, type, active, status, lastlogin,
statustext, utcoffset, statusconnection, mongo_id, avataretag
FROM users
WHERE type = 'user'
ORDER BY
active DESC, -- Active users first
CASE
WHEN status = 'online' THEN 1
WHEN status = 'away' THEN 2
WHEN status = 'busy' THEN 3
ELSE 4
END,
name ASC
`);
res.json({
status: 'success',
users: result.rows
});
} catch (error) {
console.error('Error fetching users:', error);
res.status(500).json({
status: 'error',
error: 'Failed to fetch users',
details: error.message
});
}
});
// Get rooms for a specific user with enhanced room names for direct messages
router.get('/users/:userId/rooms', async (req, res) => {
const { userId } = req.params;
try {
// Get the current user's mongo_id for filtering
const userResult = await global.pool.query(`
SELECT mongo_id, username FROM users WHERE id = $1
`, [userId]);
if (userResult.rows.length === 0) {
return res.status(404).json({
status: 'error',
error: 'User not found'
});
}
const currentUserMongoId = userResult.rows[0].mongo_id;
const currentUsername = userResult.rows[0].username;
// Get rooms where the user is a member with proper naming from subscription table
// Include archived and closed rooms but sort them at the bottom
const result = await global.pool.query(`
SELECT DISTINCT
r.id,
r.mongo_id as room_mongo_id,
r.name,
r.fname,
r.t as type,
r.msgs,
r.lm as last_message_date,
r.usernames,
r.uids,
r.userscount,
r.description,
r.teamid,
r.archived,
s.open,
-- Use the subscription's name for direct messages (excludes current user)
-- For channels/groups, use room's fname or name
CASE
WHEN r.t = 'd' THEN COALESCE(s.fname, s.name, 'Unknown User')
ELSE COALESCE(r.fname, r.name, 'Unnamed Room')
END as display_name
FROM room r
JOIN subscription s ON s.rid = r.mongo_id
WHERE s.u->>'_id' = $1
ORDER BY
s.open DESC NULLS LAST, -- Open rooms first
r.archived NULLS FIRST, -- Non-archived first (nulls treated as false)
r.lm DESC NULLS LAST
LIMIT 50
`, [currentUserMongoId]);
// Enhance rooms with participant information for direct messages
const enhancedRooms = await Promise.all(result.rows.map(async (room) => {
if (room.type === 'd' && room.uids) {
// Get participant info (excluding current user) for direct messages
const participantResult = await global.pool.query(`
SELECT u.username, u.name, u.mongo_id, u.avataretag
FROM users u
WHERE u.mongo_id = ANY($1::text[])
AND u.mongo_id != $2
`, [room.uids, currentUserMongoId]);
room.participants = participantResult.rows;
}
return room;
}));
res.json({
status: 'success',
rooms: enhancedRooms
});
} catch (error) {
console.error('Error fetching user rooms:', error);
res.status(500).json({
status: 'error',
error: 'Failed to fetch user rooms',
details: error.message
});
}
});
// Get room details including participants
router.get('/rooms/:roomId', async (req, res) => {
const { roomId } = req.params;
const { userId } = req.query; // Accept current user ID as query parameter
try {
const result = await global.pool.query(`
SELECT r.id, r.name, r.fname, r.t as type, r.msgs, r.description,
r.lm as last_message_date, r.usernames, r.uids, r.userscount, r.teamid
FROM room r
WHERE r.id = $1
`, [roomId]);
if (result.rows.length === 0) {
return res.status(404).json({
status: 'error',
error: 'Room not found'
});
}
const room = result.rows[0];
// For direct messages, get the proper display name based on current user
if (room.type === 'd' && room.uids && userId) {
// Get current user's mongo_id
const userResult = await global.pool.query(`
SELECT mongo_id FROM users WHERE id = $1
`, [userId]);
if (userResult.rows.length > 0) {
const currentUserMongoId = userResult.rows[0].mongo_id;
// Get display name from subscription table for this user
// Use room mongo_id to match with subscription.rid
const roomMongoResult = await global.pool.query(`
SELECT mongo_id FROM room WHERE id = $1
`, [roomId]);
if (roomMongoResult.rows.length > 0) {
const roomMongoId = roomMongoResult.rows[0].mongo_id;
const subscriptionResult = await global.pool.query(`
SELECT fname, name FROM subscription
WHERE rid = $1 AND u->>'_id' = $2
`, [roomMongoId, currentUserMongoId]);
if (subscriptionResult.rows.length > 0) {
const sub = subscriptionResult.rows[0];
room.display_name = sub.fname || sub.name || 'Unknown User';
}
}
}
// Get all participants for additional info
const participantResult = await global.pool.query(`
SELECT username, name
FROM users
WHERE mongo_id = ANY($1::text[])
`, [room.uids]);
room.participants = participantResult.rows;
} else {
// For channels/groups, use room's fname or name
room.display_name = room.fname || room.name || 'Unnamed Room';
}
res.json({
status: 'success',
room: room
});
} catch (error) {
console.error('Error fetching room details:', error);
res.status(500).json({
status: 'error',
error: 'Failed to fetch room details',
details: error.message
});
}
});
// Get messages for a specific room (fast, without attachments)
router.get('/rooms/:roomId/messages', async (req, res) => {
const { roomId } = req.params;
const { limit = 50, offset = 0, before } = req.query;
try {
// Fast query - just get messages without expensive attachment joins
let query = `
SELECT m.id, m.msg, m.ts, m.u, m._updatedat, m.urls, m.mentions, m.md
FROM message m
JOIN room r ON m.rid = r.mongo_id
WHERE r.id = $1
`;
const params = [roomId];
if (before) {
query += ` AND m.ts < $${params.length + 1}`;
params.push(before);
}
query += ` ORDER BY m.ts DESC LIMIT $${params.length + 1} OFFSET $${params.length + 2}`;
params.push(limit, offset);
const result = await global.pool.query(query, params);
// Add empty attachments array for now - attachments will be loaded separately if needed
const messages = result.rows.map(msg => ({
...msg,
attachments: []
}));
res.json({
status: 'success',
messages: messages.reverse() // Reverse to show oldest first
});
} catch (error) {
console.error('Error fetching messages:', error);
res.status(500).json({
status: 'error',
error: 'Failed to fetch messages',
details: error.message
});
}
});
// Get attachments for specific messages (called separately for performance)
router.post('/messages/attachments', async (req, res) => {
const { messageIds } = req.body;
if (!messageIds || !Array.isArray(messageIds) || messageIds.length === 0) {
return res.json({ status: 'success', attachments: {} });
}
try {
// Get room mongo_id from first message to limit search scope
const roomQuery = await global.pool.query(`
SELECT r.mongo_id as room_mongo_id
FROM message m
JOIN room r ON m.rid = r.mongo_id
WHERE m.id = $1
LIMIT 1
`, [messageIds[0]]);
if (roomQuery.rows.length === 0) {
return res.json({ status: 'success', attachments: {} });
}
const roomMongoId = roomQuery.rows[0].room_mongo_id;
// Get messages and their upload timestamps
const messagesQuery = await global.pool.query(`
SELECT m.id, m.ts, m.u->>'_id' as user_id
FROM message m
WHERE m.id = ANY($1::int[])
`, [messageIds]);
if (messagesQuery.rows.length === 0) {
return res.json({ status: 'success', attachments: {} });
}
// Build a map of user_id -> array of message timestamps for efficient lookup
const userTimeMap = {};
const messageMap = {};
messagesQuery.rows.forEach(msg => {
if (!userTimeMap[msg.user_id]) {
userTimeMap[msg.user_id] = [];
}
userTimeMap[msg.user_id].push(msg.ts);
messageMap[msg.id] = { ts: msg.ts, user_id: msg.user_id };
});
// Get attachments for this room and these users
const uploadsQuery = await global.pool.query(`
SELECT id, mongo_id, name, size, type, url, path, typegroup, identify,
userid, uploadedat
FROM uploads
WHERE rid = $1
AND userid = ANY($2::text[])
ORDER BY uploadedat
`, [roomMongoId, Object.keys(userTimeMap)]);
// Match attachments to messages based on timestamp proximity (within 5 minutes)
const attachmentsByMessage = {};
uploadsQuery.rows.forEach(upload => {
const uploadTime = new Date(upload.uploadedat).getTime();
// Find the closest message from this user within 5 minutes
let closestMessageId = null;
let closestTimeDiff = Infinity;
Object.entries(messageMap).forEach(([msgId, msgData]) => {
if (msgData.user_id === upload.userid) {
const msgTime = new Date(msgData.ts).getTime();
const timeDiff = Math.abs(uploadTime - msgTime);
if (timeDiff < 300000 && timeDiff < closestTimeDiff) { // 5 minutes = 300000ms
closestMessageId = msgId;
closestTimeDiff = timeDiff;
}
}
});
if (closestMessageId) {
if (!attachmentsByMessage[closestMessageId]) {
attachmentsByMessage[closestMessageId] = [];
}
attachmentsByMessage[closestMessageId].push({
id: upload.id,
mongo_id: upload.mongo_id,
name: upload.name,
size: upload.size,
type: upload.type,
url: upload.url,
path: upload.path,
typegroup: upload.typegroup,
identify: upload.identify
});
}
});
res.json({
status: 'success',
attachments: attachmentsByMessage
});
} catch (error) {
console.error('Error fetching message attachments:', error);
res.status(500).json({
status: 'error',
error: 'Failed to fetch attachments',
details: error.message
});
}
});
// Search messages in accessible rooms for a user
router.get('/users/:userId/search', async (req, res) => {
const { userId } = req.params;
const { q, limit = 20 } = req.query;
if (!q || q.length < 2) {
return res.status(400).json({
status: 'error',
error: 'Search query must be at least 2 characters'
});
}
try {
const userResult = await global.pool.query(`
SELECT mongo_id FROM users WHERE id = $1
`, [userId]);
if (userResult.rows.length === 0) {
return res.status(404).json({
status: 'error',
error: 'User not found'
});
}
const currentUserMongoId = userResult.rows[0].mongo_id;
const result = await global.pool.query(`
SELECT m.id, m.msg, m.ts, m.u, r.id as room_id, r.name as room_name, r.fname as room_fname, r.t as room_type
FROM message m
JOIN room r ON m.rid = r.mongo_id
JOIN subscription s ON s.rid = r.mongo_id AND s.u->>'_id' = $1
WHERE m.msg ILIKE $2
AND r.archived IS NOT TRUE
ORDER BY m.ts DESC
LIMIT $3
`, [currentUserMongoId, `%${q}%`, limit]);
res.json({
status: 'success',
results: result.rows
});
} catch (error) {
console.error('Error searching messages:', error);
res.status(500).json({
status: 'error',
error: 'Failed to search messages',
details: error.message
});
}
});
module.exports = router;


@@ -0,0 +1,83 @@
require('dotenv').config({ path: '../.env' });
const express = require('express');
const cors = require('cors');
const { Pool } = require('pg');
const morgan = require('morgan');
const chatRoutes = require('./routes');
// Log startup configuration
console.log('Starting chat server with config:', {
host: process.env.CHAT_DB_HOST,
user: process.env.CHAT_DB_USER,
database: process.env.CHAT_DB_NAME || 'rocketchat_converted',
port: process.env.CHAT_DB_PORT,
chat_port: process.env.CHAT_PORT || 3014
});
const app = express();
const port = process.env.CHAT_PORT || 3014;
// Database configuration for rocketchat_converted database
const pool = new Pool({
host: process.env.CHAT_DB_HOST,
user: process.env.CHAT_DB_USER,
password: process.env.CHAT_DB_PASSWORD,
database: process.env.CHAT_DB_NAME || 'rocketchat_converted',
port: process.env.CHAT_DB_PORT,
});
// Make pool available globally
global.pool = pool;
// Middleware
app.use(express.json());
app.use(morgan('combined'));
app.use(cors({
origin: ['http://localhost:5175', 'http://localhost:5174', 'https://inventory.kent.pw'],
credentials: true
}));
// Test database connection endpoint
app.get('/test-db', async (req, res) => {
try {
const result = await pool.query('SELECT COUNT(*) as user_count FROM users WHERE active = true');
const messageResult = await pool.query('SELECT COUNT(*) as message_count FROM message');
const roomResult = await pool.query('SELECT COUNT(*) as room_count FROM room');
res.json({
status: 'success',
database: 'rocketchat_converted',
stats: {
active_users: parseInt(result.rows[0].user_count),
total_messages: parseInt(messageResult.rows[0].message_count),
total_rooms: parseInt(roomResult.rows[0].room_count)
}
});
} catch (error) {
console.error('Database test error:', error);
res.status(500).json({
status: 'error',
error: 'Database connection failed',
details: error.message
});
}
});
// Mount all routes from routes.js
app.use('/', chatRoutes);
// Health check endpoint
app.get('/health', (req, res) => {
res.json({ status: 'healthy' });
});
// Error handling middleware
app.use((err, req, res, next) => {
console.error(err.stack);
res.status(500).json({ error: 'Something broke!' });
});
// Start server
app.listen(port, () => {
console.log(`Chat server running on port ${port}`);
});


@@ -0,0 +1,20 @@
# Caching Server Configuration
PORT=3010
NODE_ENV=production
# Database Configuration
MONGODB_URI=mongodb://dashboard_user:WDRFWiGXEeaC6aAyUKuT@localhost:27017/dashboard?authSource=dashboard
REDIS_URL=redis://:Wgj32YXxxVLtPZoVzUnP@localhost:6379
# Gorgias
GORGIAS_API_USERNAME=matt@acherryontop.com
GORGIAS_API_PASSWORD=d2ed0d23d2a7bf11a633a12fb260769f4e4a970d440693e7d64b8d2223fa6503
# GA4 credentials
GA_PROPERTY_ID=281045851
GOOGLE_APPLICATION_CREDENTIALS_JSON={"type": "service_account","project_id": "acot-stats","private_key_id": "259d1fd9864efbfa38b8ba02fdd74dc008ace3c5","private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC5Y6foai8WF98k\nIA0yLn94Y3lmDYlyvI9xL2YqSZSyvgK35wdWRTIaEvHKdiUWuYi3ZPdkYmz1OYiV\njVfR2g+mFpA7MI/JMwyGWwjnV4WW2q6INfgi/PvHlbP3LyyQo0B8CvAY0CHqrpDs\nlJQhAkqmteU24dqcdZoV3vM8JMsDiXm44DqwXsEfWibKv4i0mWNkwiEQr0yImHwb\nbjgclwVLLi5kdM2+49PXr47LCODdL+xmX0uSdgSG6XYqEIVsEOXIUJKzqUe036b/\nEFQ0BxWdJBWs/MYOapn/NNv+Mts+am2ipUuIcgPbOut4xa2Fkky93WnJf0tB+VJP\njFnyZJhdAgMBAAECggEAC980Cp/4zvSNZMNWr6l8ST8u2thavnRmcoGYtx7ffQjK\nT3Dl2TefgJLzqpr2lLt3OVint7p5LsUAmE8lBLpu+RxbH9HkIKbPvQTfD5gyZQQx\nBruqCGzkn2st9fzZNj6gwQYe9P/TGYkUnR8wqI0nLwDZTQful3QNKixiWC4lAAoK\nqdd6H++pqjVUiTqgFwFD3zBAhO0Lp8m/c5vTRT5kxi0wCTK66FaaGLr2OwZHcohp\nE8rEcTZ5kaJzBwqEz522R6ufQqN1Swoq4K6Ul3aAc59539VdrLNs++/eRH38MMVq\n5UTwBrH+zIkXIYv4mtGpR1NWGO2bZ652GzGXNEXcQQKBgQD9WsMmioIeWR9P9I0r\nIY+yyxz1EyscutUtnOtROT36OxokrzQaAKDz/OC3jVnhZSkzG6RcmmK/AJrcU+2m\n1L4mZGfF3DdeTqtK/KkNzGs9yRPDkbb/MF0wgtcvfE8tJH/suiDJKQNsjeaQIQW3\n4NvDxs0w60m9r9tk1CQau94ovQKBgQC7UzeA0mDSxIB5agGbvnzaJJTvAFvnCvhz\nu3ZakTlNecAHu4eOMc0+OCHFPLJlLL4b0oraOxZIszX9BTlgcstBmTUk03TibNsS\nsDiImHFC4hE5x6EPdifnkVFUXPMZ/eF0mHUPBEn41ipw1hoLfl6W+aYW9QUxBMWA\nzdMH4rg4IQKBgQCFcMaUiCNchKhfXnj0HKspCp3n3v64FReu/JVcpH+mSnbMl5Mj\nlu0vVSOuyb5rXvLCPm7lb1NPMqxeG75yPl8grYWSyxhGjbzetBD+eYqKclv8h8UQ\nx5JtuJxKIHk7V5whPS+DhByPknW7uAjg/ogBp7XvbB3c0MEHbEzP3991KQKBgC+a\n610Kmd6WX4v7e6Mn2rTZXRwL/E8QA6nttxs3Etf0m++bIczqLR2lyDdGwJNjtoB9\nlhn1sCkTmiHOBRHUuoDWPaI5NtggD+CE9ikIjKgRqY0EhZLXVTbNQFzvLjypv3UR\nFZaWYXIigzCfyIipOcKmeSYWaJZXfxXHuNylKmnhAoGAFa84AuOOGUr+pEvtUzIr\nvBKu1mnQbbsLEhgf3Tw88K3sO5OlguAwBEvD4eitj/aU5u2vJJhFa67cuERLsZru\n0sjtQwP6CJbWF4uaH0Hso4KQvnwl4BfdKwUncqoKtHrQiuGMvr5P5G941+Ax8brE\nJlC2e/RPUQKxScpK3nNK9mc=\n-----END PRIVATE KEY-----\n","client_email": "matt-dashboard@acot-stats.iam.gserviceaccount.com","client_id": "106112731322970982546","auth_uri": "https://accounts.google.com/o/oauth2/auth","token_uri": "https://oauth2.googleapis.com/token","auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs","client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/matt-dashboard%40acot-stats.iam.gserviceaccount.com","universe_domain": "googleapis.com"}
# Logging
LOG_LEVEL=info
LOG_MAX_SIZE=10m
LOG_MAX_FILES=5


@@ -0,0 +1,205 @@
# ACOT Server
This server replaces the Klaviyo integration with direct database queries to the production MySQL database via SSH tunnel. It provides seamless API compatibility for all frontend components without requiring any frontend changes.
## Setup
1. **Environment Variables**: Copy `.env.example` to `.env` and configure:
```
DB_HOST=localhost
DB_PORT=3306
DB_USER=your_db_user
DB_PASSWORD=your_db_password
DB_NAME=your_db_name
PORT=3007
NODE_ENV=development
```
2. **SSH Tunnel**: Ensure your SSH tunnel to the production database is running on localhost:3306.
3. **Install Dependencies**:
```bash
npm install
```
4. **Start Server**:
```bash
npm start
```
## API Endpoints
All endpoints provide exact API compatibility with the previous Klaviyo implementation:
### Main Statistics
- `GET /api/acot/events/stats` - Complete statistics dashboard data
- Query params: `timeRange` (today, yesterday, thisWeek, lastWeek, thisMonth, lastMonth, last7days, last30days, last90days) or `startDate`/`endDate` for custom ranges
- Returns: Revenue, orders, AOV, shipping data, order types, brands/categories, refunds, cancellations, best day, peak hour, order ranges, period progress, projections
### Daily Details
- `GET /api/acot/events/stats/details` - Daily breakdown with previous period comparisons
- Query params: `timeRange`, `metric` (revenue, orders, average_order, etc.), `daily=true`
- Returns: Array of daily data points with trend comparisons
### Products
- `GET /api/acot/events/products` - Top products with sales data
- Query params: `timeRange`
- Returns: Product list with images, sales quantities, revenue, and order counts
### Projections
- `GET /api/acot/events/projection` - Smart revenue projections for incomplete periods
- Query params: `timeRange`
- Returns: Projected revenue with confidence levels based on historical patterns
### Health Check
- `GET /api/acot/test` - Server health and database connectivity test
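For a concrete sense of the endpoints above, a minimal client call might look like the following sketch (assumes a local server on port 3007 and Node 18+ for the built-in `fetch`; the exact response fields depend on the handlers in this repo):
```javascript
// Sketch: pull today's stats and this week's top products from the ACOT server.
async function fetchAcotSnapshot() {
  const base = 'http://localhost:3007/api/acot';

  const statsRes = await fetch(`${base}/events/stats?timeRange=today`);
  if (!statsRes.ok) throw new Error(`stats request failed: ${statsRes.status}`);
  const stats = await statsRes.json();

  const productsRes = await fetch(`${base}/events/products?timeRange=thisWeek`);
  if (!productsRes.ok) throw new Error(`products request failed: ${productsRes.status}`);
  const products = await productsRes.json();

  return { stats, products };
}

fetchAcotSnapshot().then(console.log).catch(console.error);
```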
## Database Schema
The server queries the following main tables:
### Orders (`_order`)
- **Key fields**: `order_id`, `date_placed`, `summary_total`, `order_status`, `ship_method_selected`, `stats_waiting_preorder`
- **Valid orders**: `order_status > 15`
- **Cancelled orders**: `order_status = 15`
- **Shipped orders**: `order_status IN (100, 92)`
- **Pre-orders**: `stats_waiting_preorder > 0`
- **Local pickup**: `ship_method_selected = 'localpickup'`
- **On-hold orders**: `ship_method_selected = 'holdit'`
### Order Items (`order_items`)
- **Fields**: `order_id`, `prod_pid`, `qty_ordered`, `prod_price`
- **Purpose**: Links orders to products for detailed analysis
### Products (`products`)
- **Fields**: `pid`, `description` (product name), `company`
- **Purpose**: Product information and brand data
### Product Images (`product_images`)
- **Fields**: `pid`, `iid`, `order` (priority)
- **Primary image**: `order = 255` (highest priority)
- **Image URL generation**: `https://sbing.com/i/products/0000/{prefix}/{pid}-{type}-{iid}.jpg`
### Payments (`order_payment`)
- **Refunds**: `payment_amount < 0`
- **Purpose**: Track refund amounts and counts
## Business Logic
### Time Handling
- **Timezone**: All calculations in UTC-5 (Eastern Time)
- **Business Day**: 1 AM - 12:59 AM Eastern (25-hour business day)
- **Format**: MySQL DATETIME format (YYYY-MM-DD HH:MM:SS)
- **Period Boundaries**: Calculated using `timeUtils.js` for consistent time range handling
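As a rough illustration of the time handling above, here is a simplified sketch that derives a business-day window with Luxon (already a dependency). It is an approximation only: the helper name is made up, it uses the `America/New_York` zone rather than a fixed UTC-5 offset, and the project's actual boundary logic lives in `timeUtils.js`.
```javascript
const { DateTime } = require('luxon');

// Hypothetical helper: the current business-day window, starting at 1 AM Eastern,
// formatted for MySQL DATETIME comparisons (YYYY-MM-DD HH:MM:SS).
function getBusinessDayRange(now = DateTime.now().setZone('America/New_York')) {
  // Before 1 AM we are still inside the previous business day.
  const start = (now.hour < 1 ? now.minus({ days: 1 }) : now)
    .set({ hour: 1, minute: 0, second: 0, millisecond: 0 });
  const end = start.plus({ days: 1 }); // exclusive upper bound
  const fmt = 'yyyy-LL-dd HH:mm:ss';
  return { start: start.toFormat(fmt), end: end.toFormat(fmt) };
}

console.log(getBusinessDayRange());
```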
### Order Processing
- **Revenue Calculation**: Only includes orders with `order_status > 15`
- **Order Types**:
- Pre-orders: `stats_waiting_preorder > 0`
- Local pickup: `ship_method_selected = 'localpickup'`
- On-hold: `ship_method_selected = 'holdit'`
- **Shipping Methods**: Mapped to friendly names (e.g., `usps_ground_advantage` → "USPS Ground Advantage")
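A compact sketch of how the rules above translate into a filter and a per-order classification (illustrative only; column names follow the schema section earlier in this README):
```javascript
// Only orders past status 15 count toward revenue for a given period.
const validOrdersSql = `
  SELECT order_id, summary_total, ship_method_selected, stats_waiting_preorder
  FROM _order
  WHERE order_status > 15
    AND date_placed BETWEEN ? AND ?
`;

// Classify a single order row using the documented flags.
function classifyOrder(row) {
  if (row.stats_waiting_preorder > 0) return 'preorder';
  if (row.ship_method_selected === 'localpickup') return 'local_pickup';
  if (row.ship_method_selected === 'holdit') return 'on_hold';
  return 'standard';
}
```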
### Projections
- **Period Progress**: Calculated based on current time within the selected period
- **Simple Projection**: Linear extrapolation based on current progress
- **Smart Projection**: Uses historical data patterns for more accurate forecasting
- **Confidence Levels**: Based on data consistency and historical accuracy
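The simple projection is just a linear extrapolation over period progress; a minimal sketch (the smart projection's historical weighting is implemented in the server code and not reproduced here):
```javascript
// If 40% of the period has elapsed and $10,000 has been booked, project $25,000.
function projectRevenue(revenueSoFar, periodProgress) {
  // periodProgress is a fraction in (0, 1]; guard against division by zero.
  if (periodProgress <= 0) return null;
  return revenueSoFar / periodProgress;
}

console.log(projectRevenue(10000, 0.4)); // 25000
```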
### Image URL Generation
- **Pattern**: `https://sbing.com/i/products/0000/{prefix}/{pid}-{type}-{iid}.jpg`
- **Prefix**: First 2 digits of product ID
- **Type**: "main" for primary images
- **Fallback**: Uses primary image (order=255) when available
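A minimal sketch of the URL rule above (the handling of short product IDs and the fixed `0000` segment are assumptions drawn only from the pattern as documented):
```javascript
// Build a product image URL from a product id (pid) and image id (iid).
// Pattern: https://sbing.com/i/products/0000/{prefix}/{pid}-{type}-{iid}.jpg
function buildProductImageUrl(pid, iid, type = 'main') {
  const prefix = String(pid).slice(0, 2); // first 2 digits of the product id
  return `https://sbing.com/i/products/0000/${prefix}/${pid}-${type}-${iid}.jpg`;
}

console.log(buildProductImageUrl(123456, 7890));
// => https://sbing.com/i/products/0000/12/123456-main-7890.jpg
```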
## Frontend Integration
### Service Layer (`services/acotService.js`)
- **Purpose**: Replaces direct Klaviyo API calls with acot-server calls
- **Methods**: `getStats()`, `getStatsDetails()`, `getProducts()`, `getProjection()`
- **Logging**: Axios interceptors for request/response logging
- **Environment**: Automatic URL handling (proxy in dev, direct in production)
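The actual `services/acotService.js` is not included in this diff; the following is a sketch of what such a service layer can look like, using the method names listed above (the axios setup and logging format are illustrative assumptions):
```javascript
import axios from 'axios';

// Hypothetical shape of the service layer described above.
const api = axios.create({ baseURL: '/api/acot' });

// Request logging via an interceptor, mirroring the "Logging" bullet above.
api.interceptors.request.use((cfg) => {
  console.log(`[acotService] ${cfg.method.toUpperCase()} ${cfg.url}`, cfg.params || {});
  return cfg;
});

const acotService = {
  getStats: (params) => api.get('/events/stats', { params }).then((r) => r.data),
  getStatsDetails: (params) => api.get('/events/stats/details', { params }).then((r) => r.data),
  getProducts: (params) => api.get('/events/products', { params }).then((r) => r.data),
  getProjection: (params) => api.get('/events/projection', { params }).then((r) => r.data),
};

export default acotService;
```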
### Component Updates
All 5 main components updated to use `acotService`:
- **StatCards.jsx**: Main dashboard statistics
- **MiniStatCards.jsx**: Compact statistics view
- **SalesChart.jsx**: Revenue and order trends
- **MiniSalesChart.jsx**: Compact chart view
- **ProductGrid.jsx**: Top products table
### Proxy Configuration (`vite.config.js`)
```javascript
'/api/acot': {
target: 'http://localhost:3007',
changeOrigin: true,
secure: false
}
```
## Key Features
### Complete Business Intelligence
- **Revenue Analytics**: Total revenue, trends, projections
- **Order Analysis**: Counts, types, status tracking
- **Product Performance**: Top sellers, revenue contribution
- **Shipping Intelligence**: Methods, locations, distribution
- **Customer Insights**: Order value ranges, patterns
- **Operational Metrics**: Refunds, cancellations, peak hours
### Performance Optimizations
- **Connection Pooling**: Efficient database connection management
- **Query Optimization**: Indexed queries with proper WHERE clauses
- **Caching Strategy**: Frontend caching for detail views
- **Batch Processing**: Efficient data aggregation
### Error Handling
- **Database Connectivity**: Graceful handling of connection issues
- **Query Failures**: Detailed error logging and user-friendly messages
- **Data Validation**: Input sanitization and validation
- **Fallback Mechanisms**: Default values for missing data
## Simplified Elements
Due to database complexity, some features are currently simplified:
- **Brands**: Shows "Various Brands" (the companies table structure is complex)
- **Categories**: Shows "General" (category relationships are complex)
Both can be enhanced in future iterations with proper brand and category mapping.
## Testing
Test the server functionality:
```bash
# Health check
curl http://localhost:3007/api/acot/test
# Today's stats
curl "http://localhost:3007/api/acot/events/stats?timeRange=today"
# Last 30 days with details
curl "http://localhost:3007/api/acot/events/stats/details?timeRange=last30days&daily=true"
# Top products
curl "http://localhost:3007/api/acot/events/products?timeRange=thisWeek"
# Revenue projection
curl "http://localhost:3007/api/acot/events/projection?timeRange=today"
```
## Development Notes
- **No Frontend Changes**: Complete drop-in replacement for Klaviyo
- **API Compatibility**: Maintains exact response structure
- **Business Logic**: Implements all complex e-commerce calculations
- **Scalability**: Designed for production workloads
- **Maintainability**: Well-documented code with clear separation of concerns
## Future Enhancements
- Enhanced category and brand mapping
- Real-time notifications for significant events
- Advanced analytics and forecasting
- Customer segmentation analysis
- Inventory integration


@@ -0,0 +1,297 @@
const { Client } = require('ssh2');
const mysql = require('mysql2/promise');
const fs = require('fs');
// Connection pool configuration
const connectionPool = {
connections: [],
maxConnections: 20,
currentConnections: 0,
pendingRequests: [],
// Cache for query results (key: query string, value: {data, timestamp})
queryCache: new Map(),
// Cache duration for different query types in milliseconds
cacheDuration: {
'stats': 60 * 1000, // 1 minute for stats
'products': 5 * 60 * 1000, // 5 minutes for products
'orders': 60 * 1000, // 1 minute for orders
'default': 60 * 1000 // 1 minute default
},
// Circuit breaker state
circuitBreaker: {
failures: 0,
lastFailure: 0,
isOpen: false,
threshold: 5,
timeout: 30000 // 30 seconds
}
};
/**
* Get a database connection from the pool
* @returns {Promise<{connection: object, release: function}>} The database connection and release function
*/
async function getDbConnection() {
return new Promise(async (resolve, reject) => {
// Check circuit breaker
const now = Date.now();
if (connectionPool.circuitBreaker.isOpen) {
if (now - connectionPool.circuitBreaker.lastFailure > connectionPool.circuitBreaker.timeout) {
// Reset circuit breaker
connectionPool.circuitBreaker.isOpen = false;
connectionPool.circuitBreaker.failures = 0;
console.log('Circuit breaker reset');
} else {
reject(new Error('Circuit breaker is open - too many connection failures'));
return;
}
}
// Check if there's an available connection in the pool
if (connectionPool.connections.length > 0) {
const conn = connectionPool.connections.pop();
console.log(`Using pooled connection. Pool size: ${connectionPool.connections.length}`);
resolve({
connection: conn.connection,
release: () => releaseConnection(conn)
});
return;
}
// If we haven't reached max connections, create a new one
if (connectionPool.currentConnections < connectionPool.maxConnections) {
try {
console.log(`Creating new connection. Current: ${connectionPool.currentConnections}/${connectionPool.maxConnections}`);
connectionPool.currentConnections++;
const tunnel = await setupSshTunnel();
const { ssh, stream, dbConfig } = tunnel;
const connection = await mysql.createConnection({
...dbConfig,
stream
});
const conn = { ssh, connection, inUse: true, created: Date.now() };
console.log('Database connection established');
// Reset circuit breaker on successful connection
if (connectionPool.circuitBreaker.failures > 0) {
connectionPool.circuitBreaker.failures = 0;
connectionPool.circuitBreaker.isOpen = false;
}
resolve({
connection: conn.connection,
release: () => releaseConnection(conn)
});
} catch (error) {
connectionPool.currentConnections--;
// Track circuit breaker failures
connectionPool.circuitBreaker.failures++;
connectionPool.circuitBreaker.lastFailure = Date.now();
if (connectionPool.circuitBreaker.failures >= connectionPool.circuitBreaker.threshold) {
connectionPool.circuitBreaker.isOpen = true;
console.log(`Circuit breaker opened after ${connectionPool.circuitBreaker.failures} failures`);
}
reject(error);
}
return;
}
// Pool is full, queue the request with timeout
console.log('Connection pool full, queuing request...');
const timeoutId = setTimeout(() => {
// Remove from queue if still there
const index = connectionPool.pendingRequests.findIndex(req => req.resolve === resolve);
if (index !== -1) {
connectionPool.pendingRequests.splice(index, 1);
reject(new Error('Connection pool queue timeout after 15 seconds'));
}
}, 15000);
connectionPool.pendingRequests.push({
resolve,
reject,
timeoutId,
timestamp: Date.now()
});
});
}
/**
* Release a connection back to the pool
*/
function releaseConnection(conn) {
conn.inUse = false;
// Check if there are pending requests
if (connectionPool.pendingRequests.length > 0) {
const { resolve, timeoutId } = connectionPool.pendingRequests.shift();
// Clear the timeout since we're serving the request
if (timeoutId) {
clearTimeout(timeoutId);
}
conn.inUse = true;
console.log(`Serving queued request. Queue length: ${connectionPool.pendingRequests.length}`);
resolve({
connection: conn.connection,
release: () => releaseConnection(conn)
});
} else {
// Return to pool
connectionPool.connections.push(conn);
console.log(`Connection returned to pool. Pool size: ${connectionPool.connections.length}, Active: ${connectionPool.currentConnections}`);
}
}
/**
* Get cached query results or execute query if not cached
* @param {string} cacheKey - Unique key to identify the query
* @param {string} queryType - Type of query (stats, products, orders, etc.)
* @param {Function} queryFn - Function to execute if cache miss
* @returns {Promise<any>} The query result
*/
async function getCachedQuery(cacheKey, queryType, queryFn) {
// Get cache duration based on query type
const cacheDuration = connectionPool.cacheDuration[queryType] || connectionPool.cacheDuration.default;
// Check if we have a valid cached result
const cachedResult = connectionPool.queryCache.get(cacheKey);
const now = Date.now();
if (cachedResult && (now - cachedResult.timestamp < cacheDuration)) {
console.log(`Cache hit for ${queryType} query: ${cacheKey}`);
return cachedResult.data;
}
// No valid cache found, execute the query
console.log(`Cache miss for ${queryType} query: ${cacheKey}`);
const result = await queryFn();
// Cache the result
connectionPool.queryCache.set(cacheKey, {
data: result,
timestamp: now
});
return result;
}
/**
* Setup SSH tunnel to production database
* @private - Should only be used by getDbConnection
* @returns {Promise<{ssh: object, stream: object, dbConfig: object}>}
*/
async function setupSshTunnel() {
const sshConfig = {
host: process.env.PROD_SSH_HOST,
port: process.env.PROD_SSH_PORT || 22,
username: process.env.PROD_SSH_USER,
privateKey: process.env.PROD_SSH_KEY_PATH
? fs.readFileSync(process.env.PROD_SSH_KEY_PATH)
: undefined,
compress: true
};
const dbConfig = {
host: process.env.PROD_DB_HOST || 'localhost',
user: process.env.PROD_DB_USER,
password: process.env.PROD_DB_PASSWORD,
database: process.env.PROD_DB_NAME,
port: process.env.PROD_DB_PORT || 3306,
timezone: 'Z'
};
return new Promise((resolve, reject) => {
const ssh = new Client();
ssh.on('error', (err) => {
console.error('SSH connection error:', err);
reject(err);
});
ssh.on('ready', () => {
ssh.forwardOut(
'127.0.0.1',
0,
dbConfig.host,
dbConfig.port,
(err, stream) => {
if (err) return reject(err);
resolve({ ssh, stream, dbConfig });
}
);
}).connect(sshConfig);
});
}
/**
* Clear cached query results
* @param {string} [cacheKey] - Specific cache key to clear (clears all if not provided)
*/
function clearQueryCache(cacheKey) {
if (cacheKey) {
connectionPool.queryCache.delete(cacheKey);
console.log(`Cleared cache for key: ${cacheKey}`);
} else {
connectionPool.queryCache.clear();
console.log('Cleared all query cache');
}
}
/**
* Force close all active connections
* Useful for server shutdown or manual connection reset
*/
async function closeAllConnections() {
// Close all pooled connections
for (const conn of connectionPool.connections) {
try {
await conn.connection.end();
conn.ssh.end();
console.log('Closed pooled connection');
} catch (error) {
console.error('Error closing pooled connection:', error);
}
}
// Reset pool state
connectionPool.connections = [];
connectionPool.currentConnections = 0;
connectionPool.pendingRequests = [];
connectionPool.queryCache.clear();
console.log('All connections closed and pool reset');
}
/**
* Get connection pool status for debugging
*/
function getPoolStatus() {
return {
poolSize: connectionPool.connections.length,
activeConnections: connectionPool.currentConnections,
maxConnections: connectionPool.maxConnections,
pendingRequests: connectionPool.pendingRequests.length,
cacheSize: connectionPool.queryCache.size,
queuedRequests: connectionPool.pendingRequests.map(req => ({
waitTime: Date.now() - req.timestamp,
hasTimeout: !!req.timeoutId
}))
};
}
module.exports = {
getDbConnection,
getCachedQuery,
clearQueryCache,
closeAllConnections,
getPoolStatus
};
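For reference, a usage sketch combining the exported helpers above: `getDbConnection` for a pooled connection and `getCachedQuery` for the 'stats' cache policy. The require path, query, and cache key are illustrative assumptions, not code from this repo.
```javascript
const { getDbConnection, getCachedQuery } = require('../db/connection');

// Count revenue-eligible orders for a calendar day, cached for one minute under the 'stats' policy.
async function countOrdersForDay(day) {
  return getCachedQuery(`order-count:${day}`, 'stats', async () => {
    const { connection, release } = await getDbConnection();
    try {
      const [rows] = await connection.execute(
        'SELECT COUNT(*) AS order_count FROM _order WHERE order_status > 15 AND DATE(date_placed) = ?',
        [day]
      );
      return rows[0].order_count;
    } finally {
      release(); // always hand the connection back to the pool
    }
  });
}

countOrdersForDay('2025-09-01').then(console.log).catch(console.error);
```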

File diff suppressed because it is too large


@@ -0,0 +1,23 @@
{
"name": "acot-server",
"version": "1.0.0",
"description": "A Cherry On Top production database server",
"main": "server.js",
"scripts": {
"start": "node server.js",
"dev": "nodemon server.js"
},
"dependencies": {
"express": "^4.18.2",
"cors": "^2.8.5",
"dotenv": "^16.3.1",
"morgan": "^1.10.0",
"ssh2": "^1.14.0",
"mysql2": "^3.6.5",
"compression": "^1.7.4",
"luxon": "^3.5.0"
},
"devDependencies": {
"nodemon": "^3.0.1"
}
}


@@ -0,0 +1,495 @@
const express = require('express');
const { DateTime } = require('luxon');
const { getDbConnection } = require('../db/connection');
const router = express.Router();
const RANGE_BOUNDS = [
10, 20, 30, 40, 50, 60, 70, 80, 90,
100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200,
300, 400, 500, 1000, 1500, 2000
];
const FINAL_BUCKET_KEY = 'PLUS';
function buildRangeDefinitions() {
const ranges = [];
let previous = 0;
for (const bound of RANGE_BOUNDS) {
const label = `$${previous.toLocaleString()} - $${bound.toLocaleString()}`;
const key = bound.toString().padStart(5, '0');
ranges.push({
min: previous,
max: bound,
label,
key,
sort: bound
});
previous = bound;
}
// Remove the 2000+ category - all orders >2000 will go into the 2000 bucket
return ranges;
}
const RANGE_DEFINITIONS = buildRangeDefinitions();
const BUCKET_CASE = (() => {
const parts = [];
for (let i = 0; i < RANGE_BOUNDS.length; i++) {
const bound = RANGE_BOUNDS[i];
const key = bound.toString().padStart(5, '0');
if (i === RANGE_BOUNDS.length - 1) {
// For the last bucket (2000), catch everything above the previous bound (1500) via ELSE
parts.push(`ELSE '${key}'`);
} else {
parts.push(`WHEN o.summary_subtotal <= ${bound} THEN '${key}'`);
}
}
return `CASE\n ${parts.join('\n ')}\n END`;
})();
const DEFAULT_POINT_DOLLAR_VALUE = 0.005; // 1000 points = $5
const DEFAULTS = {
merchantFeePercent: 2.9,
fixedCostPerOrder: 1.5,
pointsPerDollar: 0,
pointsRedemptionRate: 0,
pointDollarValue: DEFAULT_POINT_DOLLAR_VALUE,
};
function parseDate(value, fallback) {
if (!value) {
return fallback;
}
const parsed = DateTime.fromISO(value);
if (!parsed.isValid) {
return fallback;
}
return parsed;
}
function formatDateForSql(dt) {
return dt.toFormat('yyyy-LL-dd HH:mm:ss');
}
function getMidpoint(range) {
if (range.max == null) {
return range.min + 200; // Rough estimate for 2000+
}
return (range.min + range.max) / 2;
}
router.get('/promos', async (req, res) => {
let connection;
try {
const { connection: conn, release } = await getDbConnection();
connection = conn;
const releaseConnection = release;
const { startDate, endDate } = req.query || {};
const now = DateTime.now().endOf('day');
const defaultStart = now.minus({ years: 3 }).startOf('day');
const parsedStart = startDate ? parseDate(startDate, defaultStart).startOf('day') : defaultStart;
const parsedEnd = endDate ? parseDate(endDate, now).endOf('day') : now;
const rangeStart = parsedStart <= parsedEnd ? parsedStart : parsedEnd;
const rangeEnd = parsedEnd >= parsedStart ? parsedEnd : parsedStart;
const rangeStartSql = formatDateForSql(rangeStart);
const rangeEndSql = formatDateForSql(rangeEnd);
const sql = `
SELECT
p.promo_id AS id,
p.promo_code AS code,
p.promo_description_online AS description_online,
p.promo_description_private AS description_private,
p.date_start,
p.date_end,
COALESCE(u.usage_count, 0) AS usage_count
FROM promos p
LEFT JOIN (
SELECT
discount_code,
COUNT(DISTINCT order_id) AS usage_count
FROM order_discounts
WHERE discount_type = 10 AND discount_active = 1
GROUP BY discount_code
) u ON u.discount_code = p.promo_id
WHERE p.date_start IS NOT NULL
AND p.date_end IS NOT NULL
AND NOT (p.date_end < ? OR p.date_start > ?)
AND p.store = 1
AND p.date_start >= '2010-01-01'
ORDER BY p.promo_id DESC
LIMIT 200
`;
const [rows] = await connection.execute(sql, [rangeStartSql, rangeEndSql]);
releaseConnection();
const promos = rows.map(row => ({
id: Number(row.id),
code: row.code,
description: row.description_online || row.description_private || '',
privateDescription: row.description_private || '',
promo_description_online: row.description_online || '',
promo_description_private: row.description_private || '',
dateStart: row.date_start,
dateEnd: row.date_end,
usageCount: Number(row.usage_count || 0)
}));
res.json({ promos });
} catch (error) {
if (connection) {
try {
connection.destroy();
} catch (destroyError) {
console.error('Failed to destroy connection after error:', destroyError);
}
}
console.error('Error fetching promos:', error);
res.status(500).json({ error: 'Failed to fetch promos' });
}
});
router.post('/simulate', async (req, res) => {
const {
dateRange = {},
filters = {},
productPromo = {},
shippingPromo = {},
shippingTiers = [],
merchantFeePercent,
fixedCostPerOrder,
pointsConfig = {}
} = req.body || {};
const endDefault = DateTime.now();
const startDefault = endDefault.minus({ months: 6 });
const startDt = parseDate(dateRange.start, startDefault).startOf('day');
const endDt = parseDate(dateRange.end, endDefault).endOf('day');
const shipCountry = filters.shipCountry || 'US';
const promoIds = Array.isArray(filters.promoIds) ? filters.promoIds.filter(Boolean) : [];
const config = {
merchantFeePercent: typeof merchantFeePercent === 'number' ? merchantFeePercent : DEFAULTS.merchantFeePercent,
fixedCostPerOrder: typeof fixedCostPerOrder === 'number' ? fixedCostPerOrder : DEFAULTS.fixedCostPerOrder,
productPromo: {
type: productPromo.type || 'none',
value: Number(productPromo.value || 0),
minSubtotal: Number(productPromo.minSubtotal || 0)
},
shippingPromo: {
type: shippingPromo.type || 'none',
value: Number(shippingPromo.value || 0),
minSubtotal: Number(shippingPromo.minSubtotal || 0),
maxDiscount: Number(shippingPromo.maxDiscount || 0)
},
shippingTiers: Array.isArray(shippingTiers)
? shippingTiers
.map(tier => ({
threshold: Number(tier.threshold || 0),
mode: tier.mode === 'percentage' || tier.mode === 'flat' ? tier.mode : 'percentage',
value: Number(tier.value || 0)
}))
.filter(tier => tier.threshold >= 0 && tier.value >= 0)
.sort((a, b) => a.threshold - b.threshold)
: [],
points: {
pointsPerDollar: typeof pointsConfig.pointsPerDollar === 'number' ? pointsConfig.pointsPerDollar : null,
redemptionRate: typeof pointsConfig.redemptionRate === 'number' ? pointsConfig.redemptionRate : null,
pointDollarValue: typeof pointsConfig.pointDollarValue === 'number'
? pointsConfig.pointDollarValue
: DEFAULT_POINT_DOLLAR_VALUE
}
};
let connection;
let release;
try {
const dbConn = await getDbConnection();
connection = dbConn.connection;
release = dbConn.release;
const params = [
shipCountry,
formatDateForSql(startDt),
formatDateForSql(endDt)
];
const promoJoin = promoIds.length > 0
? 'JOIN order_discounts od ON od.order_id = o.order_id AND od.discount_active = 1 AND od.discount_type = 10'
: '';
if (promoIds.length > 0) {
params.push(promoIds);
}
params.push(formatDateForSql(startDt), formatDateForSql(endDt));
const filteredOrdersQuery = `
SELECT
o.order_id,
o.summary_subtotal,
o.summary_discount_subtotal,
o.summary_shipping,
o.ship_method_rate,
o.ship_method_cost,
o.summary_points,
${BUCKET_CASE} AS bucket_key
FROM _order o
${promoJoin}
WHERE o.summary_total > 0
AND o.summary_subtotal > 0
AND o.order_status NOT IN (15)
AND o.ship_method_selected <> 'holdit'
AND o.ship_country = ?
AND o.date_placed BETWEEN ? AND ?
${promoIds.length > 0 ? 'AND od.discount_code IN (?)' : ''}
`;
const bucketQuery = `
SELECT
f.bucket_key,
COUNT(*) AS order_count,
SUM(f.summary_subtotal) AS subtotal_sum,
SUM(f.summary_discount_subtotal) AS product_discount_sum,
SUM(f.summary_subtotal + f.summary_discount_subtotal) AS regular_subtotal_sum,
SUM(f.ship_method_rate) AS ship_rate_sum,
SUM(f.ship_method_cost) AS ship_cost_sum,
SUM(f.summary_points) AS points_awarded_sum,
SUM(COALESCE(p.points_redeemed, 0)) AS points_redeemed_sum,
SUM(COALESCE(c.total_cogs, 0)) AS cogs_sum,
AVG(f.summary_subtotal) AS avg_subtotal,
AVG(f.summary_discount_subtotal) AS avg_product_discount,
AVG(f.ship_method_rate) AS avg_ship_rate,
AVG(f.ship_method_cost) AS avg_ship_cost,
AVG(COALESCE(c.total_cogs, 0)) AS avg_cogs
FROM (
${filteredOrdersQuery}
) AS f
LEFT JOIN (
SELECT order_id, SUM(cogs_amount) AS total_cogs
FROM report_sales_data
WHERE action IN (1,2,3)
AND date_change BETWEEN ? AND ?
GROUP BY order_id
) AS c ON c.order_id = f.order_id
LEFT JOIN (
SELECT order_id, SUM(discount_amount_points) AS points_redeemed
FROM order_discounts
WHERE discount_type = 20 AND discount_active = 1
GROUP BY order_id
) AS p ON p.order_id = f.order_id
GROUP BY f.bucket_key
`;
const [rows] = await connection.execute(bucketQuery, params);
const totals = {
orders: 0,
subtotal: 0,
productDiscount: 0,
regularSubtotal: 0,
shipRate: 0,
shipCost: 0,
cogs: 0,
pointsAwarded: 0,
pointsRedeemed: 0
};
const rowMap = new Map();
for (const row of rows) {
const key = row.bucket_key || FINAL_BUCKET_KEY;
const parsed = {
orderCount: Number(row.order_count || 0),
subtotalSum: Number(row.subtotal_sum || 0),
productDiscountSum: Number(row.product_discount_sum || 0),
regularSubtotalSum: Number(row.regular_subtotal_sum || 0),
shipRateSum: Number(row.ship_rate_sum || 0),
shipCostSum: Number(row.ship_cost_sum || 0),
pointsAwardedSum: Number(row.points_awarded_sum || 0),
pointsRedeemedSum: Number(row.points_redeemed_sum || 0),
cogsSum: Number(row.cogs_sum || 0),
avgSubtotal: Number(row.avg_subtotal || 0),
avgProductDiscount: Number(row.avg_product_discount || 0),
avgShipRate: Number(row.avg_ship_rate || 0),
avgShipCost: Number(row.avg_ship_cost || 0),
avgCogs: Number(row.avg_cogs || 0)
};
rowMap.set(key, parsed);
totals.orders += parsed.orderCount;
totals.subtotal += parsed.subtotalSum;
totals.productDiscount += parsed.productDiscountSum;
totals.regularSubtotal += parsed.regularSubtotalSum;
totals.shipRate += parsed.shipRateSum;
totals.shipCost += parsed.shipCostSum;
totals.cogs += parsed.cogsSum;
totals.pointsAwarded += parsed.pointsAwardedSum;
totals.pointsRedeemed += parsed.pointsRedeemedSum;
}
const productDiscountRate = totals.regularSubtotal > 0
? totals.productDiscount / totals.regularSubtotal
: 0;
const pointsPerDollar = config.points.pointsPerDollar != null
? config.points.pointsPerDollar
: totals.subtotal > 0
? totals.pointsAwarded / totals.subtotal
: 0;
const redemptionRate = config.points.redemptionRate != null
? config.points.redemptionRate
: totals.pointsAwarded > 0
? Math.min(1, totals.pointsRedeemed / totals.pointsAwarded)
: 0;
const pointDollarValue = config.points.pointDollarValue || DEFAULT_POINT_DOLLAR_VALUE;
const bucketResults = [];
let weightedProfitAmount = 0;
let weightedProfitPercent = 0;
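// Simulate a representative order for each value bucket using observed averages;
// empty buckets fall back to the range midpoint with zeroed shipping/COGS figures.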
for (const range of RANGE_DEFINITIONS) {
const data = rowMap.get(range.key) || {
orderCount: 0,
avgSubtotal: 0,
avgShipRate: 0,
avgShipCost: 0,
avgCogs: 0
};
const orderValue = data.avgSubtotal > 0 ? data.avgSubtotal : getMidpoint(range);
const shippingChargeBase = data.avgShipRate > 0 ? data.avgShipRate : 0;
const actualShippingCost = data.avgShipCost > 0 ? data.avgShipCost : 0;
const productCogs = data.avgCogs > 0 ? data.avgCogs : 0;
const productDiscountAmount = orderValue * productDiscountRate;
const effectiveRegularPrice = productDiscountRate < 0.99
? orderValue / (1 - productDiscountRate)
: orderValue;
let promoProductDiscount = 0;
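// Product promo modes: 'percentage_subtotal' discounts the current subtotal,
// 'percentage_regular' tops existing discounts up to a target percent of regular price,
// and 'fixed_amount' applies a flat amount; all are capped at the order value.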
if (config.productPromo.type === 'percentage_subtotal' && orderValue >= config.productPromo.minSubtotal) {
promoProductDiscount = Math.min(orderValue, (config.productPromo.value / 100) * orderValue);
} else if (config.productPromo.type === 'percentage_regular' && orderValue >= config.productPromo.minSubtotal) {
const targetRate = config.productPromo.value / 100;
const additionalRate = Math.max(0, targetRate - productDiscountRate);
promoProductDiscount = Math.min(orderValue, additionalRate * effectiveRegularPrice);
} else if (config.productPromo.type === 'fixed_amount' && orderValue >= config.productPromo.minSubtotal) {
promoProductDiscount = Math.min(orderValue, config.productPromo.value);
}
let shippingAfterAuto = shippingChargeBase;
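// Tiers are sorted by ascending threshold, so the highest qualifying tier is applied last and wins.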
for (const tier of config.shippingTiers) {
if (orderValue >= tier.threshold) {
if (tier.mode === 'percentage') {
shippingAfterAuto = shippingChargeBase * Math.max(0, 1 - tier.value / 100);
} else if (tier.mode === 'flat') {
shippingAfterAuto = tier.value;
}
}
}
let shipPromoDiscount = 0;
if (config.shippingPromo.type !== 'none' && orderValue >= config.shippingPromo.minSubtotal) {
if (config.shippingPromo.type === 'percentage') {
shipPromoDiscount = shippingAfterAuto * (config.shippingPromo.value / 100);
} else if (config.shippingPromo.type === 'fixed') {
shipPromoDiscount = config.shippingPromo.value;
}
if (config.shippingPromo.maxDiscount > 0) {
shipPromoDiscount = Math.min(shipPromoDiscount, config.shippingPromo.maxDiscount);
}
shipPromoDiscount = Math.min(shipPromoDiscount, shippingAfterAuto);
}
const customerShipCost = Math.max(0, shippingAfterAuto - shipPromoDiscount);
const customerItemCost = Math.max(0, orderValue - promoProductDiscount);
const totalRevenue = customerItemCost + customerShipCost;
const merchantFees = totalRevenue * (config.merchantFeePercent / 100);
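// Loyalty cost: points earned on item revenue, scaled by the expected redemption rate and the dollar value per point.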
const pointsCost = customerItemCost * pointsPerDollar * redemptionRate * pointDollarValue;
const fixedCosts = config.fixedCostPerOrder;
const totalCosts = productCogs + actualShippingCost + merchantFees + pointsCost + fixedCosts;
const profit = totalRevenue - totalCosts;
const profitPercent = totalRevenue > 0 ? (profit / totalRevenue) : 0;
const weight = totals.orders > 0 ? (data.orderCount || 0) / totals.orders : 0;
weightedProfitAmount += profit * weight;
weightedProfitPercent += profitPercent * weight;
bucketResults.push({
key: range.key,
label: range.label,
min: range.min,
max: range.max,
orderCount: data.orderCount || 0,
weight,
orderValue,
productDiscountAmount,
promoProductDiscount,
customerItemCost,
shippingChargeBase,
shippingAfterAuto,
shipPromoDiscount,
customerShipCost,
actualShippingCost,
totalRevenue,
productCogs,
merchantFees,
pointsCost,
fixedCosts,
totalCosts,
profit,
profitPercent
});
}
if (release) {
release();
}
res.json({
dateRange: {
start: startDt.toISO(),
end: endDt.toISO()
},
totals: {
orders: totals.orders,
subtotal: totals.subtotal,
productDiscountRate,
pointsPerDollar,
redemptionRate,
pointDollarValue,
weightedProfitAmount,
weightedProfitPercent
},
buckets: bucketResults
});
} catch (error) {
if (release) {
try {
release();
} catch (releaseError) {
console.error('Failed to release connection after error:', releaseError);
}
} else if (connection) {
try {
connection.destroy();
} catch (destroyError) {
console.error('Failed to destroy connection after error:', destroyError);
}
}
console.error('Error running discount simulation:', error);
res.status(500).json({ error: 'Failed to run discount simulation' });
}
});
module.exports = router;

File diff suppressed because it is too large


@@ -0,0 +1,57 @@
const express = require('express');
const router = express.Router();
const { getDbConnection, getCachedQuery } = require('../db/connection');
// Test endpoint to count orders
router.get('/order-count', async (req, res) => {
try {
const { connection } = await getDbConnection();
// Simple query to count orders from _order table
const queryFn = async () => {
const [rows] = await connection.execute('SELECT COUNT(*) as count FROM _order');
return rows[0].count;
};
const cacheKey = 'order-count';
const count = await getCachedQuery(cacheKey, 'default', queryFn);
res.json({
success: true,
data: {
orderCount: count,
timestamp: new Date().toISOString()
}
});
} catch (error) {
console.error('Error fetching order count:', error);
res.status(500).json({
success: false,
error: error.message
});
}
});
// Test connection endpoint
router.get('/test-connection', async (req, res) => {
try {
const { connection } = await getDbConnection();
// Test the connection with a simple query
const [rows] = await connection.execute('SELECT 1 as test');
res.json({
success: true,
message: 'Database connection successful',
data: rows[0]
});
} catch (error) {
console.error('Error testing connection:', error);
res.status(500).json({
success: false,
error: error.message
});
}
});
module.exports = router;


@@ -0,0 +1,99 @@
require('dotenv').config();
const express = require('express');
const cors = require('cors');
const morgan = require('morgan');
const compression = require('compression');
const fs = require('fs');
const path = require('path');
const { closeAllConnections } = require('./db/connection');
const app = express();
const PORT = process.env.ACOT_PORT || 3012;
// Create logs directory if it doesn't exist
const logDir = path.join(__dirname, 'logs/app');
if (!fs.existsSync(logDir)) {
fs.mkdirSync(logDir, { recursive: true });
}
// Create a write stream for access logs
const accessLogStream = fs.createWriteStream(
path.join(logDir, 'access.log'),
{ flags: 'a' }
);
// Middleware
app.use(compression());
app.use(cors());
app.use(express.json());
app.use(express.urlencoded({ extended: true }));
// Logging middleware
if (process.env.NODE_ENV === 'production') {
app.use(morgan('combined', { stream: accessLogStream }));
} else {
app.use(morgan('dev'));
}
// Health check endpoint
app.get('/health', (req, res) => {
res.json({
status: 'healthy',
service: 'acot-server',
timestamp: new Date().toISOString(),
uptime: process.uptime()
});
});
// Routes
app.use('/api/acot/test', require('./routes/test'));
app.use('/api/acot/events', require('./routes/events'));
app.use('/api/acot/discounts', require('./routes/discounts'));
// Error handling middleware
app.use((err, req, res, next) => {
console.error('Unhandled error:', err);
res.status(500).json({
success: false,
error: process.env.NODE_ENV === 'production'
? 'Internal server error'
: err.message
});
});
// 404 handler
app.use((req, res) => {
res.status(404).json({
success: false,
error: 'Route not found'
});
});
// Start server
const server = app.listen(PORT, () => {
console.log(`ACOT Server running on port ${PORT}`);
console.log(`Environment: ${process.env.NODE_ENV}`);
});
// Graceful shutdown
const gracefulShutdown = async (signal) => {
console.log(`${signal} signal received: closing HTTP server`);
server.close(async () => {
console.log('HTTP server closed');
// Close database connections
try {
await closeAllConnections();
console.log('Database connections closed');
} catch (error) {
console.error('Error closing database connections:', error);
}
process.exit(0);
});
};
process.on('SIGTERM', gracefulShutdown);
process.on('SIGINT', gracefulShutdown);
module.exports = app;


@@ -0,0 +1,312 @@
const { DateTime } = require('luxon');
const TIMEZONE = 'America/New_York';
const DB_TIMEZONE = 'UTC-05:00';
const BUSINESS_DAY_START_HOUR = 1; // 1 AM Eastern
const WEEK_START_DAY = 7; // Sunday (Luxon uses 1 = Monday, 7 = Sunday)
const DB_DATETIME_FORMAT = 'yyyy-LL-dd HH:mm:ss';
const isDateTime = (value) => DateTime.isDateTime(value);
const ensureDateTime = (value, { zone = TIMEZONE } = {}) => {
if (!value) return null;
if (isDateTime(value)) {
return value.setZone(zone);
}
if (value instanceof Date) {
return DateTime.fromJSDate(value, { zone });
}
if (typeof value === 'number') {
return DateTime.fromMillis(value, { zone });
}
if (typeof value === 'string') {
let dt = DateTime.fromISO(value, { zone, setZone: true });
if (!dt.isValid) {
dt = DateTime.fromSQL(value, { zone });
}
return dt.isValid ? dt : null;
}
return null;
};
const getNow = () => DateTime.now().setZone(TIMEZONE);
const getDayStart = (input = getNow()) => {
const dt = ensureDateTime(input);
if (!dt || !dt.isValid) {
const fallback = getNow();
return fallback.set({
hour: BUSINESS_DAY_START_HOUR,
minute: 0,
second: 0,
millisecond: 0
});
}
const sameDayStart = dt.set({
hour: BUSINESS_DAY_START_HOUR,
minute: 0,
second: 0,
millisecond: 0
});
return dt.hour < BUSINESS_DAY_START_HOUR
? sameDayStart.minus({ days: 1 })
: sameDayStart;
};
const getDayEnd = (input = getNow()) => {
return getDayStart(input).plus({ days: 1 }).minus({ milliseconds: 1 });
};
const getWeekStart = (input = getNow()) => {
const dt = ensureDateTime(input);
if (!dt || !dt.isValid) {
return getDayStart();
}
const startOfWeek = dt.set({ weekday: WEEK_START_DAY }).startOf('day');
const normalized = startOfWeek > dt ? startOfWeek.minus({ weeks: 1 }) : startOfWeek;
return normalized.set({
hour: BUSINESS_DAY_START_HOUR,
minute: 0,
second: 0,
millisecond: 0
});
};
const getRangeForTimeRange = (timeRange = 'today', now = getNow()) => {
const current = ensureDateTime(now);
if (!current || !current.isValid) {
throw new Error('Invalid reference time for range calculation');
}
switch (timeRange) {
case 'today': {
return {
start: getDayStart(current),
end: getDayEnd(current)
};
}
case 'yesterday': {
const target = current.minus({ days: 1 });
return {
start: getDayStart(target),
end: getDayEnd(target)
};
}
case 'twoDaysAgo': {
const target = current.minus({ days: 2 });
return {
start: getDayStart(target),
end: getDayEnd(target)
};
}
case 'thisWeek': {
return {
start: getWeekStart(current),
end: getDayEnd(current)
};
}
case 'lastWeek': {
const lastWeek = current.minus({ weeks: 1 });
const weekStart = getWeekStart(lastWeek);
const weekEnd = weekStart.plus({ days: 6 });
return {
start: weekStart,
end: getDayEnd(weekEnd)
};
}
case 'thisMonth': {
const dayStart = getDayStart(current);
const monthStart = dayStart.startOf('month').set({ hour: BUSINESS_DAY_START_HOUR });
return {
start: monthStart,
end: getDayEnd(current)
};
}
case 'lastMonth': {
const lastMonth = current.minus({ months: 1 });
const monthStart = lastMonth
.startOf('month')
.set({ hour: BUSINESS_DAY_START_HOUR, minute: 0, second: 0, millisecond: 0 });
const monthEnd = monthStart.plus({ months: 1 }).minus({ days: 1 });
return {
start: monthStart,
end: getDayEnd(monthEnd)
};
}
case 'last7days': {
const dayStart = getDayStart(current);
return {
start: dayStart.minus({ days: 6 }),
end: getDayEnd(current)
};
}
case 'last30days': {
const dayStart = getDayStart(current);
return {
start: dayStart.minus({ days: 29 }),
end: getDayEnd(current)
};
}
case 'last90days': {
const dayStart = getDayStart(current);
return {
start: dayStart.minus({ days: 89 }),
end: getDayEnd(current)
};
}
case 'previous7days': {
const currentPeriodStart = getDayStart(current).minus({ days: 6 });
const previousEndDay = currentPeriodStart.minus({ days: 1 });
const previousStartDay = previousEndDay.minus({ days: 6 });
return {
start: getDayStart(previousStartDay),
end: getDayEnd(previousEndDay)
};
}
case 'previous30days': {
const currentPeriodStart = getDayStart(current).minus({ days: 29 });
const previousEndDay = currentPeriodStart.minus({ days: 1 });
const previousStartDay = previousEndDay.minus({ days: 29 });
return {
start: getDayStart(previousStartDay),
end: getDayEnd(previousEndDay)
};
}
case 'previous90days': {
const currentPeriodStart = getDayStart(current).minus({ days: 89 });
const previousEndDay = currentPeriodStart.minus({ days: 1 });
const previousStartDay = previousEndDay.minus({ days: 89 });
return {
start: getDayStart(previousStartDay),
end: getDayEnd(previousEndDay)
};
}
default:
throw new Error(`Unknown time range: ${timeRange}`);
}
};
const toDatabaseSqlString = (dt) => {
const normalized = ensureDateTime(dt);
if (!normalized || !normalized.isValid) {
throw new Error('Invalid datetime provided for SQL conversion');
}
const dbTime = normalized.setZone(DB_TIMEZONE, { keepLocalTime: true });
return dbTime.toFormat(DB_DATETIME_FORMAT);
};
const formatBusinessDate = (input) => {
const dt = ensureDateTime(input);
if (!dt || !dt.isValid) return '';
return dt.setZone(TIMEZONE).toFormat('LLL d, yyyy');
};
const getTimeRangeLabel = (timeRange) => {
const labels = {
today: 'Today',
yesterday: 'Yesterday',
twoDaysAgo: 'Two Days Ago',
thisWeek: 'This Week',
lastWeek: 'Last Week',
thisMonth: 'This Month',
lastMonth: 'Last Month',
last7days: 'Last 7 Days',
last30days: 'Last 30 Days',
last90days: 'Last 90 Days',
previous7days: 'Previous 7 Days',
previous30days: 'Previous 30 Days',
previous90days: 'Previous 90 Days'
};
return labels[timeRange] || timeRange;
};
const getTimeRangeConditions = (timeRange, startDate, endDate) => {
if (timeRange === 'custom' && startDate && endDate) {
const start = ensureDateTime(startDate);
const end = ensureDateTime(endDate);
if (!start || !start.isValid || !end || !end.isValid) {
throw new Error('Invalid custom date range provided');
}
return {
whereClause: 'date_placed >= ? AND date_placed <= ?',
params: [toDatabaseSqlString(start), toDatabaseSqlString(end)],
dateRange: {
start: start.toUTC().toISO(),
end: end.toUTC().toISO(),
label: `${formatBusinessDate(start)} - ${formatBusinessDate(end)}`
}
};
}
const normalizedRange = timeRange || 'today';
const range = getRangeForTimeRange(normalizedRange);
return {
whereClause: 'date_placed >= ? AND date_placed <= ?',
params: [toDatabaseSqlString(range.start), toDatabaseSqlString(range.end)],
dateRange: {
start: range.start.toUTC().toISO(),
end: range.end.toUTC().toISO(),
label: getTimeRangeLabel(normalizedRange)
}
};
};
const getBusinessDayBounds = (timeRange) => {
const range = getRangeForTimeRange(timeRange);
return {
start: range.start.toJSDate(),
end: range.end.toJSDate()
};
};
const parseBusinessDate = (mysqlDatetime) => {
if (!mysqlDatetime || mysqlDatetime === '0000-00-00 00:00:00') {
return null;
}
const dt = DateTime.fromSQL(mysqlDatetime, { zone: DB_TIMEZONE });
if (!dt.isValid) {
console.error('[timeUtils] Failed to parse MySQL datetime:', mysqlDatetime, dt.invalidExplanation);
return null;
}
return dt.toUTC().toJSDate();
};
const formatMySQLDate = (input) => {
if (!input) return null;
const dt = ensureDateTime(input, { zone: 'utc' });
if (!dt || !dt.isValid) return null;
return dt.setZone(DB_TIMEZONE).toFormat(DB_DATETIME_FORMAT);
};
module.exports = {
getBusinessDayBounds,
getTimeRangeConditions,
formatBusinessDate,
getTimeRangeLabel,
parseBusinessDate,
formatMySQLDate,
// Expose helpers for tests or advanced consumers
_internal: {
getDayStart,
getDayEnd,
getWeekStart,
getRangeForTimeRange,
BUSINESS_DAY_START_HOUR
}
};


@@ -0,0 +1,21 @@
# Server Configuration
NODE_ENV=development
AIRCALL_PORT=3002
LOG_LEVEL=info
# Aircall API Credentials
AIRCALL_API_ID=your_aircall_api_id
AIRCALL_API_TOKEN=your_aircall_api_token
# Database Configuration
MONGODB_URI=mongodb://localhost:27017/dashboard
MONGODB_DB=dashboard
REDIS_URL=redis://localhost:6379
# Service Configuration
TIMEZONE=America/New_York
DAY_STARTS_AT=1 # Business day starts at 1 AM ET
# Optional Settings
REDIS_TTL=300 # Cache TTL in seconds (5 minutes)
COLLECTION_NAME=aircall_daily_data


@@ -0,0 +1,55 @@
# Aircall Server
A standalone server for handling Aircall metrics and data processing.
## Setup
1. Install dependencies:
```bash
npm install
```
2. Set up environment variables:
```bash
cp .env.example .env
```
Then edit `.env` with your configuration.
Required environment variables:
- `AIRCALL_API_ID`: Your Aircall API ID
- `AIRCALL_API_TOKEN`: Your Aircall API Token
- `MONGODB_URI`: MongoDB connection string
- `REDIS_URL`: Redis connection string
- `AIRCALL_PORT`: Server port (default: 3002)
## Running the Server
### Development
```bash
npm run dev
```
### Production
Using PM2:
```bash
pm2 start ecosystem.config.js --env production
```
## API Endpoints
### GET /api/aircall/metrics/:timeRange
Get Aircall metrics for a specific time range.
Parameters:
- `timeRange`: One of ['today', 'yesterday', 'last7days', 'last30days', 'last90days']
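Example request, assuming the server is running locally on the default port:
```bash
curl http://localhost:3002/api/aircall/metrics/last7days
```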
### GET /api/aircall/health
Get server health status.
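Example, with an illustrative response (the shape follows the health handler in the Aircall routes; values shown are placeholders):
```bash
curl http://localhost:3002/api/aircall/health
# {"status":"ok","service":"aircall","timestamp":"...","connections":{"mongodb":true,"redis":true}}
```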
## Architecture
The server uses:
- Express.js for the API
- MongoDB for data storage
- Redis for caching
- Winston for logging

File diff suppressed because it is too large


@@ -0,0 +1,23 @@
{
"name": "aircall-server",
"version": "1.0.0",
"description": "Aircall metrics server",
"type": "module",
"main": "server.js",
"scripts": {
"start": "node server.js",
"dev": "nodemon server.js"
},
"dependencies": {
"axios": "^1.6.2",
"cors": "^2.8.5",
"dotenv": "^16.3.1",
"express": "^4.18.2",
"mongodb": "^6.3.0",
"redis": "^4.6.11",
"winston": "^3.11.0"
},
"devDependencies": {
"nodemon": "^3.0.2"
}
}


@@ -0,0 +1,83 @@
import express from 'express';
import cors from 'cors';
import dotenv from 'dotenv';
import path from 'path';
import { fileURLToPath } from 'url';
import { createRoutes } from './src/routes/index.js';
import { aircallConfig } from './src/config/aircall.config.js';
import { connectMongoDB } from './src/utils/db.js';
import { createRedisClient } from './src/utils/redis.js';
import { createLogger } from './src/utils/logger.js';
// Get directory name in ES modules
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Load environment variables from the correct path
dotenv.config({ path: path.resolve(__dirname, '.env') });
// Validate required environment variables
const requiredEnvVars = ['AIRCALL_API_ID', 'AIRCALL_API_TOKEN', 'MONGODB_URI', 'REDIS_URL'];
const missingEnvVars = requiredEnvVars.filter(envVar => !process.env[envVar]);
if (missingEnvVars.length > 0) {
console.error('Missing required environment variables:', missingEnvVars);
process.exit(1);
}
const app = express();
const port = process.env.AIRCALL_PORT || 3002;
const logger = createLogger('aircall-server');
// Middleware
app.use(cors());
app.use(express.json());
// Connect to databases
let mongodb;
let redis;
async function initializeServer() {
try {
// Connect to MongoDB
mongodb = await connectMongoDB();
logger.info('Connected to MongoDB');
// Connect to Redis
redis = await createRedisClient();
logger.info('Connected to Redis');
// Initialize configs with database connections
const configs = {
aircall: {
...aircallConfig,
mongodb,
redis,
logger
}
};
// Initialize routes
const routes = createRoutes(configs, logger);
app.use('/api', routes);
// Error handling middleware
app.use((err, req, res, next) => {
logger.error('Server error:', err);
res.status(500).json({
error: 'Internal server error',
message: err.message
});
});
// Start server
app.listen(port, () => {
logger.info(`Aircall server listening on port ${port}`);
});
} catch (error) {
logger.error('Failed to initialize server:', error);
process.exit(1);
}
}
initializeServer();


@@ -0,0 +1,15 @@
export const aircallConfig = {
serviceName: 'aircall',
apiId: process.env.AIRCALL_API_ID,
apiToken: process.env.AIRCALL_API_TOKEN,
timezone: 'America/New_York',
dayStartsAt: 1,
storeHistory: true,
collection: 'aircall_daily_data',
redisTTL: 300, // 5 minutes cache for current day
endpoints: {
metrics: {
ttl: 300
}
}
};


@@ -0,0 +1,57 @@
import express from 'express';
import { AircallService } from '../services/aircall/AircallService.js';
export const createAircallRoutes = (config, logger) => {
const router = express.Router();
const aircallService = new AircallService(config);
router.get('/metrics/:timeRange?', async (req, res) => {
try {
const { timeRange = 'today' } = req.params;
const allowedRanges = ['today', 'yesterday', 'last7days', 'last30days', 'last90days'];
if (!allowedRanges.includes(timeRange)) {
return res.status(400).json({
error: 'Invalid time range',
allowedRanges
});
}
const metrics = await aircallService.getMetrics(timeRange);
res.json({
...metrics,
_meta: {
timeRange,
generatedAt: new Date().toISOString(),
dataPoints: metrics.daily_data?.length || 0
}
});
} catch (error) {
logger.error('Error fetching Aircall metrics:', error);
res.status(500).json({
error: 'Failed to fetch Aircall metrics',
message: error.message
});
}
});
// Health check endpoint
router.get('/health', (req, res) => {
const mongoConnected = !!aircallService.mongodb?.db;
const redisConnected = !!aircallService.redis?.isOpen;
const health = {
status: mongoConnected && redisConnected ? 'ok' : 'degraded',
service: 'aircall',
timestamp: new Date().toISOString(),
connections: {
mongodb: mongoConnected,
redis: redisConnected
}
};
res.json(health);
});
return router;
};


@@ -0,0 +1,32 @@
import express from 'express';
import { createAircallRoutes } from './aircall.routes.js';
export const createRoutes = (configs, logger) => {
const router = express.Router();
// Mount Aircall routes
router.use('/aircall', createAircallRoutes(configs.aircall, logger));
// Health check endpoint
router.get('/health', (req, res) => {
const services = req.services || {};
res.status(200).json({
status: 'ok',
timestamp: new Date(),
services: {
redis: services.redis?.isReady || false,
mongodb: services.mongo?.readyState === 1 || false
}
});
});
// Catch-all 404 handler
router.use('*', (req, res) => {
res.status(404).json({
error: 'Not Found',
message: `Route ${req.originalUrl} not found`
});
});
return router;
};


@@ -0,0 +1,298 @@
import { DataManager } from "../base/DataManager.js";
export class AircallDataManager extends DataManager {
constructor(mongodb, redis, timeManager) {
const options = {
collection: "aircall_daily_data",
redisTTL: 300 // 5 minutes cache
};
super(mongodb, redis, timeManager, options);
this.options = options;
}
ensureDate(d) {
if (d instanceof Date) return d;
if (typeof d === 'string') return new Date(d);
if (typeof d === 'number') return new Date(d);
console.error('Invalid date value:', d);
return new Date(); // fallback to current date
}
async storeHistoricalPeriod(start, end, calls) {
if (!this.mongodb) return;
try {
if (!Array.isArray(calls)) {
console.error("Invalid calls data:", calls);
return;
}
// Group calls by true day boundaries using TimeManager
const dailyCallsMap = new Map();
calls.forEach((call) => {
try {
const timestamp = call.started_at * 1000; // Convert to milliseconds
const callDate = this.ensureDate(timestamp);
const dayBounds = this.timeManager.getDayBounds(callDate);
const dayKey = dayBounds.start.toISOString();
if (!dailyCallsMap.has(dayKey)) {
dailyCallsMap.set(dayKey, {
date: dayBounds.start,
calls: [],
});
}
dailyCallsMap.get(dayKey).calls.push(call);
} catch (err) {
console.error('Error processing call:', err, call);
}
});
// Iterate over each day in the period using day boundaries
const dates = [];
let currentDate = this.ensureDate(start);
const endDate = this.ensureDate(end);
while (currentDate < endDate) {
const dayBounds = this.timeManager.getDayBounds(currentDate);
dates.push(dayBounds.start);
currentDate.setUTCDate(currentDate.getUTCDate() + 1);
}
for (const date of dates) {
try {
const dateKey = date.toISOString();
const dayData = dailyCallsMap.get(dateKey);
const dayCalls = dayData ? dayData.calls : [];
// Process calls for this day using the same processing logic
const metrics = this.processCallData(dayCalls);
// Insert a daily_data record for this day
metrics.daily_data = [
{
date: date.toISOString().split("T")[0],
inbound: metrics.by_direction.inbound,
outbound: metrics.by_direction.outbound,
},
];
// Store this day's processed data as historical
await this.storeHistoricalDay(date, metrics);
} catch (err) {
console.error('Error processing date:', err, date);
}
}
} catch (error) {
console.error("Error storing historical period:", error, error.stack);
throw error;
}
}
processCallData(calls) {
// If calls is already processed (has total, by_direction, etc.), return it
if (calls && calls.total !== undefined) {
console.log('Data already processed:', {
total: calls.total,
by_direction: calls.by_direction
});
// Return a clean copy of the processed data
return {
total: calls.total,
by_direction: calls.by_direction,
by_status: calls.by_status,
by_missed_reason: calls.by_missed_reason,
by_hour: calls.by_hour,
by_users: calls.by_users,
daily_data: calls.daily_data,
duration_distribution: calls.duration_distribution,
average_duration: calls.average_duration
};
}
console.log('Processing raw calls:', {
count: calls.length,
sample: calls.length > 0 ? {
id: calls[0].id,
direction: calls[0].direction,
status: calls[0].status
} : null
});
// Process raw calls
const metrics = {
total: calls.length,
by_direction: { inbound: 0, outbound: 0 },
by_status: { answered: 0, missed: 0 },
by_missed_reason: {},
by_hour: Array(24).fill(0),
by_users: {},
daily_data: [],
duration_distribution: [
{ range: "0-1m", count: 0 },
{ range: "1-5m", count: 0 },
{ range: "5-15m", count: 0 },
{ range: "15-30m", count: 0 },
{ range: "30m+", count: 0 },
],
average_duration: 0,
total_duration: 0,
};
// Group calls by date for daily data
const dailyCallsMap = new Map();
calls.forEach((call) => {
try {
// Direction metrics
metrics.by_direction[call.direction]++;
// Get call date and hour using TimeManager
const timestamp = call.started_at * 1000; // Convert to milliseconds
const callDate = this.ensureDate(timestamp);
const dayBounds = this.timeManager.getDayBounds(callDate);
const dayKey = dayBounds.start.toISOString().split("T")[0];
const hour = callDate.getHours();
metrics.by_hour[hour]++;
// Status and duration metrics
if (call.answered_at) {
metrics.by_status.answered++;
const duration = call.ended_at - call.answered_at;
metrics.total_duration += duration;
// Duration distribution
if (duration <= 60) {
metrics.duration_distribution[0].count++;
} else if (duration <= 300) {
metrics.duration_distribution[1].count++;
} else if (duration <= 900) {
metrics.duration_distribution[2].count++;
} else if (duration <= 1800) {
metrics.duration_distribution[3].count++;
} else {
metrics.duration_distribution[4].count++;
}
// Track user performance
if (call.user) {
const userId = call.user.id;
if (!metrics.by_users[userId]) {
metrics.by_users[userId] = {
id: userId,
name: call.user.name,
total: 0,
answered: 0,
missed: 0,
total_duration: 0,
average_duration: 0,
};
}
metrics.by_users[userId].total++;
metrics.by_users[userId].answered++;
metrics.by_users[userId].total_duration += duration;
}
} else {
metrics.by_status.missed++;
if (call.missed_call_reason) {
metrics.by_missed_reason[call.missed_call_reason] =
(metrics.by_missed_reason[call.missed_call_reason] || 0) + 1;
}
// Track missed calls by user
if (call.user) {
const userId = call.user.id;
if (!metrics.by_users[userId]) {
metrics.by_users[userId] = {
id: userId,
name: call.user.name,
total: 0,
answered: 0,
missed: 0,
total_duration: 0,
average_duration: 0,
};
}
metrics.by_users[userId].total++;
metrics.by_users[userId].missed++;
}
}
// Group by date for daily data
if (!dailyCallsMap.has(dayKey)) {
dailyCallsMap.set(dayKey, { date: dayKey, inbound: 0, outbound: 0 });
}
dailyCallsMap.get(dayKey)[call.direction]++;
} catch (err) {
console.error('Error processing call:', err, call);
}
});
// Calculate average durations for users
Object.values(metrics.by_users).forEach((user) => {
if (user.answered > 0) {
user.average_duration = Math.round(user.total_duration / user.answered);
}
});
// Calculate global average duration
if (metrics.by_status.answered > 0) {
metrics.average_duration = Math.round(
metrics.total_duration / metrics.by_status.answered
);
}
// Convert daily data map to sorted array
metrics.daily_data = Array.from(dailyCallsMap.values()).sort((a, b) =>
a.date.localeCompare(b.date)
);
delete metrics.total_duration;
console.log('Processed metrics:', {
total: metrics.total,
by_direction: metrics.by_direction,
by_status: metrics.by_status,
daily_data_count: metrics.daily_data.length
});
return metrics;
}
async storeHistoricalDay(date, data) {
if (!this.mongodb) return;
try {
const collection = this.mongodb.collection(this.options.collection);
const dayBounds = this.timeManager.getDayBounds(this.ensureDate(date));
// Ensure consistent data structure with metrics nested in data field
const document = {
date: dayBounds.start,
data: {
total: data.total,
by_direction: data.by_direction,
by_status: data.by_status,
by_missed_reason: data.by_missed_reason,
by_hour: data.by_hour,
by_users: data.by_users,
daily_data: data.daily_data,
duration_distribution: data.duration_distribution,
average_duration: data.average_duration
},
updatedAt: new Date()
};
await collection.updateOne(
{ date: dayBounds.start },
{ $set: document },
{ upsert: true }
);
} catch (error) {
console.error("Error storing historical day:", error);
throw error;
}
}
}


@@ -0,0 +1,138 @@
import axios from "axios";
import { Buffer } from "buffer";
import { BaseService } from "../base/BaseService.js";
import { AircallDataManager } from "./AircallDataManager.js";
export class AircallService extends BaseService {
constructor(config) {
super(config);
this.baseUrl = "https://api.aircall.io/v1";
console.log('Initializing Aircall service with credentials:', {
apiId: config.apiId ? 'present' : 'missing',
apiToken: config.apiToken ? 'present' : 'missing'
});
this.auth = Buffer.from(`${config.apiId}:${config.apiToken}`).toString(
"base64"
);
this.dataManager = new AircallDataManager(
this.mongodb,
this.redis,
this.timeManager
);
if (!config.apiId || !config.apiToken) {
throw new Error("Aircall API credentials are required");
}
}
async getMetrics(timeRange) {
const dateRange = await this.timeManager.getDateRange(timeRange);
console.log('Fetching metrics for date range:', {
start: dateRange.start.toISOString(),
end: dateRange.end.toISOString()
});
return this.dataManager.getData(dateRange, async (range) => {
const calls = await this.fetchAllCalls(range.start, range.end);
console.log('Fetched calls:', {
count: calls.length,
sample: calls.length > 0 ? calls[0] : null
});
return calls;
});
}
async fetchAllCalls(start, end) {
try {
let allCalls = [];
let currentPage = 1;
let hasMore = true;
let totalPages = null;
while (hasMore) {
const response = await this.makeRequest("/calls", {
from: Math.floor(start.getTime() / 1000),
to: Math.floor(end.getTime() / 1000),
order: "asc",
page: currentPage,
per_page: 50,
});
console.log('API Response:', {
page: currentPage,
totalPages: response.meta.total_pages,
callsCount: response.calls?.length,
params: {
from: Math.floor(start.getTime() / 1000),
to: Math.floor(end.getTime() / 1000)
}
});
if (!response.calls) {
throw new Error("Invalid API response format");
}
allCalls = [...allCalls, ...response.calls];
hasMore = response.meta.next_page_link !== null;
totalPages = response.meta.total_pages;
currentPage++;
if (hasMore) {
// Brief pause between pages; actual rate limiting (429 responses) is retried in makeRequest
await new Promise((resolve) => setTimeout(resolve, 1));
}
}
return allCalls;
} catch (error) {
console.error("Error fetching all calls:", error);
throw error;
}
}
async makeRequest(endpoint, params = {}) {
try {
console.log('Making API request:', {
endpoint,
params
});
const response = await axios.get(`${this.baseUrl}${endpoint}`, {
headers: {
Authorization: `Basic ${this.auth}`,
"Content-Type": "application/json",
},
params,
});
return response.data;
} catch (error) {
if (error.response?.status === 429) {
console.log("Rate limit reached, waiting before retry...");
await new Promise((resolve) => setTimeout(resolve, 5000));
return this.makeRequest(endpoint, params);
}
this.handleApiError(error, `Error making request to ${endpoint}`);
}
}
validateApiResponse(response, context = "") {
if (!response || typeof response !== "object") {
throw new Error(`${context}: Invalid API response format`);
}
if (response.error) {
throw new Error(`${context}: ${response.error}`);
}
return true;
}
getPaginationInfo(meta) {
return {
currentPage: meta.current_page,
totalPages: meta.total_pages,
hasNextPage: meta.next_page_link !== null,
totalRecords: meta.total,
};
}
}


@@ -0,0 +1,32 @@
import { createTimeManager } from '../../utils/timeUtils.js';
export class BaseService {
constructor(config) {
this.config = config;
this.mongodb = config.mongodb;
this.redis = config.redis;
this.logger = config.logger;
this.timeManager = createTimeManager(config.timezone, config.dayStartsAt);
}
handleApiError(error, context = '') {
this.logger.error(`API Error ${context}:`, {
message: error.message,
status: error.response?.status,
data: error.response?.data,
});
if (error.response) {
const status = error.response.status;
const message = error.response.data?.message || error.response.statusText;
if (status === 429) {
throw new Error('API rate limit exceeded. Please try again later.');
}
throw new Error(`API error (${status}): ${message}`);
}
throw error;
}
}


@@ -0,0 +1,320 @@
export class DataManager {
constructor(mongodb, redis, timeManager, options) {
this.mongodb = mongodb;
this.redis = redis;
this.timeManager = timeManager;
this.options = options || {};
}
ensureDate(d) {
if (d instanceof Date) return d;
if (typeof d === 'string') return new Date(d);
if (typeof d === 'number') return new Date(d);
if (d && d.date) return new Date(d.date); // Handle MongoDB records
console.error('Invalid date value:', d);
return new Date(); // fallback to current date
}
async getData(dateRange, fetchFn) {
try {
// Get historical data from MongoDB
const historicalData = await this.getHistoricalDays(dateRange.start, dateRange.end);
// Find any missing date ranges
const missingRanges = this.findMissingDateRanges(dateRange.start, dateRange.end, historicalData);
// Fetch missing data
for (const range of missingRanges) {
const data = await fetchFn(range);
await this.storeHistoricalPeriod(range.start, range.end, data);
}
// Get updated historical data
const updatedData = await this.getHistoricalDays(dateRange.start, dateRange.end);
// Handle both nested and flat data structures
if (updatedData && updatedData.length > 0) {
// Process each record and combine them
const processedData = updatedData.map(record => {
if (record.data) {
return record.data;
}
if (record.total !== undefined) {
return {
total: record.total,
by_direction: record.by_direction,
by_status: record.by_status,
by_missed_reason: record.by_missed_reason,
by_hour: record.by_hour,
by_users: record.by_users,
daily_data: record.daily_data,
duration_distribution: record.duration_distribution,
average_duration: record.average_duration
};
}
return null;
}).filter(Boolean);
// Combine the data
if (processedData.length > 0) {
return this.combineMetrics(processedData);
}
}
// Otherwise process as raw call data
return this.processCallData(updatedData);
} catch (error) {
console.error('Error in getData:', error);
throw error;
}
}
findMissingDateRanges(start, end, existingDates) {
const missingRanges = [];
const existingDatesSet = new Set(
existingDates.map((d) => {
// Handle both nested and flat data structures
const date = d.date ? d.date : d;
return this.ensureDate(date).toISOString().split("T")[0];
})
);
let current = new Date(start);
const endDate = new Date(end);
while (current < endDate) {
const dayBounds = this.timeManager.getDayBounds(current);
const dayKey = dayBounds.start.toISOString().split("T")[0];
if (!existingDatesSet.has(dayKey)) {
// Found a missing day
const missingStart = new Date(dayBounds.start);
const missingEnd = new Date(dayBounds.end);
missingRanges.push({
start: missingStart,
end: missingEnd,
});
}
// Move to the next day using timeManager to ensure proper business day boundaries
current = new Date(dayBounds.end.getTime() + 1);
}
return missingRanges;
}
async getCurrentDay(fetchFn) {
const now = new Date();
const todayBounds = this.timeManager.getDayBounds(now);
const todayKey = this.timeManager.formatDate(todayBounds.start);
const cacheKey = `${this.options.collection}:current_day:${todayKey}`;
try {
// Check cache first
if (this.redis?.isOpen) {
const cached = await this.redis.get(cacheKey);
if (cached) {
const parsedCache = JSON.parse(cached);
if (parsedCache.total !== undefined && parsedCache.daily_data?.length) {
// Use timeManager to check if the cached data is for today
const cachedDate = new Date(parsedCache.daily_data[0].date);
const isToday = this.timeManager.isToday(cachedDate);
if (isToday) {
return parsedCache;
}
}
}
}
// Get safe end time that's never in the future
const safeEnd = this.timeManager.getCurrentBusinessDayEnd();
// Fetch and process current day data with safe end time
const data = await fetchFn({
start: todayBounds.start,
end: safeEnd
});
if (!data) {
return null;
}
// Cache the data with a shorter TTL for today's data
if (this.redis?.isOpen) {
const ttl = Math.min(
this.options.redisTTL,
60 * 5 // 5 minutes max for today's data
);
await this.redis.set(cacheKey, JSON.stringify(data), {
EX: ttl,
});
}
return data;
} catch (error) {
console.error('Error in getCurrentDay:', error);
throw error;
}
}
getDayCount(start, end) {
// Calculate full days between dates using timeManager
const startDay = this.timeManager.getDayBounds(start);
const endDay = this.timeManager.getDayBounds(end);
return Math.ceil((endDay.end - startDay.start) / (24 * 60 * 60 * 1000));
}
async fetchMissingDays(start, end, existingData, fetchFn) {
const existingDates = new Set(
existingData.map((d) => this.timeManager.formatDate(d.date))
);
const missingData = [];
let currentDate = new Date(start);
while (currentDate < end) {
const dayBounds = this.timeManager.getDayBounds(currentDate);
const dateString = this.timeManager.formatDate(dayBounds.start);
if (!existingDates.has(dateString)) {
const data = await fetchFn({
start: dayBounds.start,
end: dayBounds.end,
});
await this.storeHistoricalDay(dayBounds.start, data);
missingData.push(data);
}
// Move to next day using timeManager to ensure proper business day boundaries
currentDate = new Date(dayBounds.end.getTime() + 1);
}
return missingData;
}
async getHistoricalDays(start, end) {
try {
if (!this.mongodb) return [];
const collection = this.mongodb.collection(this.options.collection);
const startDay = this.timeManager.getDayBounds(start);
const endDay = this.timeManager.getDayBounds(end);
const records = await collection
.find({
date: {
$gte: startDay.start,
$lt: endDay.start,
},
})
.sort({ date: 1 })
.toArray();
return records;
} catch (error) {
console.error('Error getting historical days:', error);
return [];
}
}
combineMetrics(metricsArray) {
if (!metricsArray || metricsArray.length === 0) return null;
if (metricsArray.length === 1) return metricsArray[0];
const combined = {
total: 0,
by_direction: { inbound: 0, outbound: 0 },
by_status: { answered: 0, missed: 0 },
by_missed_reason: {},
by_hour: Array(24).fill(0),
by_users: {},
daily_data: [],
duration_distribution: [
{ range: '0-1m', count: 0 },
{ range: '1-5m', count: 0 },
{ range: '5-15m', count: 0 },
{ range: '15-30m', count: 0 },
{ range: '30m+', count: 0 }
],
average_duration: 0
};
let totalAnswered = 0;
let totalDuration = 0;
metricsArray.forEach(metrics => {
// Sum basic metrics
combined.total += metrics.total;
combined.by_direction.inbound += metrics.by_direction.inbound;
combined.by_direction.outbound += metrics.by_direction.outbound;
combined.by_status.answered += metrics.by_status.answered;
combined.by_status.missed += metrics.by_status.missed;
// Combine missed reasons
Object.entries(metrics.by_missed_reason).forEach(([reason, count]) => {
combined.by_missed_reason[reason] = (combined.by_missed_reason[reason] || 0) + count;
});
// Sum hourly data
metrics.by_hour.forEach((count, hour) => {
combined.by_hour[hour] += count;
});
// Combine user data
Object.entries(metrics.by_users).forEach(([userId, userData]) => {
if (!combined.by_users[userId]) {
combined.by_users[userId] = {
id: userData.id,
name: userData.name,
total: 0,
answered: 0,
missed: 0,
total_duration: 0,
average_duration: 0
};
}
combined.by_users[userId].total += userData.total;
combined.by_users[userId].answered += userData.answered;
combined.by_users[userId].missed += userData.missed;
combined.by_users[userId].total_duration += userData.total_duration || 0;
});
// Combine duration distribution
metrics.duration_distribution.forEach((dist, index) => {
combined.duration_distribution[index].count += dist.count;
});
// Accumulate for average duration calculation
if (metrics.average_duration && metrics.by_status.answered) {
totalDuration += metrics.average_duration * metrics.by_status.answered;
totalAnswered += metrics.by_status.answered;
}
// Merge daily data
if (metrics.daily_data) {
combined.daily_data.push(...metrics.daily_data);
}
});
// Calculate final average duration
if (totalAnswered > 0) {
combined.average_duration = Math.round(totalDuration / totalAnswered);
}
// Calculate user averages
Object.values(combined.by_users).forEach(user => {
if (user.answered > 0) {
user.average_duration = Math.round(user.total_duration / user.answered);
}
});
// Sort and deduplicate daily data
combined.daily_data = Array.from(
new Map(combined.daily_data.map(item => [item.date, item])).values()
).sort((a, b) => a.date.localeCompare(b.date));
return combined;
}
}


@@ -0,0 +1,15 @@
import { MongoClient } from 'mongodb';
const MONGODB_URI = process.env.MONGODB_URI || 'mongodb://localhost:27017/dashboard';
const DB_NAME = process.env.MONGODB_DB || 'dashboard';
export async function connectMongoDB() {
try {
const client = await MongoClient.connect(MONGODB_URI);
console.log('Connected to MongoDB');
return client.db(DB_NAME);
} catch (error) {
console.error('MongoDB connection error:', error);
throw error;
}
}


@@ -0,0 +1,30 @@
import winston from 'winston';
import path from 'path';
export function createLogger(service) {
return winston.createLogger({
level: process.env.LOG_LEVEL || 'info',
format: winston.format.combine(
winston.format.timestamp(),
winston.format.json()
),
defaultMeta: { service },
transports: [
// Write all logs to console
new winston.transports.Console({
format: winston.format.combine(
winston.format.colorize(),
winston.format.simple()
)
}),
// Write all logs to service-specific files
new winston.transports.File({
filename: path.join('logs', `${service}-error.log`),
level: 'error'
}),
new winston.transports.File({
filename: path.join('logs', `${service}-combined.log`)
})
]
});
}


@@ -0,0 +1,23 @@
import { createClient } from 'redis';
const REDIS_URL = process.env.REDIS_URL || 'redis://localhost:6379';
export async function createRedisClient() {
try {
const client = createClient({
url: REDIS_URL
});
// Register the error handler before connecting so connection errors are surfaced
client.on('error', (err) => {
console.error('Redis error:', err);
});
await client.connect();
console.log('Connected to Redis');
return client;
} catch (error) {
console.error('Redis connection error:', error);
throw error;
}
}


@@ -0,0 +1,262 @@
class TimeManager {
static ALLOWED_RANGES = ['today', 'yesterday', 'last2days', 'last7days', 'last30days', 'last90days',
'previous7days', 'previous30days', 'previous90days'];
constructor(timezone = 'America/New_York', dayStartsAt = 1) {
this.timezone = timezone;
this.dayStartsAt = dayStartsAt;
}
getDayBounds(date) {
try {
const now = new Date();
const targetDate = new Date(date);
// For today
if (
targetDate.getUTCFullYear() === now.getUTCFullYear() &&
targetDate.getUTCMonth() === now.getUTCMonth() &&
targetDate.getUTCDate() === now.getUTCDate()
) {
// If current time is before day start (1 AM ET / 6 AM UTC),
// use previous day's start until now
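// Note: adding 5 hours assumes a fixed UTC-5 (EST) offset and does not track daylight saving time.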
const todayStart = new Date(Date.UTC(
now.getUTCFullYear(),
now.getUTCMonth(),
now.getUTCDate(),
this.dayStartsAt + 5,
0,
0,
0
));
if (now < todayStart) {
const yesterdayStart = new Date(todayStart);
yesterdayStart.setUTCDate(yesterdayStart.getUTCDate() - 1);
return { start: yesterdayStart, end: now };
}
return { start: todayStart, end: now };
}
// For past days, use full 24-hour period
const normalizedDate = new Date(Date.UTC(
targetDate.getUTCFullYear(),
targetDate.getUTCMonth(),
targetDate.getUTCDate()
));
const dayStart = new Date(normalizedDate);
dayStart.setUTCHours(this.dayStartsAt + 5, 0, 0, 0);
const dayEnd = new Date(dayStart);
dayEnd.setUTCDate(dayEnd.getUTCDate() + 1);
return { start: dayStart, end: dayEnd };
} catch (error) {
console.error('Error in getDayBounds:', error);
throw new Error(`Failed to calculate day bounds: ${error.message}`);
}
}
getDateRange(period) {
try {
const now = new Date();
const todayBounds = this.getDayBounds(now);
const end = new Date();
switch (period) {
case 'today':
return {
start: todayBounds.start,
end
};
case 'yesterday': {
const yesterday = new Date(now);
yesterday.setDate(yesterday.getDate() - 1);
return this.getDayBounds(yesterday);
}
case 'last2days': {
const twoDaysAgo = new Date(now);
twoDaysAgo.setDate(twoDaysAgo.getDate() - 2);
return this.getDayBounds(twoDaysAgo);
}
case 'last7days': {
const start = new Date(now);
start.setDate(start.getDate() - 6);
return {
start: this.getDayBounds(start).start,
end
};
}
case 'previous7days': {
const end = new Date(now);
end.setDate(end.getDate() - 7);
const start = new Date(end);
start.setDate(start.getDate() - 6);
return {
start: this.getDayBounds(start).start,
end: this.getDayBounds(end).end
};
}
case 'last30days': {
const start = new Date(now);
start.setDate(start.getDate() - 29);
return {
start: this.getDayBounds(start).start,
end
};
}
case 'previous30days': {
const end = new Date(now);
end.setDate(end.getDate() - 30);
const start = new Date(end);
start.setDate(start.getDate() - 29);
return {
start: this.getDayBounds(start).start,
end: this.getDayBounds(end).end
};
}
case 'last90days': {
const start = new Date(now);
start.setDate(start.getDate() - 89);
return {
start: this.getDayBounds(start).start,
end
};
}
case 'previous90days': {
const end = new Date(now);
end.setDate(end.getDate() - 90);
const start = new Date(end);
start.setDate(start.getDate() - 89);
return {
start: this.getDayBounds(start).start,
end: this.getDayBounds(end).end
};
}
default:
throw new Error(`Unsupported time period: ${period}`);
}
} catch (error) {
console.error('Error in getDateRange:', error);
throw error;
}
}
getPreviousPeriod(period) {
try {
const now = new Date();
switch (period) {
case 'today':
return 'yesterday';
case 'yesterday': {
// Return bounds for 2 days ago
const twoDaysAgo = new Date(now);
twoDaysAgo.setDate(twoDaysAgo.getDate() - 2);
return this.getDayBounds(twoDaysAgo);
}
case 'last7days': {
// Return bounds for previous 7 days
const end = new Date(now);
end.setDate(end.getDate() - 7);
const start = new Date(end);
start.setDate(start.getDate() - 7);
return {
start: this.getDayBounds(start).start,
end: this.getDayBounds(end).end
};
}
case 'last30days': {
const end = new Date(now);
end.setDate(end.getDate() - 30);
const start = new Date(end);
start.setDate(start.getDate() - 30);
return {
start: this.getDayBounds(start).start,
end: this.getDayBounds(end).end
};
}
case 'last90days': {
const end = new Date(now);
end.setDate(end.getDate() - 90);
const start = new Date(end);
start.setDate(start.getDate() - 90);
return {
start: this.getDayBounds(start).start,
end: this.getDayBounds(end).end
};
}
default:
throw new Error(`Unsupported time period: ${period}`);
}
} catch (error) {
console.error('Error in getPreviousPeriod:', error);
throw error;
}
}
getCurrentBusinessDayEnd() {
try {
const now = new Date();
const todayBounds = this.getDayBounds(now);
// If current time is before day start (1 AM ET / 6 AM UTC),
// then we're still in yesterday's business day
const todayStart = new Date(Date.UTC(
now.getUTCFullYear(),
now.getUTCMonth(),
now.getUTCDate(),
this.dayStartsAt + 5,
0,
0,
0
));
if (now < todayStart) {
const yesterdayBounds = this.getDayBounds(new Date(now.getTime() - 24 * 60 * 60 * 1000));
return yesterdayBounds.end;
}
// Return the earlier of current time or today's end
return now < todayBounds.end ? now : todayBounds.end;
} catch (error) {
console.error('Error in getCurrentBusinessDayEnd:', error);
return new Date();
}
}
isValidTimeRange(timeRange) {
return TimeManager.ALLOWED_RANGES.includes(timeRange);
}
isToday(date) {
const now = new Date();
const targetDate = new Date(date);
return (
targetDate.getUTCFullYear() === now.getUTCFullYear() &&
targetDate.getUTCMonth() === now.getUTCMonth() &&
targetDate.getUTCDate() === now.getUTCDate()
);
}
formatDate(date) {
try {
return date.toLocaleString('en-US', {
timeZone: this.timezone,
year: 'numeric',
month: '2-digit',
day: '2-digit',
hour: '2-digit',
minute: '2-digit',
second: '2-digit'
});
} catch (error) {
console.error('Error formatting date:', error);
return date.toISOString();
}
}
}
export const createTimeManager = (timezone, dayStartsAt) => new TimeManager(timezone, dayStartsAt);


@@ -0,0 +1,10 @@
# Server Configuration
NODE_ENV=development
PORT=3003
# Authentication
JWT_SECRET=your-secret-key-here
DASHBOARD_PASSWORD=your-dashboard-password-here
# Cookie Settings
COOKIE_DOMAIN=localhost # In production: .kent.pw


@@ -0,0 +1,203 @@
// auth-server/index.js
const path = require('path');
require('dotenv').config({ path: path.join(__dirname, '.env') });
const express = require('express');
const cors = require('cors');
const cookieParser = require('cookie-parser');
const jwt = require('jsonwebtoken');
// Debug environment variables
console.log('Environment variables loaded from:', path.join(__dirname, '.env'));
console.log('Current directory:', __dirname);
console.log('Available env vars:', Object.keys(process.env));
const app = express();
const PORT = process.env.PORT || 3003;
const JWT_SECRET = process.env.JWT_SECRET;
const DASHBOARD_PASSWORD = process.env.DASHBOARD_PASSWORD;
// Validate required environment variables
if (!JWT_SECRET || !DASHBOARD_PASSWORD) {
console.error('Missing required environment variables:');
if (!JWT_SECRET) console.error('- JWT_SECRET');
if (!DASHBOARD_PASSWORD) console.error('- DASHBOARD_PASSWORD');
process.exit(1);
}
// Middleware
app.use(express.json());
app.use(cookieParser());
// Configure CORS
const corsOptions = {
origin: function(origin, callback) {
const allowedOrigins = [
'http://localhost:3000',
'https://dashboard.kent.pw'
];
console.log('CORS check for origin:', origin);
// Allow local network IPs (192.168.1.xxx)
if (origin && origin.match(/^http:\/\/192\.168\.1\.\d{1,3}(:\d+)?$/)) {
callback(null, true);
return;
}
// Check if origin is in allowed list
if (!origin || allowedOrigins.indexOf(origin) !== -1) {
callback(null, true);
} else {
callback(new Error('Not allowed by CORS'));
}
},
credentials: true,
methods: ['GET', 'POST', 'OPTIONS'],
allowedHeaders: ['Content-Type', 'Authorization', 'Cookie', 'Accept'],
exposedHeaders: ['Set-Cookie']
};
app.use(cors(corsOptions));
app.options('*', cors(corsOptions));
// Debug logging
app.use((req, res, next) => {
console.log(`${new Date().toISOString()} ${req.method} ${req.url}`);
console.log('Headers:', req.headers);
console.log('Cookies:', req.cookies);
next();
});
// Health check endpoint
app.get('/health', (req, res) => {
res.json({
status: 'ok',
timestamp: new Date().toISOString()
});
});
// Auth endpoints
app.post('/login', (req, res) => {
console.log('Login attempt received');
console.log('Request body:', req.body);
console.log('Origin:', req.headers.origin);
const { password } = req.body;
if (!password) {
console.log('No password provided');
return res.status(400).json({
success: false,
message: 'Password is required'
});
}
console.log('Comparing passwords...');
console.log('Provided password length:', password.length);
console.log('Expected password length:', DASHBOARD_PASSWORD.length);
if (password === DASHBOARD_PASSWORD) {
console.log('Password matched');
const token = jwt.sign({ authorized: true }, JWT_SECRET, {
expiresIn: '24h'
});
// Determine if request is from local network
const isLocalNetwork = req.headers.origin?.includes('192.168.1.') || req.headers.origin?.includes('localhost');
const cookieOptions = {
httpOnly: true,
secure: !isLocalNetwork, // Only use secure for non-local requests
sameSite: isLocalNetwork ? 'lax' : 'none',
path: '/',
maxAge: 24 * 60 * 60 * 1000 // 24 hours
};
// Only set domain for production
if (!isLocalNetwork) {
cookieOptions.domain = '.kent.pw';
}
console.log('Setting cookie with options:', cookieOptions);
res.cookie('token', token, cookieOptions);
console.log('Response headers:', res.getHeaders());
res.json({
success: true,
debug: {
origin: req.headers.origin,
cookieOptions
}
});
} else {
console.log('Password mismatch');
res.status(401).json({
success: false,
message: 'Invalid password'
});
}
});
// Auth check endpoint (verbose logging for debugging)
app.get('/check', (req, res) => {
console.log('Auth check received');
console.log('All cookies:', req.cookies);
console.log('Headers:', req.headers);
const token = req.cookies.token;
if (!token) {
console.log('No token found in cookies');
return res.status(401).json({
authenticated: false,
error: 'no_token'
});
}
try {
const decoded = jwt.verify(token, JWT_SECRET);
console.log('Token verified successfully:', decoded);
res.json({ authenticated: true });
} catch (err) {
console.log('Token verification failed:', err.message);
res.status(401).json({
authenticated: false,
error: 'invalid_token',
message: err.message
});
}
});
app.post('/logout', (req, res) => {
const isLocalNetwork = req.headers.origin?.includes('192.168.1.') || req.headers.origin?.includes('localhost');
const cookieOptions = {
httpOnly: true,
secure: !isLocalNetwork,
sameSite: isLocalNetwork ? 'lax' : 'none',
path: '/',
domain: isLocalNetwork ? undefined : '.kent.pw'
};
console.log('Clearing cookie with options:', cookieOptions);
res.clearCookie('token', cookieOptions);
res.json({ success: true });
});
// Error handling middleware
app.use((err, req, res, next) => {
console.error('Server error:', err);
res.status(500).json({
success: false,
message: 'Internal server error',
error: err.message
});
});
// Start server
app.listen(PORT, () => {
console.log(`Auth server running on port ${PORT}`);
console.log('Environment:', process.env.NODE_ENV);
console.log('CORS origins:', corsOptions.origin);
console.log('JWT_SECRET length:', JWT_SECRET?.length);
console.log('DASHBOARD_PASSWORD length:', DASHBOARD_PASSWORD?.length);
});

File diff suppressed because it is too large


@@ -0,0 +1,22 @@
{
"name": "auth-server",
"version": "1.0.0",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"keywords": [],
"author": "",
"license": "ISC",
"description": "",
"dependencies": {
"cookie-parser": "^1.4.7",
"cors": "^2.8.5",
"date-fns": "^4.1.0",
"date-fns-tz": "^3.2.0",
"dotenv": "^16.4.7",
"express": "^4.21.1",
"express-session": "^1.18.1",
"jsonwebtoken": "^9.0.2"
}
}


@@ -0,0 +1 @@
/etc/nginx/sites-enabled/dashboard.conf

File diff suppressed because it is too large

View File

@@ -0,0 +1,21 @@
{
"name": "google-analytics-server",
"version": "1.0.0",
"description": "Google Analytics server for dashboard",
"main": "server.js",
"scripts": {
"start": "node server.js",
"dev": "nodemon server.js"
},
"dependencies": {
"@google-analytics/data": "^4.0.0",
"cors": "^2.8.5",
"dotenv": "^16.3.1",
"express": "^4.18.2",
"redis": "^4.6.11",
"winston": "^3.11.0"
},
"devDependencies": {
"nodemon": "^3.0.2"
}
}

View File

@@ -0,0 +1,254 @@
const express = require('express');
const { BetaAnalyticsDataClient } = require('@google-analytics/data');
const router = express.Router();
const logger = require('../utils/logger');
// Initialize GA4 client
const analyticsClient = new BetaAnalyticsDataClient({
credentials: JSON.parse(process.env.GOOGLE_APPLICATION_CREDENTIALS_JSON)
});
const propertyId = process.env.GA_PROPERTY_ID;
// Cache durations
const CACHE_DURATIONS = {
REALTIME_BASIC: 60, // 1 minute
REALTIME_DETAILED: 300, // 5 minutes
BASIC_METRICS: 3600, // 1 hour
USER_BEHAVIOR: 3600 // 1 hour
};
// Basic metrics endpoint
router.get('/metrics', async (req, res) => {
try {
const { startDate = '7daysAgo' } = req.query;
const cacheKey = `analytics:basic_metrics:${startDate}`;
// Check Redis cache
const cachedData = await req.redisClient.get(cacheKey);
if (cachedData) {
logger.info('Returning cached basic metrics data');
return res.json({ success: true, data: JSON.parse(cachedData) });
}
// Fetch from GA4
const [response] = await analyticsClient.runReport({
property: `properties/${propertyId}`,
dateRanges: [{ startDate, endDate: 'today' }],
dimensions: [{ name: 'date' }],
metrics: [
{ name: 'activeUsers' },
{ name: 'newUsers' },
{ name: 'averageSessionDuration' },
{ name: 'screenPageViews' },
{ name: 'bounceRate' },
{ name: 'conversions' }
],
returnPropertyQuota: true
});
// Cache the response
await req.redisClient.set(cacheKey, JSON.stringify(response), {
EX: CACHE_DURATIONS.BASIC_METRICS
});
res.json({ success: true, data: response });
} catch (error) {
logger.error('Error fetching basic metrics:', error);
res.status(500).json({ success: false, error: error.message });
}
});
// Realtime basic data endpoint
router.get('/realtime/basic', async (req, res) => {
try {
const cacheKey = 'analytics:realtime:basic';
// Check Redis cache
const cachedData = await req.redisClient.get(cacheKey);
if (cachedData) {
logger.info('Returning cached realtime basic data');
return res.json({ success: true, data: JSON.parse(cachedData) });
}
// Fetch active users
const [userResponse] = await analyticsClient.runRealtimeReport({
property: `properties/${propertyId}`,
metrics: [{ name: 'activeUsers' }],
returnPropertyQuota: true
});
// Fetch last 5 minutes
const [fiveMinResponse] = await analyticsClient.runRealtimeReport({
property: `properties/${propertyId}`,
metrics: [{ name: 'activeUsers' }],
minuteRanges: [{ startMinutesAgo: 5, endMinutesAgo: 0 }]
});
// Fetch time series data
const [timeSeriesResponse] = await analyticsClient.runRealtimeReport({
property: `properties/${propertyId}`,
dimensions: [{ name: 'minutesAgo' }],
metrics: [{ name: 'activeUsers' }]
});
const response = {
userResponse,
fiveMinResponse,
timeSeriesResponse,
quotaInfo: {
projectHourly: userResponse.propertyQuota.tokensPerProjectPerHour,
daily: userResponse.propertyQuota.tokensPerDay,
serverErrors: userResponse.propertyQuota.serverErrorsPerProjectPerHour,
thresholdedRequests: userResponse.propertyQuota.potentiallyThresholdedRequestsPerHour
}
};
// Cache the response
await req.redisClient.set(cacheKey, JSON.stringify(response), {
EX: CACHE_DURATIONS.REALTIME_BASIC
});
res.json({ success: true, data: response });
} catch (error) {
logger.error('Error fetching realtime basic data:', error);
res.status(500).json({ success: false, error: error.message });
}
});
// Realtime detailed data endpoint
router.get('/realtime/detailed', async (req, res) => {
try {
const cacheKey = 'analytics:realtime:detailed';
// Check Redis cache
const cachedData = await req.redisClient.get(cacheKey);
if (cachedData) {
logger.info('Returning cached realtime detailed data');
return res.json({ success: true, data: JSON.parse(cachedData) });
}
// Fetch current pages
const [pageResponse] = await analyticsClient.runRealtimeReport({
property: `properties/${propertyId}`,
dimensions: [{ name: 'unifiedScreenName' }],
metrics: [{ name: 'screenPageViews' }],
orderBy: [{ metric: { metricName: 'screenPageViews' }, desc: true }],
limit: 25
});
// Fetch events
const [eventResponse] = await analyticsClient.runRealtimeReport({
property: `properties/${propertyId}`,
dimensions: [{ name: 'eventName' }],
metrics: [{ name: 'eventCount' }],
orderBy: [{ metric: { metricName: 'eventCount' }, desc: true }],
limit: 25
});
// Fetch device categories
const [deviceResponse] = await analyticsClient.runRealtimeReport({
property: `properties/${propertyId}`,
dimensions: [{ name: 'deviceCategory' }],
metrics: [{ name: 'activeUsers' }],
orderBy: [{ metric: { metricName: 'activeUsers' }, desc: true }],
limit: 10,
returnPropertyQuota: true
});
const response = {
pageResponse,
eventResponse,
sourceResponse: deviceResponse
};
// Cache the response
await req.redisClient.set(cacheKey, JSON.stringify(response), {
EX: CACHE_DURATIONS.REALTIME_DETAILED
});
res.json({ success: true, data: response });
} catch (error) {
logger.error('Error fetching realtime detailed data:', error);
res.status(500).json({ success: false, error: error.message });
}
});
// User behavior endpoint
router.get('/user-behavior', async (req, res) => {
try {
const { timeRange = '30' } = req.query;
const cacheKey = `analytics:user_behavior:${timeRange}`;
// Check Redis cache
const cachedData = await req.redisClient.get(cacheKey);
if (cachedData) {
logger.info('Returning cached user behavior data');
return res.json({ success: true, data: JSON.parse(cachedData) });
}
// Fetch page data
const [pageResponse] = await analyticsClient.runReport({
property: `properties/${propertyId}`,
dateRanges: [{ startDate: `${timeRange}daysAgo`, endDate: 'today' }],
dimensions: [{ name: 'pagePath' }],
metrics: [
{ name: 'screenPageViews' },
{ name: 'averageSessionDuration' },
{ name: 'bounceRate' },
{ name: 'sessions' }
],
orderBy: [{
metric: { metricName: 'screenPageViews' },
desc: true
}],
limit: 25
});
// Fetch device data
const [deviceResponse] = await analyticsClient.runReport({
property: `properties/${propertyId}`,
dateRanges: [{ startDate: `${timeRange}daysAgo`, endDate: 'today' }],
dimensions: [{ name: 'deviceCategory' }],
metrics: [
{ name: 'screenPageViews' },
{ name: 'sessions' }
]
});
// Fetch source data
const [sourceResponse] = await analyticsClient.runReport({
property: `properties/${propertyId}`,
dateRanges: [{ startDate: `${timeRange}daysAgo`, endDate: 'today' }],
dimensions: [{ name: 'sessionSource' }],
metrics: [
{ name: 'sessions' },
{ name: 'conversions' }
],
orderBy: [{
metric: { metricName: 'sessions' },
desc: true
}],
limit: 25,
returnPropertyQuota: true
});
const response = {
pageResponse,
deviceResponse,
sourceResponse
};
// Cache the response
await req.redisClient.set(cacheKey, JSON.stringify(response), {
EX: CACHE_DURATIONS.USER_BEHAVIOR
});
res.json({ success: true, data: response });
} catch (error) {
logger.error('Error fetching user behavior data:', error);
res.status(500).json({ success: false, error: error.message });
}
});
module.exports = router;
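
Assuming this router is mounted at /api/analytics and req.redisClient is attached by middleware (both are wired up in the server file later in this commit), callers only pass the optional query parameters; caching is handled server-side. A small sketch, with host and port as placeholders (3007 is the default from server.js):

// Illustrative client calls against the analytics routes above.
const BASE = 'http://localhost:3007';

async function loadOverview() {
  // GA4-style relative dates such as '30daysAgo' are passed straight through to runReport
  const metrics = await fetch(`${BASE}/api/analytics/metrics?startDate=30daysAgo`).then(r => r.json());
  // timeRange is a number of days; the route turns it into `${timeRange}daysAgo`
  const behavior = await fetch(`${BASE}/api/analytics/user-behavior?timeRange=7`).then(r => r.json());
  return { metrics, behavior }; // each is { success, data } with the raw GA4 responses
}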

View File

@@ -0,0 +1,91 @@
const express = require('express');
const router = express.Router();
const analyticsService = require('../services/analytics.service');
// Basic metrics endpoint
router.get('/metrics', async (req, res) => {
try {
const { startDate = '7daysAgo' } = req.query;
console.log(`Fetching metrics with startDate: ${startDate}`);
const data = await analyticsService.getBasicMetrics(startDate);
res.json({ success: true, data });
} catch (error) {
console.error('Metrics error:', {
startDate: req.query.startDate,
error: error.message,
stack: error.stack
});
res.status(500).json({
success: false,
error: 'Failed to fetch metrics',
details: error.message
});
}
});
// Realtime basic data endpoint
router.get('/realtime/basic', async (req, res) => {
try {
console.log('Fetching realtime basic data');
const data = await analyticsService.getRealTimeBasicData();
res.json({ success: true, data });
} catch (error) {
console.error('Realtime basic error:', {
error: error.message,
stack: error.stack
});
res.status(500).json({
success: false,
error: 'Failed to fetch realtime basic data',
details: error.message
});
}
});
// Realtime detailed data endpoint
router.get('/realtime/detailed', async (req, res) => {
try {
console.log('Fetching realtime detailed data');
const data = await analyticsService.getRealTimeDetailedData();
res.json({ success: true, data });
} catch (error) {
console.error('Realtime detailed error:', {
error: error.message,
stack: error.stack
});
res.status(500).json({
success: false,
error: 'Failed to fetch realtime detailed data',
details: error.message
});
}
});
// User behavior endpoint
router.get('/user-behavior', async (req, res) => {
try {
const { timeRange = '30' } = req.query;
console.log(`Fetching user behavior with timeRange: ${timeRange}`);
const data = await analyticsService.getUserBehavior(timeRange);
res.json({ success: true, data });
} catch (error) {
console.error('User behavior error:', {
timeRange: req.query.timeRange,
error: error.message,
stack: error.stack
});
res.status(500).json({
success: false,
error: 'Failed to fetch user behavior data',
details: error.message
});
}
});
module.exports = router;

View File

@@ -0,0 +1,65 @@
const express = require('express');
const cors = require('cors');
const { createClient } = require('redis');
const analyticsRoutes = require('./routes/analytics.routes');
const app = express();
const port = process.env.GOOGLE_ANALYTICS_PORT || 3007;
// Redis client setup
const redisClient = createClient({
url: process.env.REDIS_URL || 'redis://localhost:6379'
});
redisClient.on('error', (err) => console.error('Redis Client Error:', err));
redisClient.on('connect', () => console.log('Redis Client Connected'));
// Connect to Redis
(async () => {
try {
await redisClient.connect();
} catch (err) {
console.error('Redis connection error:', err);
}
})();
// Middleware
app.use(cors());
app.use(express.json());
// Make Redis client available in requests
app.use((req, res, next) => {
req.redisClient = redisClient;
next();
});
// Routes
app.use('/api/analytics', analyticsRoutes);
// Error handling middleware
app.use((err, req, res, next) => {
console.error('Server error:', err);
res.status(err.status || 500).json({
success: false,
message: err.message || 'Internal server error',
error: process.env.NODE_ENV === 'production' ? err : {}
});
});
// Start server
app.listen(port, () => {
console.log(`Google Analytics server running on port ${port}`);
});
// Handle graceful shutdown
process.on('SIGTERM', async () => {
console.log('SIGTERM received. Shutting down gracefully...');
await redisClient.quit();
process.exit(0);
});
process.on('SIGINT', async () => {
console.log('SIGINT received. Shutting down gracefully...');
await redisClient.quit();
process.exit(0);
});

View File

@@ -0,0 +1,283 @@
const { BetaAnalyticsDataClient } = require('@google-analytics/data');
const { createClient } = require('redis');
class AnalyticsService {
constructor() {
// Initialize Redis client
this.redis = createClient({
url: process.env.REDIS_URL || 'redis://localhost:6379'
});
this.redis.on('error', err => console.error('Redis Client Error:', err));
this.redis.connect().catch(err => console.error('Redis connection error:', err));
try {
// Initialize GA4 client
const credentials = process.env.GOOGLE_APPLICATION_CREDENTIALS_JSON;
this.analyticsClient = new BetaAnalyticsDataClient({
credentials: typeof credentials === 'string' ? JSON.parse(credentials) : credentials
});
this.propertyId = process.env.GA_PROPERTY_ID;
} catch (error) {
console.error('Failed to initialize GA4 client:', error);
throw error;
}
}
// Cache durations
CACHE_DURATIONS = {
REALTIME_BASIC: 60, // 1 minute
REALTIME_DETAILED: 300, // 5 minutes
BASIC_METRICS: 3600, // 1 hour
USER_BEHAVIOR: 3600 // 1 hour
};
async getBasicMetrics(startDate = '7daysAgo') {
const cacheKey = `analytics:basic_metrics:${startDate}`;
try {
// Try Redis first
const cachedData = await this.redis.get(cacheKey);
if (cachedData) {
console.log('Analytics metrics found in Redis cache');
return JSON.parse(cachedData);
}
// Fetch from GA4
console.log('Fetching fresh metrics data from GA4');
const [response] = await this.analyticsClient.runReport({
property: `properties/${this.propertyId}`,
dateRanges: [{ startDate, endDate: 'today' }],
dimensions: [{ name: 'date' }],
metrics: [
{ name: 'activeUsers' },
{ name: 'newUsers' },
{ name: 'averageSessionDuration' },
{ name: 'screenPageViews' },
{ name: 'bounceRate' },
{ name: 'conversions' }
],
returnPropertyQuota: true
});
// Cache the response
await this.redis.set(cacheKey, JSON.stringify(response), {
EX: this.CACHE_DURATIONS.BASIC_METRICS
});
return response;
} catch (error) {
console.error('Error fetching analytics metrics:', {
error: error.message,
stack: error.stack
});
throw error;
}
}
async getRealTimeBasicData() {
const cacheKey = 'analytics:realtime:basic';
try {
// Try Redis first
const cachedData = await this.redis.get(cacheKey);
if (cachedData) {
console.log('Realtime basic data found in Redis cache');
return JSON.parse(cachedData);
}
console.log('Fetching fresh realtime data from GA4');
// Fetch active users
const [userResponse] = await this.analyticsClient.runRealtimeReport({
property: `properties/${this.propertyId}`,
metrics: [{ name: 'activeUsers' }],
returnPropertyQuota: true
});
// Fetch last 5 minutes
const [fiveMinResponse] = await this.analyticsClient.runRealtimeReport({
property: `properties/${this.propertyId}`,
metrics: [{ name: 'activeUsers' }],
minuteRanges: [{ startMinutesAgo: 5, endMinutesAgo: 0 }]
});
// Fetch time series data
const [timeSeriesResponse] = await this.analyticsClient.runRealtimeReport({
property: `properties/${this.propertyId}`,
dimensions: [{ name: 'minutesAgo' }],
metrics: [{ name: 'activeUsers' }]
});
const response = {
userResponse,
fiveMinResponse,
timeSeriesResponse,
quotaInfo: {
projectHourly: userResponse.propertyQuota.tokensPerProjectPerHour,
daily: userResponse.propertyQuota.tokensPerDay,
serverErrors: userResponse.propertyQuota.serverErrorsPerProjectPerHour,
thresholdedRequests: userResponse.propertyQuota.potentiallyThresholdedRequestsPerHour
}
};
// Cache the response
await this.redis.set(cacheKey, JSON.stringify(response), {
EX: this.CACHE_DURATIONS.REALTIME_BASIC
});
return response;
} catch (error) {
console.error('Error fetching realtime basic data:', {
error: error.message,
stack: error.stack
});
throw error;
}
}
async getRealTimeDetailedData() {
const cacheKey = 'analytics:realtime:detailed';
try {
// Try Redis first
const cachedData = await this.redis.get(cacheKey);
if (cachedData) {
console.log('Realtime detailed data found in Redis cache');
return JSON.parse(cachedData);
}
console.log('Fetching fresh realtime detailed data from GA4');
// Fetch current pages
const [pageResponse] = await this.analyticsClient.runRealtimeReport({
property: `properties/${this.propertyId}`,
dimensions: [{ name: 'unifiedScreenName' }],
metrics: [{ name: 'screenPageViews' }],
orderBy: [{ metric: { metricName: 'screenPageViews' }, desc: true }],
limit: 25
});
// Fetch events
const [eventResponse] = await this.analyticsClient.runRealtimeReport({
property: `properties/${this.propertyId}`,
dimensions: [{ name: 'eventName' }],
metrics: [{ name: 'eventCount' }],
orderBy: [{ metric: { metricName: 'eventCount' }, desc: true }],
limit: 25
});
// Fetch device categories
const [deviceResponse] = await this.analyticsClient.runRealtimeReport({
property: `properties/${this.propertyId}`,
dimensions: [{ name: 'deviceCategory' }],
metrics: [{ name: 'activeUsers' }],
orderBy: [{ metric: { metricName: 'activeUsers' }, desc: true }],
limit: 10,
returnPropertyQuota: true
});
const response = {
pageResponse,
eventResponse,
sourceResponse: deviceResponse
};
// Cache the response
await this.redis.set(cacheKey, JSON.stringify(response), {
EX: this.CACHE_DURATIONS.REALTIME_DETAILED
});
return response;
} catch (error) {
console.error('Error fetching realtime detailed data:', {
error: error.message,
stack: error.stack
});
throw error;
}
}
async getUserBehavior(timeRange = '30') {
const cacheKey = `analytics:user_behavior:${timeRange}`;
try {
// Try Redis first
const cachedData = await this.redis.get(cacheKey);
if (cachedData) {
console.log('User behavior data found in Redis cache');
return JSON.parse(cachedData);
}
console.log('Fetching fresh user behavior data from GA4');
// Fetch page data
const [pageResponse] = await this.analyticsClient.runReport({
property: `properties/${this.propertyId}`,
dateRanges: [{ startDate: `${timeRange}daysAgo`, endDate: 'today' }],
dimensions: [{ name: 'pagePath' }],
metrics: [
{ name: 'screenPageViews' },
{ name: 'averageSessionDuration' },
{ name: 'bounceRate' },
{ name: 'sessions' }
],
orderBy: [{
metric: { metricName: 'screenPageViews' },
desc: true
}],
limit: 25
});
// Fetch device data
const [deviceResponse] = await this.analyticsClient.runReport({
property: `properties/${this.propertyId}`,
dateRanges: [{ startDate: `${timeRange}daysAgo`, endDate: 'today' }],
dimensions: [{ name: 'deviceCategory' }],
metrics: [
{ name: 'screenPageViews' },
{ name: 'sessions' }
]
});
// Fetch source data
const [sourceResponse] = await this.analyticsClient.runReport({
property: `properties/${this.propertyId}`,
dateRanges: [{ startDate: `${timeRange}daysAgo`, endDate: 'today' }],
dimensions: [{ name: 'sessionSource' }],
metrics: [
{ name: 'sessions' },
{ name: 'conversions' }
],
orderBy: [{
metric: { metricName: 'sessions' },
desc: true
}],
limit: 25,
returnPropertyQuota: true
});
const response = {
pageResponse,
deviceResponse,
sourceResponse
};
// Cache the response
await this.redis.set(cacheKey, JSON.stringify(response), {
EX: this.CACHE_DURATIONS.USER_BEHAVIOR
});
return response;
} catch (error) {
console.error('Error fetching user behavior data:', {
error: error.message,
stack: error.stack
});
throw error;
}
}
}
module.exports = new AnalyticsService();
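
Every method above repeats the same check-cache, fetch, store sequence against node-redis. A sketch of a cache-through helper that would express the pattern once; the helper and its names are illustrative, not part of the service:

// Illustrative cache-through helper matching the pattern used by the methods above.
async function withCache(redis, cacheKey, ttlSeconds, fetchFresh) {
  const cached = await redis.get(cacheKey);
  if (cached) {
    return JSON.parse(cached); // same shape as what was stored
  }
  const fresh = await fetchFresh(); // e.g. a runReport / runRealtimeReport call
  await redis.set(cacheKey, JSON.stringify(fresh), { EX: ttlSeconds });
  return fresh;
}

// getBasicMetrics could then reduce to something like:
// return withCache(this.redis, cacheKey, this.CACHE_DURATIONS.BASIC_METRICS,
//   async () => (await this.analyticsClient.runReport({ /* same request as above */ }))[0]);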

View File

@@ -0,0 +1,35 @@
const winston = require('winston');
const path = require('path');
const logger = winston.createLogger({
level: process.env.LOG_LEVEL || 'info',
format: winston.format.combine(
winston.format.timestamp(),
winston.format.json()
),
transports: [
new winston.transports.File({
filename: path.join(__dirname, '../logs/pm2/error.log'),
level: 'error',
maxsize: 10485760, // 10MB
maxFiles: 5
}),
new winston.transports.File({
filename: path.join(__dirname, '../logs/pm2/combined.log'),
maxsize: 10485760, // 10MB
maxFiles: 5
})
]
});
// Add console transport in development
if (process.env.NODE_ENV !== 'production') {
logger.add(new winston.transports.Console({
format: winston.format.combine(
winston.format.colorize(),
winston.format.simple()
)
}));
}
module.exports = logger;
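
A short usage sketch (messages illustrative). One operational note: winston's File transport does not create missing directories, so ../logs/pm2 (relative to utils/) has to exist before the first write.

const logger = require('../utils/logger'); // as required by routes/analytics.routes.js above

logger.info('Returning cached basic metrics data');
logger.error('Error fetching basic metrics', { error: 'quota exceeded' }); // JSON lines in error.log and combined.log
// When NODE_ENV !== 'production', the same entries also hit the colorized console transport.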

File diff suppressed because it is too large

View File

@@ -0,0 +1,19 @@
{
"name": "gorgias-server",
"version": "1.0.0",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"keywords": [],
"author": "",
"license": "ISC",
"description": "",
"dependencies": {
"axios": "^1.7.9",
"cors": "^2.8.5",
"dotenv": "^16.4.7",
"express": "^4.21.2",
"redis": "^4.7.0"
}
}

View File

@@ -0,0 +1,119 @@
const express = require('express');
const router = express.Router();
const gorgiasService = require('../services/gorgias.service');
// Get statistics
router.post('/stats/:name', async (req, res) => {
try {
const { name } = req.params;
const filters = req.body;
console.log(`Fetching ${name} statistics with filters:`, filters);
if (!name) {
return res.status(400).json({
error: 'Missing statistic name',
details: 'The name parameter is required'
});
}
const data = await gorgiasService.getStatistics(name, filters);
if (!data) {
return res.status(404).json({
error: 'No data found',
details: `No statistics found for ${name}`
});
}
res.json({ data });
} catch (error) {
console.error('Statistics error:', {
name: req.params.name,
filters: req.body,
error: error.message,
stack: error.stack,
response: error.response?.data
});
// Handle specific error cases
if (error.response?.status === 401) {
return res.status(401).json({
error: 'Authentication failed',
details: 'Invalid Gorgias API credentials'
});
}
if (error.response?.status === 404) {
return res.status(404).json({
error: 'Not found',
details: `Statistics type '${req.params.name}' not found`
});
}
if (error.response?.status === 400) {
return res.status(400).json({
error: 'Invalid request',
details: error.response?.data?.message || 'The request was invalid',
data: error.response?.data
});
}
res.status(500).json({
error: 'Failed to fetch statistics',
details: error.response?.data?.message || error.message,
data: error.response?.data
});
}
});
// Get tickets
router.get('/tickets', async (req, res) => {
try {
const data = await gorgiasService.getTickets(req.query);
res.json(data);
} catch (error) {
console.error('Tickets error:', {
params: req.query,
error: error.message,
response: error.response?.data
});
if (error.response?.status === 401) {
return res.status(401).json({
error: 'Authentication failed',
details: 'Invalid Gorgias API credentials'
});
}
if (error.response?.status === 400) {
return res.status(400).json({
error: 'Invalid request',
details: error.response?.data?.message || 'The request was invalid',
data: error.response?.data
});
}
res.status(500).json({
error: 'Failed to fetch tickets',
details: error.response?.data?.message || error.message,
data: error.response?.data
});
}
});
// Get customer satisfaction
router.get('/satisfaction', async (req, res) => {
try {
const data = await gorgiasService.getCustomerSatisfaction(req.query);
res.json(data);
} catch (error) {
console.error('Satisfaction error:', error);
res.status(500).json({
error: 'Failed to fetch customer satisfaction',
details: error.response?.data || error.message
});
}
});
module.exports = router;

View File

@@ -0,0 +1,31 @@
const express = require('express');
const cors = require('cors');
const path = require('path');
require('dotenv').config({
path: path.resolve(__dirname, '.env')
});
const app = express();
const port = process.env.PORT || 3006;
app.use(cors());
app.use(express.json());
// Import routes
const gorgiasRoutes = require('./routes/gorgias.routes');
// Use routes
app.use('/api/gorgias', gorgiasRoutes);
// Error handling middleware
app.use((err, req, res, next) => {
console.error(err.stack);
res.status(500).json({ error: 'Something went wrong!' });
});
// Start server
app.listen(port, () => {
console.log(`Gorgias API server running on port ${port}`);
});
module.exports = app;

View File

@@ -0,0 +1,119 @@
const axios = require('axios');
const { createClient } = require('redis');
class GorgiasService {
constructor() {
this.redis = createClient({
url: process.env.REDIS_URL
});
this.redis.on('error', err => console.error('Redis Client Error:', err));
this.redis.connect().catch(err => console.error('Redis connection error:', err));
// Create base64 encoded auth string
const auth = Buffer.from(`${process.env.GORGIAS_API_USERNAME}:${process.env.GORGIAS_API_KEY}`).toString('base64');
this.apiClient = axios.create({
baseURL: `https://${process.env.GORGIAS_DOMAIN}.gorgias.com/api`,
headers: {
'Authorization': `Basic ${auth}`,
'Content-Type': 'application/json'
}
});
}
async getStatistics(name, filters = {}) {
const cacheKey = `gorgias:stats:${name}:${JSON.stringify(filters)}`;
try {
// Try Redis first
const cachedData = await this.redis.get(cacheKey);
if (cachedData) {
console.log(`Statistics ${name} found in Redis cache`);
return JSON.parse(cachedData);
}
console.log(`Fetching ${name} statistics with filters:`, filters);
// If explicit datetimes weren't supplied, derive full-day UTC bounds from start_date/end_date
if (!filters.start_datetime || !filters.end_datetime) {
const start = new Date(filters.start_datetime || filters.start_date);
start.setUTCHours(0, 0, 0, 0);
const end = new Date(filters.end_datetime || filters.end_date);
end.setUTCHours(23, 59, 59, 999);
filters = {
...filters,
start_datetime: start.toISOString(),
end_datetime: end.toISOString()
};
}
// Fetch from API
const response = await this.apiClient.post(`/stats/${name}`, filters);
const data = response.data;
// Save to Redis with 5 minute expiry
await this.redis.set(cacheKey, JSON.stringify(data), {
EX: 300 // 5 minutes
});
return data;
} catch (error) {
console.error(`Error in getStatistics for ${name}:`, {
error: error.message,
filters,
response: error.response?.data
});
throw error;
}
}
async getTickets(params = {}) {
const cacheKey = `gorgias:tickets:${JSON.stringify(params)}`;
try {
// Try Redis first
const cachedData = await this.redis.get(cacheKey);
if (cachedData) {
console.log('Tickets found in Redis cache');
return JSON.parse(cachedData);
}
// Expand plain dates to full-day UTC datetime bounds
const formattedParams = { ...params };
if (params.start_date) {
const start = new Date(params.start_date);
start.setUTCHours(0, 0, 0, 0);
formattedParams.start_datetime = start.toISOString();
delete formattedParams.start_date;
}
if (params.end_date) {
const end = new Date(params.end_date);
end.setUTCHours(23, 59, 59, 999);
formattedParams.end_datetime = end.toISOString();
delete formattedParams.end_date;
}
// Fetch from API
const response = await this.apiClient.get('/tickets', { params: formattedParams });
const data = response.data;
// Save to Redis with 5 minute expiry
await this.redis.set(cacheKey, JSON.stringify(data), {
EX: 300 // 5 minutes
});
return data;
} catch (error) {
console.error('Error fetching tickets:', {
error: error.message,
params,
response: error.response?.data
});
throw error;
}
}
}
module.exports = new GorgiasService();
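
A worked example of the date handling above (values arbitrary): a tickets query arriving with plain dates is widened to a full UTC day before being forwarded to Gorgias.

// Params as received by getTickets()
const params = { start_date: '2025-01-06', end_date: '2025-01-07', limit: 30 };
// After the conversion block above, the request to GET /tickets carries:
// {
//   start_datetime: '2025-01-06T00:00:00.000Z',
//   end_datetime: '2025-01-07T23:59:59.999Z',
//   limit: 30
// }
// start_date/end_date themselves are deleted, so only the datetime variants reach the API.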

File diff suppressed because it is too large

View File

@@ -0,0 +1,25 @@
{
"name": "klaviyo-server",
"version": "1.0.0",
"description": "Klaviyo API integration server",
"main": "server.js",
"type": "module",
"scripts": {
"start": "node server.js",
"dev": "nodemon server.js"
},
"dependencies": {
"cors": "^2.8.5",
"dotenv": "^16.4.7",
"esm": "^3.2.25",
"express": "^4.18.2",
"express-rate-limit": "^7.5.0",
"ioredis": "^5.4.1",
"luxon": "^3.5.0",
"node-fetch": "^3.3.2",
"recharts": "^2.15.0"
},
"devDependencies": {
"nodemon": "^3.0.2"
}
}

View File

@@ -0,0 +1,71 @@
import express from 'express';
import { CampaignsService } from '../services/campaigns.service.js';
import { TimeManager } from '../utils/time.utils.js';
export function createCampaignsRouter(apiKey, apiRevision) {
const router = express.Router();
const timeManager = new TimeManager();
const campaignsService = new CampaignsService(apiKey, apiRevision);
// Get campaigns with optional filtering
router.get('/', async (req, res) => {
try {
const params = {
pageSize: parseInt(req.query.pageSize) || 50,
sort: req.query.sort || '-send_time',
status: req.query.status,
startDate: req.query.startDate,
endDate: req.query.endDate,
pageCursor: req.query.pageCursor
};
console.log('[Campaigns Route] Fetching campaigns with params:', params);
const data = await campaignsService.getCampaigns(params);
console.log('[Campaigns Route] Success:', {
count: data.data?.length || 0
});
res.json(data);
} catch (error) {
console.error('[Campaigns Route] Error:', error);
res.status(500).json({
status: 'error',
message: error.message,
details: error.response?.data || null
});
}
});
// Get campaigns by time range
router.get('/:timeRange', async (req, res) => {
try {
const { timeRange } = req.params;
const { status } = req.query;
let result;
if (timeRange === 'custom') {
const { startDate, endDate } = req.query;
if (!startDate || !endDate) {
return res.status(400).json({ error: 'Custom range requires startDate and endDate' });
}
result = await campaignsService.getCampaigns({
startDate,
endDate,
status
});
} else {
result = await campaignsService.getCampaignsByTimeRange(
timeRange,
{ status }
);
}
res.json(result);
} catch (error) {
console.error("[Campaigns Route] Error:", error);
res.status(500).json({ error: error.message });
}
});
return router;
}

View File

@@ -0,0 +1,480 @@
import express from 'express';
import { EventsService } from '../services/events.service.js';
import { TimeManager } from '../utils/time.utils.js';
import { RedisService } from '../services/redis.service.js';
// Metric IDs for key Klaviyo events (mirrors the constants used in the events service)
const METRIC_IDS = {
PLACED_ORDER: 'Y8cqcF',
SHIPPED_ORDER: 'VExpdL',
ACCOUNT_CREATED: 'TeeypV',
CANCELED_ORDER: 'YjVMNg',
NEW_BLOG_POST: 'YcxeDr',
PAYMENT_REFUNDED: 'R7XUYh'
};
export function createEventsRouter(apiKey, apiRevision) {
const router = express.Router();
const timeManager = new TimeManager();
const eventsService = new EventsService(apiKey, apiRevision);
const redisService = new RedisService();
// Get events with optional filtering
router.get('/', async (req, res) => {
try {
const params = {
pageSize: parseInt(req.query.pageSize) || 50,
sort: req.query.sort || '-datetime',
metricId: req.query.metricId,
startDate: req.query.startDate,
endDate: req.query.endDate,
pageCursor: req.query.pageCursor,
fields: {}
};
// Parse fields parameter if provided
if (req.query.fields) {
try {
params.fields = JSON.parse(req.query.fields);
} catch (e) {
console.warn('[Events Route] Invalid fields parameter:', e);
}
}
console.log('[Events Route] Fetching events with params:', params);
const data = await eventsService.getEvents(params);
console.log('[Events Route] Success:', {
count: data.data?.length || 0,
included: data.included?.length || 0
});
res.json(data);
} catch (error) {
console.error('[Events Route] Error:', error);
res.status(500).json({
status: 'error',
message: error.message,
details: error.response?.data || null
});
}
});
// Get events by time range
router.get('/by-time/:timeRange', async (req, res) => {
try {
const { timeRange } = req.params;
const { metricId, startDate, endDate } = req.query;
let result;
if (timeRange === 'custom') {
if (!startDate || !endDate) {
return res.status(400).json({ error: 'Custom range requires startDate and endDate' });
}
const range = timeManager.getCustomRange(startDate, endDate);
if (!range) {
return res.status(400).json({ error: 'Invalid date range' });
}
result = await eventsService.getEvents({
metricId,
startDate: range.start.toISO(),
endDate: range.end.toISO()
});
} else {
result = await eventsService.getEventsByTimeRange(
timeRange,
{ metricId }
);
}
res.json(result);
} catch (error) {
console.error("[Events Route] Error:", error);
res.status(500).json({ error: error.message });
}
});
// Get comprehensive statistics for a time period
router.get('/stats', async (req, res) => {
try {
const { timeRange, startDate, endDate } = req.query;
console.log('[Events Route] Stats request:', {
timeRange,
startDate,
endDate
});
let range;
if (startDate && endDate) {
range = timeManager.getCustomRange(startDate, endDate);
} else if (timeRange) {
range = timeManager.getDateRange(timeRange);
} else {
return res.status(400).json({ error: 'Must provide either timeRange or startDate and endDate' });
}
if (!range) {
return res.status(400).json({ error: 'Invalid time range' });
}
const params = {
timeRange,
startDate: range.start.toISO(),
endDate: range.end.toISO()
};
console.log('[Events Route] Calculating period stats with params:', params);
const stats = await eventsService.calculatePeriodStats(params);
console.log('[Events Route] Stats response:', {
timeRange: {
start: range.start.toISO(),
end: range.end.toISO()
},
shippedCount: stats?.shipping?.shippedCount,
totalOrders: stats?.orderCount
});
res.json({
timeRange: {
start: range.start.toISO(),
end: range.end.toISO(),
displayStart: timeManager.formatForDisplay(range.start),
displayEnd: timeManager.formatForDisplay(range.end)
},
stats
});
} catch (error) {
console.error("[Events Route] Error:", error);
res.status(500).json({ error: error.message });
}
});
// Add new route for smart revenue projection
router.get('/projection', async (req, res) => {
try {
const { timeRange, startDate, endDate } = req.query;
console.log('[Events Route] Projection request:', {
timeRange,
startDate,
endDate
});
let range;
if (startDate && endDate) {
range = timeManager.getCustomRange(startDate, endDate);
} else if (timeRange) {
range = timeManager.getDateRange(timeRange);
} else {
return res.status(400).json({ error: 'Must provide either timeRange or startDate and endDate' });
}
if (!range) {
return res.status(400).json({ error: 'Invalid time range' });
}
const params = {
timeRange,
startDate: range.start.toISO(),
endDate: range.end.toISO()
};
// Try to get from cache first with a short TTL
const cacheKey = redisService._getCacheKey('projection', params);
const cachedData = await redisService.get(cacheKey);
if (cachedData) {
console.log('[Events Route] Cache hit for projection');
return res.json(cachedData);
}
console.log('[Events Route] Calculating smart projection with params:', params);
const projection = await eventsService.calculateSmartProjection(params);
// Cache the results with a short TTL (5 minutes)
await redisService.set(cacheKey, projection, 300);
res.json(projection);
} catch (error) {
console.error("[Events Route] Error calculating projection:", error);
res.status(500).json({ error: error.message });
}
});
// Add new route for detailed stats
router.get('/stats/details', async (req, res) => {
try {
const { timeRange, startDate, endDate, metric, daily = false } = req.query;
let range;
if (startDate && endDate) {
range = timeManager.getCustomRange(startDate, endDate);
} else if (timeRange) {
range = timeManager.getDateRange(timeRange);
} else {
return res.status(400).json({ error: 'Must provide either timeRange or startDate and endDate' });
}
if (!range) {
return res.status(400).json({ error: 'Invalid time range' });
}
const params = {
timeRange,
startDate: range.start.toISO(),
endDate: range.end.toISO(),
metric,
daily: daily === 'true' || daily === true
};
// Try to get from cache first
const cacheKey = redisService._getCacheKey('stats:details', params);
const cachedData = await redisService.get(cacheKey);
if (cachedData) {
console.log('[Events Route] Cache hit for detailed stats');
return res.json({
timeRange: {
start: range.start.toISO(),
end: range.end.toISO(),
displayStart: timeManager.formatForDisplay(range.start),
displayEnd: timeManager.formatForDisplay(range.end)
},
stats: cachedData
});
}
const stats = await eventsService.calculateDetailedStats(params);
// Cache the results
const ttl = redisService._getTTL(timeRange);
await redisService.set(cacheKey, stats, ttl);
res.json({
timeRange: {
start: range.start.toISO(),
end: range.end.toISO(),
displayStart: timeManager.formatForDisplay(range.start),
displayEnd: timeManager.formatForDisplay(range.end)
},
stats
});
} catch (error) {
console.error("[Events Route] Error:", error);
res.status(500).json({ error: error.message });
}
});
// Get product statistics for a time period
router.get('/products', async (req, res) => {
try {
const { timeRange, startDate, endDate } = req.query;
let range;
if (startDate && endDate) {
range = timeManager.getCustomRange(startDate, endDate);
} else if (timeRange) {
range = timeManager.getDateRange(timeRange);
} else {
return res.status(400).json({ error: 'Must provide either timeRange or startDate and endDate' });
}
if (!range) {
return res.status(400).json({ error: 'Invalid time range' });
}
const params = {
timeRange,
startDate: range.start.toISO(),
endDate: range.end.toISO()
};
// Try to get from cache first
const cacheKey = redisService._getCacheKey('events', params);
const cachedData = await redisService.getEventData('products', params);
if (cachedData) {
console.log('[Events Route] Cache hit for products');
return res.json({
timeRange: {
start: range.start.toISO(),
end: range.end.toISO(),
displayStart: timeManager.formatForDisplay(range.start),
displayEnd: timeManager.formatForDisplay(range.end)
},
stats: {
products: cachedData
}
});
}
const stats = await eventsService.calculatePeriodStats(params);
res.json({
timeRange: {
start: range.start.toISO(),
end: range.end.toISO(),
displayStart: timeManager.formatForDisplay(range.start),
displayEnd: timeManager.formatForDisplay(range.end)
},
stats
});
} catch (error) {
console.error("[Events Route] Error:", error);
res.status(500).json({ error: error.message });
}
});
// Get event feed (multiple event types sorted by time)
router.get('/feed', async (req, res) => {
try {
const { timeRange, startDate, endDate, metricIds } = req.query;
let range;
if (startDate && endDate) {
range = timeManager.getCustomRange(startDate, endDate);
} else if (timeRange) {
range = timeManager.getDateRange(timeRange);
} else {
return res.status(400).json({ error: 'Must provide either timeRange or startDate and endDate' });
}
if (!range) {
return res.status(400).json({ error: 'Invalid time range' });
}
const params = {
timeRange,
startDate: range.start.toISO(),
endDate: range.end.toISO(),
metricIds: metricIds ? JSON.parse(metricIds) : null
};
const result = await eventsService.getMultiMetricEvents(params);
res.json({
timeRange: {
start: range.start.toISO(),
end: range.end.toISO(),
displayStart: timeManager.formatForDisplay(range.start),
displayEnd: timeManager.formatForDisplay(range.end)
},
...result
});
} catch (error) {
console.error("[Events Route] Error:", error);
res.status(500).json({ error: error.message });
}
});
// Get aggregated events data
router.get('/aggregate', async (req, res) => {
try {
const { timeRange, startDate, endDate, interval = 'day', metricId, property } = req.query;
let range;
if (startDate && endDate) {
range = timeManager.getCustomRange(startDate, endDate);
} else if (timeRange) {
range = timeManager.getDateRange(timeRange);
} else {
return res.status(400).json({ error: 'Must provide either timeRange or startDate and endDate' });
}
if (!range) {
return res.status(400).json({ error: 'Invalid time range' });
}
const params = {
timeRange,
startDate: range.start.toISO(),
endDate: range.end.toISO(),
metricId,
interval,
property
};
const result = await eventsService.getEvents(params);
const groupedData = timeManager.groupEventsByInterval(result.data, interval, property);
res.json({
timeRange: {
start: range.start.toISO(),
end: range.end.toISO(),
displayStart: timeManager.formatForDisplay(range.start),
displayEnd: timeManager.formatForDisplay(range.end)
},
data: groupedData
});
} catch (error) {
console.error("[Events Route] Error:", error);
res.status(500).json({ error: error.message });
}
});
// Get date range for a given time period
router.get("/dateRange", async (req, res) => {
try {
const { timeRange, startDate, endDate } = req.query;
let range;
if (startDate && endDate) {
range = timeManager.getCustomRange(startDate, endDate);
} else {
range = timeManager.getDateRange(timeRange || 'today');
}
if (!range) {
return res.status(400).json({
error: "Invalid time range parameters"
});
}
res.json({
start: range.start.toISO(),
end: range.end.toISO(),
displayStart: timeManager.formatForDisplay(range.start),
displayEnd: timeManager.formatForDisplay(range.end)
});
} catch (error) {
console.error('Error getting date range:', error);
res.status(500).json({
error: "Failed to get date range"
});
}
});
// Clear cache for a specific time range
router.post("/clearCache", async (req, res) => {
try {
const { timeRange, startDate, endDate } = req.body;
await redisService.clearCache({ timeRange, startDate, endDate });
res.json({ message: "Cache cleared successfully" });
} catch (error) {
console.error('Error clearing cache:', error);
res.status(500).json({ error: "Failed to clear cache" });
}
});
// Add new batch metrics endpoint
router.get('/batch', async (req, res) => {
try {
const { timeRange, startDate, endDate, metrics } = req.query;
// Parse metrics array from query
const metricsList = metrics ? JSON.parse(metrics) : [];
const params = timeRange === 'custom'
? { startDate, endDate, metrics: metricsList }
: { timeRange, metrics: metricsList };
const results = await eventsService.getBatchMetrics(params);
res.json(results);
} catch (error) {
console.error('[Events Route] Error in batch request:', error);
res.status(500).json({ error: error.message });
}
});
return router;
}
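
Assuming the router is mounted under /api/klaviyo (as in the server file later in this commit), a typical stats request and a custom-range request look like this sketch; host and port are placeholders (3004 is the default), and the custom date format shown is only an assumption about what TimeManager.getCustomRange accepts.

import fetch from 'node-fetch';

const KLAVIYO_API = 'http://localhost:3004/api/klaviyo';

// Named range: TimeManager resolves it to concrete start/end timestamps
const monthStats = await fetch(`${KLAVIYO_API}/events/stats?timeRange=last30days`).then(r => r.json());

// Custom range: both startDate and endDate are required or the route answers 400
const custom = await fetch(
  `${KLAVIYO_API}/events/stats?startDate=2025-01-01&endDate=2025-01-31`
).then(r => r.json());

// Both responses have the shape { timeRange: { start, end, displayStart, displayEnd }, stats }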

View File

@@ -0,0 +1,17 @@
import express from 'express';
import { createEventsRouter } from './events.routes.js';
import { createMetricsRoutes } from './metrics.routes.js';
import { createCampaignsRouter } from './campaigns.routes.js';
import { createReportingRouter } from './reporting.routes.js';
export function createApiRouter(apiKey, apiRevision) {
const router = express.Router();
// Mount routers
router.use('/events', createEventsRouter(apiKey, apiRevision));
router.use('/metrics', createMetricsRoutes(apiKey, apiRevision));
router.use('/campaigns', createCampaignsRouter(apiKey, apiRevision));
router.use('/reporting', createReportingRouter(apiKey, apiRevision));
return router;
}

View File

@@ -0,0 +1,29 @@
import express from 'express';
import { MetricsService } from '../services/metrics.service.js';
export function createMetricsRoutes(apiKey, apiRevision) {
const router = express.Router();
const metricsService = new MetricsService(apiKey, apiRevision);
// Get all metrics
router.get('/', async (req, res) => {
try {
console.log('[Metrics Route] Fetching metrics');
const data = await metricsService.getMetrics();
console.log('[Metrics Route] Success:', {
count: data.data?.length || 0
});
res.json(data);
} catch (error) {
console.error('[Metrics Route] Error:', error);
res.status(500).json({
status: 'error',
message: error.message,
details: error.response?.data || null
});
}
});
return router;
}

View File

@@ -0,0 +1,29 @@
import express from 'express';
import { ReportingService } from '../services/reporting.service.js';
import { TimeManager } from '../utils/time.utils.js';
export function createReportingRouter(apiKey, apiRevision) {
const router = express.Router();
const reportingService = new ReportingService(apiKey, apiRevision);
const timeManager = new TimeManager();
// Get campaign reports by time range
router.get('/campaigns/:timeRange', async (req, res) => {
try {
const { timeRange } = req.params;
const { channel } = req.query;
const reports = await reportingService.getCampaignReports({
timeRange,
channel
});
res.json(reports);
} catch (error) {
console.error('[ReportingRoutes] Error fetching campaign reports:', error);
res.status(500).json({ error: error.message });
}
});
return router;
}

View File

@@ -0,0 +1,78 @@
import express from 'express';
import cors from 'cors';
import dotenv from 'dotenv';
import rateLimit from 'express-rate-limit';
import { createApiRouter } from './routes/index.js';
import path from 'path';
import { fileURLToPath } from 'url';
// Get directory name in ES modules
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Load environment variables
const envPath = path.resolve(__dirname, '.env');
console.log('[Server] Loading .env file from:', envPath);
dotenv.config({ path: envPath });
// Debug environment variables (without exposing sensitive data)
console.log('[Server] Environment variables loaded:', {
REDIS_HOST: process.env.REDIS_HOST || '(not set)',
REDIS_PORT: process.env.REDIS_PORT || '(not set)',
REDIS_USERNAME: process.env.REDIS_USERNAME || '(not set)',
REDIS_PASSWORD: process.env.REDIS_PASSWORD ? '(set)' : '(not set)',
NODE_ENV: process.env.NODE_ENV || '(not set)',
});
const app = express();
const port = process.env.KLAVIYO_PORT || 3004;
// Rate limiting for reporting endpoints
const reportingLimiter = rateLimit({
windowMs: 10 * 60 * 1000, // 10 minutes
max: 10, // limit each IP to 10 requests per windowMs
message: 'Too many requests to reporting endpoint, please try again later',
keyGenerator: (req) => {
// Use a combination of IP and endpoint for more granular control
return `${req.ip}-reporting`;
},
skip: (req) => {
// Only apply to campaign-values-reports endpoint
return !req.path.includes('campaign-values-reports');
}
});
// Middleware
app.use(cors());
app.use(express.json());
// Debug middleware to log all requests
app.use((req, res, next) => {
console.log(`[${new Date().toISOString()}] ${req.method} ${req.url}`);
next();
});
// Apply rate limiting to reporting endpoints
app.use('/api/klaviyo/reporting', reportingLimiter);
// Create and mount API routes
const apiRouter = createApiRouter(
process.env.KLAVIYO_API_KEY,
process.env.KLAVIYO_API_REVISION || '2024-02-15'
);
app.use('/api/klaviyo', apiRouter);
// Error handling middleware
app.use((err, req, res, next) => {
console.error('Unhandled error:', err);
res.status(500).json({
status: 'error',
message: 'Internal server error',
details: process.env.NODE_ENV === 'development' ? err.message : undefined
});
});
// Start server
app.listen(port, '0.0.0.0', () => {
console.log(`Klaviyo server listening at http://0.0.0.0:${port}`);
});

View File

@@ -0,0 +1,206 @@
import fetch from 'node-fetch';
import { TimeManager } from '../utils/time.utils.js';
import { RedisService } from './redis.service.js';
export class CampaignsService {
constructor(apiKey, apiRevision) {
this.apiKey = apiKey;
this.apiRevision = apiRevision;
this.baseUrl = 'https://a.klaviyo.com/api';
this.timeManager = new TimeManager();
this.redisService = new RedisService();
}
async getCampaigns(params = {}) {
try {
// Add request debouncing
const requestKey = JSON.stringify(params);
if (this._pendingRequests && this._pendingRequests[requestKey]) {
return this._pendingRequests[requestKey];
}
// Try to get from cache first
const cacheKey = this.redisService._getCacheKey('campaigns', params);
let cachedData = null;
try {
cachedData = await this.redisService.get(`${cacheKey}:raw`);
if (cachedData) {
return cachedData;
}
} catch (cacheError) {
console.warn('[CampaignsService] Cache error:', cacheError);
}
this._pendingRequests = this._pendingRequests || {};
this._pendingRequests[requestKey] = (async () => {
let allCampaigns = [];
let nextCursor = params.pageCursor;
let pageCount = 0;
const filter = params.filter || this._buildFilter(params);
do {
const queryParams = new URLSearchParams();
if (filter) {
queryParams.append('filter', filter);
}
queryParams.append('sort', params.sort || '-send_time');
if (nextCursor) {
queryParams.append('page[cursor]', nextCursor);
}
const url = `${this.baseUrl}/campaigns?${queryParams.toString()}`;
try {
const response = await fetch(url, {
method: 'GET',
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': `Klaviyo-API-Key ${this.apiKey}`,
'revision': this.apiRevision
}
});
if (!response.ok) {
const errorData = await response.json();
console.error('[CampaignsService] API Error:', errorData);
throw new Error(`Klaviyo API error: ${response.status} ${response.statusText}`);
}
const responseData = await response.json();
allCampaigns = allCampaigns.concat(responseData.data || []);
pageCount++;
nextCursor = responseData.links?.next ?
new URL(responseData.links.next).searchParams.get('page[cursor]') : null;
if (nextCursor) {
await new Promise(resolve => setTimeout(resolve, 50));
}
} catch (fetchError) {
console.error('[CampaignsService] Fetch error:', fetchError);
throw fetchError;
}
} while (nextCursor);
const transformedCampaigns = this._transformCampaigns(allCampaigns);
const result = {
data: transformedCampaigns,
meta: {
total_count: transformedCampaigns.length,
page_count: pageCount
}
};
try {
const ttl = this.redisService._getTTL(params.timeRange);
await this.redisService.set(`${cacheKey}:raw`, result, ttl);
} catch (cacheError) {
console.warn('[CampaignsService] Cache set error:', cacheError);
}
delete this._pendingRequests[requestKey];
return result;
})();
return await this._pendingRequests[requestKey];
} catch (error) {
console.error('[CampaignsService] Error fetching campaigns:', error);
throw error;
}
}
_buildFilter(params) {
const filters = [];
if (params.startDate && params.endDate) {
const startUtc = this.timeManager.formatForAPI(params.startDate);
const endUtc = this.timeManager.formatForAPI(params.endDate);
filters.push(`greater-or-equal(send_time,${startUtc})`);
filters.push(`less-than(send_time,${endUtc})`);
}
if (params.status) {
filters.push(`equals(status,"${params.status}")`);
}
if (params.customFilters) {
filters.push(...params.customFilters);
}
return filters.length > 0 ? (filters.length > 1 ? `and(${filters.join(',')})` : filters[0]) : null;
}
async getCampaignsByTimeRange(timeRange, options = {}) {
const range = this.timeManager.getDateRange(timeRange);
if (!range) {
throw new Error('Invalid time range specified');
}
const params = {
timeRange,
startDate: range.start.toISO(),
endDate: range.end.toISO(),
...options
};
// Try to get from cache first
const cacheKey = this.redisService._getCacheKey('campaigns', params);
let cachedData = null;
try {
cachedData = await this.redisService.get(`${cacheKey}:raw`);
if (cachedData) {
return cachedData;
}
} catch (cacheError) {
console.warn('[CampaignsService] Cache error:', cacheError);
}
return this.getCampaigns(params);
}
_transformCampaigns(campaigns) {
if (!Array.isArray(campaigns)) {
console.warn('[CampaignsService] Campaigns is not an array:', campaigns);
return [];
}
return campaigns.map(campaign => {
try {
const stats = campaign.attributes?.campaign_message?.stats || {};
return {
id: campaign.id,
name: campaign.attributes?.name || "Unnamed Campaign",
subject: campaign.attributes?.campaign_message?.subject || "",
send_time: campaign.attributes?.send_time,
stats: {
delivery_rate: stats.delivery_rate || 0,
delivered: stats.delivered || 0,
recipients: stats.recipients || 0,
open_rate: stats.open_rate || 0,
opens_unique: stats.opens_unique || 0,
opens: stats.opens || 0,
clicks_unique: stats.clicks_unique || 0,
click_rate: stats.click_rate || 0,
click_to_open_rate: stats.click_to_open_rate || 0,
conversion_value: stats.conversion_value || 0,
conversion_uniques: stats.conversion_uniques || 0
}
};
} catch (error) {
console.error('[CampaignsService] Error transforming campaign:', error, campaign);
return {
id: campaign.id || 'unknown',
name: 'Error Processing Campaign',
stats: {}
};
}
});
}
}
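
For reference, a sketch of what _buildFilter produces for a bounded, status-filtered query. The exact timestamp text depends on TimeManager.formatForAPI (defined in time.utils.js, not shown here), and the status value is illustrative only.

const service = new CampaignsService(process.env.KLAVIYO_API_KEY, '2024-02-15');
const filter = service._buildFilter({
  startDate: '2025-01-01T00:00:00Z',
  endDate: '2025-02-01T00:00:00Z',
  status: 'Sent' // illustrative status value
});
// => 'and(greater-or-equal(send_time,...),less-than(send_time,...),equals(status,"Sent"))'
// A single criterion comes back bare, without the and(...) wrapper; no criteria yields null.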

File diff suppressed because it is too large

View File

@@ -0,0 +1,38 @@
import fetch from 'node-fetch';
export class MetricsService {
constructor(apiKey, apiRevision) {
this.apiKey = apiKey;
this.apiRevision = apiRevision;
this.baseUrl = 'https://a.klaviyo.com/api';
}
async getMetrics() {
try {
const response = await fetch(`${this.baseUrl}/metrics/`, {
headers: {
'Authorization': `Klaviyo-API-Key ${this.apiKey}`,
'revision': this.apiRevision,
'Content-Type': 'application/json',
'Accept': 'application/json'
}
});
if (!response.ok) {
const errorData = await response.json();
console.error('[MetricsService] API Error:', errorData);
throw new Error(`Klaviyo API error: ${response.status} ${response.statusText}`);
}
const data = await response.json();
// Sort the results by name before returning
if (data.data) {
data.data.sort((a, b) => a.attributes.name.localeCompare(b.attributes.name));
}
return data;
} catch (error) {
console.error('[MetricsService] Error fetching metrics:', error);
throw error;
}
}
}

View File

@@ -0,0 +1,262 @@
import Redis from 'ioredis';
import { TimeManager } from '../utils/time.utils.js';
import dotenv from 'dotenv';
import path from 'path';
import { fileURLToPath } from 'url';
// Get directory name in ES modules
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Load environment variables again (redundant but safe)
const envPath = path.resolve(__dirname, '../.env');
console.log('[RedisService] Loading .env file from:', envPath);
dotenv.config({ path: envPath });
export class RedisService {
constructor() {
this.timeManager = new TimeManager();
this.DEFAULT_TTL = 5 * 60; // 5 minutes default TTL
this.isConnected = false;
this._initializeRedis();
}
_initializeRedis() {
try {
// Debug: Print all environment variables we're looking for
console.log('[RedisService] Environment variables state:', {
REDIS_HOST: process.env.REDIS_HOST ? '(set)' : '(not set)',
REDIS_PORT: process.env.REDIS_PORT ? '(set)' : '(not set)',
REDIS_USERNAME: process.env.REDIS_USERNAME ? '(set)' : '(not set)',
REDIS_PASSWORD: process.env.REDIS_PASSWORD ? '(set)' : '(not set)',
});
// Log Redis configuration (without password)
const host = process.env.REDIS_HOST || 'localhost';
const port = parseInt(process.env.REDIS_PORT) || 6379;
const username = process.env.REDIS_USERNAME || 'default';
const password = process.env.REDIS_PASSWORD;
console.log('[RedisService] Initializing Redis with config:', {
host,
port,
username,
hasPassword: !!password
});
const config = {
host,
port,
username,
retryStrategy: (times) => {
const delay = Math.min(times * 50, 2000);
return delay;
},
maxRetriesPerRequest: 3,
enableReadyCheck: true,
connectTimeout: 10000,
showFriendlyErrorStack: true,
retryUnfulfilled: true,
maxRetryAttempts: 5
};
// Only add password if it exists
if (password) {
console.log('[RedisService] Adding password to config');
config.password = password;
} else {
console.warn('[RedisService] No Redis password found in environment variables!');
}
this.client = new Redis(config);
// Handle connection events
this.client.on('connect', () => {
console.log('[RedisService] Connected to Redis');
this.isConnected = true;
});
this.client.on('ready', () => {
console.log('[RedisService] Redis is ready');
this.isConnected = true;
});
this.client.on('error', (err) => {
console.error('[RedisService] Redis error:', err);
this.isConnected = false;
// Log more details about the error
if (err.code === 'WRONGPASS') {
console.error('[RedisService] Authentication failed. Please check your Redis password.');
}
});
this.client.on('close', () => {
console.log('[RedisService] Redis connection closed');
this.isConnected = false;
});
this.client.on('reconnecting', (params) => {
console.log('[RedisService] Reconnecting to Redis:', params);
});
} catch (error) {
console.error('[RedisService] Error initializing Redis:', error);
this.isConnected = false;
}
}
async get(key) {
if (!this.isConnected) {
return null;
}
try {
const data = await this.client.get(key);
return data ? JSON.parse(data) : null;
} catch (error) {
console.error('[RedisService] Error getting data:', error);
return null;
}
}
async set(key, data, ttl = this.DEFAULT_TTL) {
if (!this.isConnected) {
return;
}
try {
await this.client.setex(key, ttl, JSON.stringify(data));
} catch (error) {
console.error('[RedisService] Error setting data:', error);
}
}
// Helper to generate cache keys
_getCacheKey(type, params = {}) {
const {
timeRange,
startDate,
endDate,
metricId,
metric,
daily,
cacheKey,
isPreviousPeriod,
customFilters
} = params;
let key = `klaviyo:${type}`;
// Handle "stats:details" for daily or metric-based keys
if (type === 'stats:details') {
// Add metric to key
key += `:${metric || 'all'}`;
// Add daily flag if present
if (daily) {
key += ':daily';
}
// Add custom filters hash if present
if (customFilters?.length) {
const filterHash = customFilters.join('').replace(/[^a-zA-Z0-9]/g, '');
key += `:${filterHash}`;
}
}
// If a specific cache key is provided, use it (highest priority)
if (cacheKey) {
key += `:${cacheKey}`;
}
// Otherwise, build a default cache key
else if (timeRange) {
key += `:${timeRange}`;
if (metricId) {
key += `:${metricId}`;
}
if (isPreviousPeriod) {
key += ':prev';
}
} else if (startDate && endDate) {
// For custom date ranges, include both dates in the key
key += `:custom:${startDate}:${endDate}`;
if (metricId) {
key += `:${metricId}`;
}
if (isPreviousPeriod) {
key += ':prev';
}
}
// Add order type to key if present
if (['pre_orders', 'local_pickup', 'on_hold'].includes(metric)) {
key += `:${metric}`;
}
return key;
}
// Get TTL based on time range
_getTTL(timeRange) {
const TTL_MAP = {
'today': 2 * 60, // 2 minutes
'yesterday': 30 * 60, // 30 minutes
'thisWeek': 5 * 60, // 5 minutes
'lastWeek': 60 * 60, // 1 hour
'thisMonth': 10 * 60, // 10 minutes
'lastMonth': 2 * 60 * 60, // 2 hours
'last7days': 5 * 60, // 5 minutes
'last30days': 15 * 60, // 15 minutes
'custom': 15 * 60 // 15 minutes
};
return TTL_MAP[timeRange] || this.DEFAULT_TTL;
}
async getEventData(type, params) {
if (!this.isConnected) {
return null;
}
try {
const baseKey = this._getCacheKey('events', params);
const data = await this.get(`${baseKey}:${type}`);
return data;
} catch (error) {
console.error('[RedisService] Error getting event data:', error);
return null;
}
}
async cacheEventData(type, params, data) {
if (!this.isConnected) {
return;
}
try {
const ttl = this._getTTL(params.timeRange);
const baseKey = this._getCacheKey('events', params);
// Cache raw event data
await this.set(`${baseKey}:${type}`, data, ttl);
} catch (error) {
console.error('[RedisService] Error caching event data:', error);
}
}
async clearCache(params = {}) {
if (!this.isConnected) {
return;
}
try {
const pattern = this._getCacheKey('events', params) + '*';
const keys = await this.client.keys(pattern);
if (keys.length > 0) {
await this.client.del(...keys);
}
} catch (error) {
console.error('[RedisService] Error clearing cache:', error);
}
}
}
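
A minimal usage sketch (not part of the committed file) showing how the key builder and TTL map above combine; the import path and the metric ID are assumptions for illustration only.

// Hypothetical usage sketch — import path and metric ID are illustrative.
import { RedisService } from './services/redis.service.js';

const redis = new RedisService();

async function demo() {
  const params = { timeRange: 'last7days', metricId: 'Y8cqcF' };

  // Writes under a key like "klaviyo:events:last7days:Y8cqcF:placed_order"
  // with the TTL mapped to last7days (5 minutes).
  await redis.cacheEventData('placed_order', params, { count: 42 });

  // A cache miss, a parse error, or a dropped connection all resolve to null.
  const cached = await redis.getEventData('placed_order', params);
  console.log(cached);
}

demo().catch(console.error);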

View File

@@ -0,0 +1,254 @@
import fetch from 'node-fetch';
import { TimeManager } from '../utils/time.utils.js';
import { RedisService } from './redis.service.js';
const METRIC_IDS = {
PLACED_ORDER: 'Y8cqcF'
};
export class ReportingService {
constructor(apiKey, apiRevision) {
this.apiKey = apiKey;
this.apiRevision = apiRevision;
this.baseUrl = 'https://a.klaviyo.com/api';
this.timeManager = new TimeManager();
this.redisService = new RedisService();
this._pendingReportRequest = null;
}
async getCampaignReports(params = {}) {
try {
// Check if there's a pending request
if (this._pendingReportRequest) {
console.log('[ReportingService] Using pending campaign report request');
return this._pendingReportRequest;
}
// Try to get from cache first
const cacheKey = this.redisService._getCacheKey('campaign_reports', params);
let cachedData = null;
try {
cachedData = await this.redisService.get(`${cacheKey}:raw`);
if (cachedData) {
console.log('[ReportingService] Using cached campaign report data');
return cachedData;
}
} catch (cacheError) {
console.warn('[ReportingService] Cache error:', cacheError);
}
// Create new request promise
this._pendingReportRequest = (async () => {
console.log('[ReportingService] Fetching fresh campaign report data');
const range = this.timeManager.getDateRange(params.timeRange || 'last30days');
// Determine which channels to fetch based on params
const channelsToFetch = params.channel === 'all' || !params.channel
? ['email', 'sms']
: [params.channel];
const allResults = [];
// Fetch each channel
for (const channel of channelsToFetch) {
const payload = {
data: {
type: "campaign-values-report",
attributes: {
timeframe: {
start: range.start.toISO(),
end: range.end.toISO()
},
statistics: [
"delivery_rate",
"delivered",
"recipients",
"open_rate",
"opens_unique",
"opens",
"click_rate",
"clicks_unique",
"click_to_open_rate",
"conversion_value",
"conversion_uniques"
],
conversion_metric_id: METRIC_IDS.PLACED_ORDER,
filter: `equals(send_channel,"${channel}")`
}
}
};
const response = await fetch(`${this.baseUrl}/campaign-values-reports`, {
method: 'POST',
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': `Klaviyo-API-Key ${this.apiKey}`,
'revision': this.apiRevision
},
body: JSON.stringify(payload)
});
if (!response.ok) {
const errorData = await response.json();
console.error('[ReportingService] API Error:', errorData);
throw new Error(`Klaviyo API error: ${response.status} ${response.statusText}`);
}
const reportData = await response.json();
console.log(`[ReportingService] Raw ${channel} report data:`, JSON.stringify(reportData, null, 2));
// Get campaign IDs from the report
const campaignIds = reportData.data?.attributes?.results?.map(result =>
result.groupings?.campaign_id
).filter(Boolean) || [];
if (campaignIds.length > 0) {
// Get campaign details including send time and subject lines
const campaignDetails = await this.getCampaignDetails(campaignIds);
// Process results for this channel
const channelResults = reportData.data.attributes.results.map(result => {
const campaignId = result.groupings.campaign_id;
const details = campaignDetails.find(detail => detail.id === campaignId);
return {
id: campaignId,
name: details?.attributes?.name,
subject: details?.attributes?.subject,
send_time: details?.attributes?.send_time,
channel: channel, // Use the channel we're currently processing
stats: {
delivery_rate: result.statistics.delivery_rate,
delivered: result.statistics.delivered,
recipients: result.statistics.recipients,
open_rate: result.statistics.open_rate,
opens_unique: result.statistics.opens_unique,
opens: result.statistics.opens,
click_rate: result.statistics.click_rate,
clicks_unique: result.statistics.clicks_unique,
click_to_open_rate: result.statistics.click_to_open_rate,
conversion_value: result.statistics.conversion_value,
conversion_uniques: result.statistics.conversion_uniques
}
};
});
allResults.push(...channelResults);
}
}
// Sort all results by date
const enrichedData = {
data: allResults.sort((a, b) => {
const dateA = new Date(a.send_time);
const dateB = new Date(b.send_time);
return dateB - dateA; // Sort by date descending
})
};
console.log('[ReportingService] Enriched data:', JSON.stringify(enrichedData, null, 2));
// Cache the enriched response for 10 minutes
try {
await this.redisService.set(`${cacheKey}:raw`, enrichedData, 600);
} catch (cacheError) {
console.warn('[ReportingService] Cache set error:', cacheError);
}
return enrichedData;
})();
const result = await this._pendingReportRequest;
this._pendingReportRequest = null;
return result;
} catch (error) {
console.error('[ReportingService] Error fetching campaign reports:', error);
this._pendingReportRequest = null;
throw error;
}
}
async getCampaignDetails(campaignIds = []) {
if (!Array.isArray(campaignIds) || campaignIds.length === 0) {
return [];
}
const fetchWithTimeout = async (campaignId, retries = 3) => {
for (let i = 0; i < retries; i++) {
try {
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), 10000); // 10 second timeout
const response = await fetch(
`${this.baseUrl}/campaigns/${campaignId}?include=campaign-messages`,
{
headers: {
'Accept': 'application/json',
'Authorization': `Klaviyo-API-Key ${this.apiKey}`,
'revision': this.apiRevision
},
signal: controller.signal
}
);
clearTimeout(timeoutId);
if (!response.ok) {
throw new Error(`Failed to fetch campaign ${campaignId}: ${response.status}`);
}
const data = await response.json();
if (!data.data) {
throw new Error(`Invalid response for campaign ${campaignId}`);
}
const message = data.included?.find(item => item.type === 'campaign-message');
console.log('[ReportingService] Campaign details for ID:', campaignId, {
send_channel: data.data.attributes.send_channel,
raw_attributes: data.data.attributes
});
return {
id: data.data.id,
type: data.data.type,
attributes: {
...data.data.attributes,
name: data.data.attributes.name,
send_time: data.data.attributes.send_time,
subject: message?.attributes?.content?.subject,
send_channel: data.data.attributes.send_channel || 'email'
}
};
} catch (error) {
if (i === retries - 1) throw error;
await new Promise(resolve => setTimeout(resolve, 1000 * (i + 1))); // Linear backoff: wait 1s, 2s, ... before retrying
}
}
};
// Process in smaller chunks to avoid overwhelming the API
const chunkSize = 10;
const campaignDetails = [];
for (let i = 0; i < campaignIds.length; i += chunkSize) {
const chunk = campaignIds.slice(i, i + chunkSize);
const results = await Promise.all(
chunk.map(id => fetchWithTimeout(id).catch(error => {
console.error(`Failed to fetch campaign ${id}:`, error);
return null;
}))
);
campaignDetails.push(...results.filter(Boolean));
if (i + chunkSize < campaignIds.length) {
await new Promise(resolve => setTimeout(resolve, 1000)); // 1 second delay between chunks
}
}
return campaignDetails;
}
}
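
A hedged call sketch for the report flow above; the API key and revision values are placeholders, and the logged fields mirror the enriched shape built in getCampaignReports.

// Illustrative only — API key and revision are placeholders.
import { ReportingService } from './services/reporting.service.js';

const reporting = new ReportingService(process.env.KLAVIYO_API_KEY, '2024-10-15');

async function demo() {
  // Fetches email + SMS campaigns for the range, enriches them with campaign
  // details, and returns them sorted by send_time descending.
  const report = await reporting.getCampaignReports({ timeRange: 'last30days', channel: 'all' });
  for (const campaign of report.data) {
    console.log(campaign.send_time, campaign.name, campaign.stats.open_rate);
  }
}

demo().catch(console.error);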

View File

@@ -0,0 +1,448 @@
import { DateTime, Duration } from 'luxon';
export class TimeManager {
constructor(dayStartHour = 1) {
this.timezone = 'America/New_York';
this.dayStartHour = dayStartHour; // Hour (0-23) when the business day starts
this.weekStartDay = 7; // 7 = Sunday in Luxon
}
/**
* Get the start of the current business day
* If current time is before dayStartHour, return previous day at dayStartHour
*/
getDayStart(dt = this.getNow()) {
if (!dt.isValid) {
console.error("[TimeManager] Invalid datetime provided to getDayStart");
return this.getNow();
}
const dayStart = dt.set({ hour: this.dayStartHour, minute: 0, second: 0, millisecond: 0 });
return dt.hour < this.dayStartHour ? dayStart.minus({ days: 1 }) : dayStart;
}
/**
* Get the end of the current business day
* End is defined as dayStartHour - 1 minute on the next day
*/
getDayEnd(dt = this.getNow()) {
if (!dt.isValid) {
console.error("[TimeManager] Invalid datetime provided to getDayEnd");
return this.getNow();
}
const nextDay = this.getDayStart(dt).plus({ days: 1 });
return nextDay.minus({ minutes: 1 });
}
/**
* Get the start of the week containing the given date
* Aligns with custom day start time and starts on Sunday
*/
getWeekStart(dt = this.getNow()) {
if (!dt.isValid) {
console.error("[TimeManager] Invalid datetime provided to getWeekStart");
return this.getNow();
}
// Set to start of week (Sunday) and adjust hour
const weekStart = dt.set({ weekday: this.weekStartDay }).startOf('day');
// If the week start time would be after the given time, go back a week
if (weekStart > dt) {
return weekStart.minus({ weeks: 1 }).set({ hour: this.dayStartHour });
}
return weekStart.set({ hour: this.dayStartHour });
}
/**
* Convert any date input to a Luxon DateTime in Eastern time
*/
toDateTime(date) {
if (!date) return null;
if (date instanceof DateTime) {
return date.setZone(this.timezone);
}
// If it's an ISO string or Date object, parse it
const dt = DateTime.fromISO(date instanceof Date ? date.toISOString() : date);
if (!dt.isValid) {
console.error("[TimeManager] Invalid date input:", date);
return null;
}
return dt.setZone(this.timezone);
}
/**
* Format a date for API requests (UTC ISO string)
*/
formatForAPI(date) {
if (!date) return null;
// Parse the input date
const dt = this.toDateTime(date);
if (!dt || !dt.isValid) {
console.error("[TimeManager] Invalid date for API:", date);
return null;
}
// Convert to UTC for API request
const utc = dt.toUTC();
console.log("[TimeManager] API date conversion:", {
input: date,
eastern: dt.toISO(),
utc: utc.toISO(),
offset: dt.offset
});
return utc.toISO();
}
/**
* Format a date for display (in Eastern time)
*/
formatForDisplay(date) {
const dt = this.toDateTime(date);
if (!dt || !dt.isValid) return '';
return dt.toFormat('LLL d, yyyy h:mm a');
}
/**
* Validate if a date range is valid
*/
isValidDateRange(start, end) {
const startDt = this.toDateTime(start);
const endDt = this.toDateTime(end);
return startDt && endDt && endDt > startDt;
}
/**
* Get the current time in Eastern timezone
*/
getNow() {
return DateTime.now().setZone(this.timezone);
}
/**
* Get a date range for the last N hours
*/
getLastNHours(hours) {
const now = this.getNow();
return {
start: now.minus({ hours }),
end: now
};
}
/**
* Get a date range for the last N days
* Aligns with custom day start time
*/
getLastNDays(days) {
const now = this.getNow();
const dayStart = this.getDayStart(now);
return {
start: dayStart.minus({ days }),
end: this.getDayEnd(now)
};
}
/**
* Get a date range for a specific time period
* All ranges align with custom day start time
*/
getDateRange(period) {
const now = this.getNow();
// Normalize period to handle both 'last' and 'previous' prefixes
const normalizedPeriod = period.startsWith('previous') ? period.replace('previous', 'last') : period;
switch (normalizedPeriod) {
case 'custom': {
// Custom ranges are handled separately via getCustomRange
console.warn('[TimeManager] Custom ranges should use getCustomRange method');
return null;
}
case 'today': {
const dayStart = this.getDayStart(now);
return {
start: dayStart,
end: this.getDayEnd(now)
};
}
case 'yesterday': {
const yesterday = now.minus({ days: 1 });
return {
start: this.getDayStart(yesterday),
end: this.getDayEnd(yesterday)
};
}
case 'last7days': {
// For last 7 days, we want to include today and the previous 6 days
const dayStart = this.getDayStart(now);
const weekStart = dayStart.minus({ days: 6 });
return {
start: weekStart,
end: this.getDayEnd(now)
};
}
case 'last30days': {
// Include today and previous 29 days
const dayStart = this.getDayStart(now);
const monthStart = dayStart.minus({ days: 29 });
return {
start: monthStart,
end: this.getDayEnd(now)
};
}
case 'last90days': {
// Include today and previous 89 days
const dayStart = this.getDayStart(now);
const start = dayStart.minus({ days: 89 });
return {
start,
end: this.getDayEnd(now)
};
}
case 'thisWeek': {
// Get the start of the week (Sunday) with custom hour
const weekStart = this.getWeekStart(now);
return {
start: weekStart,
end: this.getDayEnd(now)
};
}
case 'lastWeek': {
const lastWeek = now.minus({ weeks: 1 });
const weekStart = this.getWeekStart(lastWeek);
const weekEnd = weekStart.plus({ days: 6 }); // 6 days after start = Saturday
return {
start: weekStart,
end: this.getDayEnd(weekEnd)
};
}
case 'thisMonth': {
const dayStart = this.getDayStart(now);
const monthStart = dayStart.startOf('month').set({ hour: this.dayStartHour });
return {
start: monthStart,
end: this.getDayEnd(now)
};
}
case 'lastMonth': {
const lastMonth = now.minus({ months: 1 });
const monthStart = lastMonth.startOf('month').set({ hour: this.dayStartHour });
const monthEnd = monthStart.plus({ months: 1 }).minus({ days: 1 });
return {
start: monthStart,
end: this.getDayEnd(monthEnd)
};
}
default:
console.warn(`[TimeManager] Unknown period: ${period}`);
return null;
}
}
/**
* Format a duration in milliseconds to a human-readable string
*/
formatDuration(ms) {
return Duration.fromMillis(ms).toFormat("hh'h' mm'm' ss's'");
}
/**
* Get relative time string (e.g., "2 hours ago")
*/
getRelativeTime(date) {
const dt = this.toDateTime(date);
if (!dt) return '';
return dt.toRelative();
}
/**
* Get a custom date range using exact dates and times provided
* @param {string} startDate - ISO string or Date for range start
* @param {string} endDate - ISO string or Date for range end
* @returns {Object} Object with start and end DateTime objects
*/
getCustomRange(startDate, endDate) {
if (!startDate || !endDate) {
console.error("[TimeManager] Custom range requires both start and end dates");
return null;
}
const start = this.toDateTime(startDate);
const end = this.toDateTime(endDate);
if (!start || !end || !start.isValid || !end.isValid) {
console.error("[TimeManager] Invalid dates provided for custom range");
return null;
}
// Validate the range
if (end < start) {
console.error("[TimeManager] End date must be after start date");
return null;
}
return {
start,
end
};
}
/**
* Get the previous period's date range based on the current period
* @param {string} period - The current period
* @param {DateTime} now - The current datetime (optional)
* @returns {Object} Object with start and end DateTime objects
*/
getPreviousPeriod(period, now = this.getNow()) {
const normalizedPeriod = period.startsWith('previous') ? period.replace('previous', 'last') : period;
switch (normalizedPeriod) {
case 'today': {
const yesterday = now.minus({ days: 1 });
return {
start: this.getDayStart(yesterday),
end: this.getDayEnd(yesterday)
};
}
case 'yesterday': {
const twoDaysAgo = now.minus({ days: 2 });
return {
start: this.getDayStart(twoDaysAgo),
end: this.getDayEnd(twoDaysAgo)
};
}
case 'last7days': {
const dayStart = this.getDayStart(now);
const currentStart = dayStart.minus({ days: 6 });
const prevEnd = currentStart.minus({ milliseconds: 1 });
const prevStart = currentStart.minus({ days: 7 }); // Same 7-day span as the current window
return {
start: prevStart,
end: prevEnd
};
}
case 'last30days': {
const dayStart = this.getDayStart(now);
const currentStart = dayStart.minus({ days: 29 });
const prevEnd = currentStart.minus({ milliseconds: 1 });
const prevStart = currentStart.minus({ days: 30 }); // Same 30-day span as the current window
return {
start: prevStart,
end: prevEnd
};
}
case 'last90days': {
const dayStart = this.getDayStart(now);
const currentStart = dayStart.minus({ days: 89 });
const prevEnd = currentStart.minus({ milliseconds: 1 });
const prevStart = currentStart.minus({ days: 90 }); // Same 90-day span as the current window
return {
start: prevStart,
end: prevEnd
};
}
case 'thisWeek': {
const weekStart = this.getWeekStart(now);
const prevEnd = weekStart.minus({ milliseconds: 1 });
const prevStart = weekStart.minus({ weeks: 1 }); // Full week preceding the current week start
return {
start: prevStart,
end: prevEnd
};
}
case 'lastWeek': {
const lastWeekStart = this.getWeekStart(now.minus({ weeks: 1 }));
const prevEnd = lastWeekStart.minus({ milliseconds: 1 });
const prevStart = lastWeekStart.minus({ weeks: 1 });
return {
start: prevStart,
end: prevEnd
};
}
case 'thisMonth': {
const monthStart = now.startOf('month').set({ hour: this.dayStartHour });
const prevEnd = monthStart.minus({ milliseconds: 1 });
const prevStart = monthStart.minus({ months: 1 }); // Start of the prior month at the business-day hour
return {
start: prevStart,
end: prevEnd
};
}
case 'lastMonth': {
const lastMonthStart = now.minus({ months: 1 }).startOf('month').set({ hour: this.dayStartHour });
const prevEnd = lastMonthStart.minus({ milliseconds: 1 });
const prevStart = lastMonthStart.minus({ months: 1 });
return {
start: prevStart,
end: prevEnd
};
}
default:
console.warn(`[TimeManager] No previous period defined for: ${period}`);
return null;
}
}
groupEventsByInterval(events, interval = 'day', property = null) {
if (!events?.length) return [];
const groupedData = new Map();
const now = DateTime.now().setZone('America/New_York');
for (const event of events) {
const datetime = DateTime.fromISO(event.attributes.datetime);
let groupKey;
switch (interval) {
case 'hour':
groupKey = datetime.startOf('hour').toISO();
break;
case 'day':
groupKey = datetime.startOf('day').toISO();
break;
case 'week':
groupKey = datetime.startOf('week').toISO();
break;
case 'month':
groupKey = datetime.startOf('month').toISO();
break;
default:
groupKey = datetime.startOf('day').toISO();
}
const existingGroup = groupedData.get(groupKey) || {
datetime: groupKey,
count: 0,
value: 0
};
existingGroup.count++;
if (property) {
// Extract property value from event
const props = event.attributes?.event_properties || event.attributes?.properties || {};
let value = 0;
if (property === '$value') {
// Special case for $value - use event value
value = Number(event.attributes?.value || 0);
} else {
// Otherwise get from properties
value = Number(props[property] || 0);
}
existingGroup.value = (existingGroup.value || 0) + value;
}
groupedData.set(groupKey, existingGroup);
}
// Convert to array and sort by datetime
return Array.from(groupedData.values())
.sort((a, b) => DateTime.fromISO(a.datetime) - DateTime.fromISO(b.datetime));
}
}
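
A short sketch of the business-day convention above, assuming the default 1 AM Eastern day start; the printed values are illustrative.

// Illustrative only — assumes the default dayStartHour of 1 (1:00 AM Eastern).
import { TimeManager } from './utils/time.utils.js';

const tm = new TimeManager();

// "today" runs from 1:00 AM Eastern to 12:59 AM the following day; a call
// made before 1:00 AM still falls inside the previous business day.
const today = tm.getDateRange('today');
console.log(today.start.toISO(), today.end.toISO());

// The comparison window ends one millisecond before the current range starts.
const prev = tm.getPreviousPeriod('last7days');
console.log(prev.start.toISO(), prev.end.toISO());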

View File

@@ -0,0 +1,935 @@
{
"name": "meta-server",
"version": "1.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "meta-server",
"version": "1.0.0",
"license": "ISC",
"dependencies": {
"axios": "^1.7.9",
"cors": "^2.8.5",
"dotenv": "^16.4.7",
"express": "^4.21.2"
}
},
"node_modules/accepts": {
"version": "1.3.8",
"resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
"integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
"license": "MIT",
"dependencies": {
"mime-types": "~2.1.34",
"negotiator": "0.6.3"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/array-flatten": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
"integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==",
"license": "MIT"
},
"node_modules/asynckit": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
"integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
"license": "MIT"
},
"node_modules/axios": {
"version": "1.7.9",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.7.9.tgz",
"integrity": "sha512-LhLcE7Hbiryz8oMDdDptSrWowmB4Bl6RCt6sIJKpRB4XtVf0iEgewX3au/pJqm+Py1kCASkb/FFKjxQaLtxJvw==",
"license": "MIT",
"dependencies": {
"follow-redirects": "^1.15.6",
"form-data": "^4.0.0",
"proxy-from-env": "^1.1.0"
}
},
"node_modules/body-parser": {
"version": "1.20.3",
"resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz",
"integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==",
"license": "MIT",
"dependencies": {
"bytes": "3.1.2",
"content-type": "~1.0.5",
"debug": "2.6.9",
"depd": "2.0.0",
"destroy": "1.2.0",
"http-errors": "2.0.0",
"iconv-lite": "0.4.24",
"on-finished": "2.4.1",
"qs": "6.13.0",
"raw-body": "2.5.2",
"type-is": "~1.6.18",
"unpipe": "1.0.0"
},
"engines": {
"node": ">= 0.8",
"npm": "1.2.8000 || >= 1.4.16"
}
},
"node_modules/bytes": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
"integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/call-bind-apply-helpers": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.1.tgz",
"integrity": "sha512-BhYE+WDaywFg2TBWYNXAE+8B1ATnThNBqXHP5nQu0jWJdVvY2hvkpyB3qOmtmDePiS5/BDQ8wASEWGMWRG148g==",
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0",
"function-bind": "^1.1.2"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/call-bound": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.3.tgz",
"integrity": "sha512-YTd+6wGlNlPxSuri7Y6X8tY2dmm12UMH66RpKMhiX6rsk5wXXnYgbUcOt8kiS31/AjfoTOvCsE+w8nZQLQnzHA==",
"license": "MIT",
"dependencies": {
"call-bind-apply-helpers": "^1.0.1",
"get-intrinsic": "^1.2.6"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/combined-stream": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
"integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
"license": "MIT",
"dependencies": {
"delayed-stream": "~1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/content-disposition": {
"version": "0.5.4",
"resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz",
"integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==",
"license": "MIT",
"dependencies": {
"safe-buffer": "5.2.1"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/content-type": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
"integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/cookie": {
"version": "0.7.1",
"resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz",
"integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/cookie-signature": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
"integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==",
"license": "MIT"
},
"node_modules/cors": {
"version": "2.8.5",
"resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz",
"integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==",
"license": "MIT",
"dependencies": {
"object-assign": "^4",
"vary": "^1"
},
"engines": {
"node": ">= 0.10"
}
},
"node_modules/debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"license": "MIT",
"dependencies": {
"ms": "2.0.0"
}
},
"node_modules/delayed-stream": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
"integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
"license": "MIT",
"engines": {
"node": ">=0.4.0"
}
},
"node_modules/depd": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
"integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/destroy": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz",
"integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==",
"license": "MIT",
"engines": {
"node": ">= 0.8",
"npm": "1.2.8000 || >= 1.4.16"
}
},
"node_modules/dotenv": {
"version": "16.4.7",
"resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz",
"integrity": "sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==",
"license": "BSD-2-Clause",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://dotenvx.com"
}
},
"node_modules/dunder-proto": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
"integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
"license": "MIT",
"dependencies": {
"call-bind-apply-helpers": "^1.0.1",
"es-errors": "^1.3.0",
"gopd": "^1.2.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/ee-first": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
"integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==",
"license": "MIT"
},
"node_modules/encodeurl": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz",
"integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/es-define-property": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
"integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-errors": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
"integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-object-atoms": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.0.0.tgz",
"integrity": "sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw==",
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/escape-html": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
"integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==",
"license": "MIT"
},
"node_modules/etag": {
"version": "1.8.1",
"resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
"integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/express": {
"version": "4.21.2",
"resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz",
"integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==",
"license": "MIT",
"dependencies": {
"accepts": "~1.3.8",
"array-flatten": "1.1.1",
"body-parser": "1.20.3",
"content-disposition": "0.5.4",
"content-type": "~1.0.4",
"cookie": "0.7.1",
"cookie-signature": "1.0.6",
"debug": "2.6.9",
"depd": "2.0.0",
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
"finalhandler": "1.3.1",
"fresh": "0.5.2",
"http-errors": "2.0.0",
"merge-descriptors": "1.0.3",
"methods": "~1.1.2",
"on-finished": "2.4.1",
"parseurl": "~1.3.3",
"path-to-regexp": "0.1.12",
"proxy-addr": "~2.0.7",
"qs": "6.13.0",
"range-parser": "~1.2.1",
"safe-buffer": "5.2.1",
"send": "0.19.0",
"serve-static": "1.16.2",
"setprototypeof": "1.2.0",
"statuses": "2.0.1",
"type-is": "~1.6.18",
"utils-merge": "1.0.1",
"vary": "~1.1.2"
},
"engines": {
"node": ">= 0.10.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/express"
}
},
"node_modules/finalhandler": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz",
"integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==",
"license": "MIT",
"dependencies": {
"debug": "2.6.9",
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"on-finished": "2.4.1",
"parseurl": "~1.3.3",
"statuses": "2.0.1",
"unpipe": "~1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/follow-redirects": {
"version": "1.15.9",
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz",
"integrity": "sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==",
"funding": [
{
"type": "individual",
"url": "https://github.com/sponsors/RubenVerborgh"
}
],
"license": "MIT",
"engines": {
"node": ">=4.0"
},
"peerDependenciesMeta": {
"debug": {
"optional": true
}
}
},
"node_modules/form-data": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.1.tgz",
"integrity": "sha512-tzN8e4TX8+kkxGPK8D5u0FNmjPUjw3lwC9lSLxxoB/+GtsJG91CO8bSWy73APlgAZzZbXEYZJuxjkHH2w+Ezhw==",
"license": "MIT",
"dependencies": {
"asynckit": "^0.4.0",
"combined-stream": "^1.0.8",
"mime-types": "^2.1.12"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/forwarded": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
"integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/fresh": {
"version": "0.5.2",
"resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
"integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/function-bind": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
"integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/get-intrinsic": {
"version": "1.2.6",
"resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.6.tgz",
"integrity": "sha512-qxsEs+9A+u85HhllWJJFicJfPDhRmjzoYdl64aMWW9yRIJmSyxdn8IEkuIM530/7T+lv0TIHd8L6Q/ra0tEoeA==",
"license": "MIT",
"dependencies": {
"call-bind-apply-helpers": "^1.0.1",
"dunder-proto": "^1.0.0",
"es-define-property": "^1.0.1",
"es-errors": "^1.3.0",
"es-object-atoms": "^1.0.0",
"function-bind": "^1.1.2",
"gopd": "^1.2.0",
"has-symbols": "^1.1.0",
"hasown": "^2.0.2",
"math-intrinsics": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/gopd": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
"integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-symbols": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
"integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/hasown": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
"integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
"license": "MIT",
"dependencies": {
"function-bind": "^1.1.2"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/http-errors": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz",
"integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==",
"license": "MIT",
"dependencies": {
"depd": "2.0.0",
"inherits": "2.0.4",
"setprototypeof": "1.2.0",
"statuses": "2.0.1",
"toidentifier": "1.0.1"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/iconv-lite": {
"version": "0.4.24",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
"integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
"license": "MIT",
"dependencies": {
"safer-buffer": ">= 2.1.2 < 3"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
"license": "ISC"
},
"node_modules/ipaddr.js": {
"version": "1.9.1",
"resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
"integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==",
"license": "MIT",
"engines": {
"node": ">= 0.10"
}
},
"node_modules/math-intrinsics": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
"integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/media-typer": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
"integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/merge-descriptors": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz",
"integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==",
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/methods": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
"integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/mime": {
"version": "1.6.0",
"resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
"integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==",
"license": "MIT",
"bin": {
"mime": "cli.js"
},
"engines": {
"node": ">=4"
}
},
"node_modules/mime-db": {
"version": "1.52.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/mime-types": {
"version": "2.1.35",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
"license": "MIT",
"dependencies": {
"mime-db": "1.52.0"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
"license": "MIT"
},
"node_modules/negotiator": {
"version": "0.6.3",
"resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz",
"integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/object-assign": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
"integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
"license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/object-inspect": {
"version": "1.13.3",
"resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.3.tgz",
"integrity": "sha512-kDCGIbxkDSXE3euJZZXzc6to7fCrKHNI/hSRQnRuQ+BWjFNzZwiFF8fj/6o2t2G9/jTj8PSIYTfCLelLZEeRpA==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/on-finished": {
"version": "2.4.1",
"resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
"integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
"license": "MIT",
"dependencies": {
"ee-first": "1.1.1"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/parseurl": {
"version": "1.3.3",
"resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
"integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/path-to-regexp": {
"version": "0.1.12",
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
"integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==",
"license": "MIT"
},
"node_modules/proxy-addr": {
"version": "2.0.7",
"resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
"integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
"license": "MIT",
"dependencies": {
"forwarded": "0.2.0",
"ipaddr.js": "1.9.1"
},
"engines": {
"node": ">= 0.10"
}
},
"node_modules/proxy-from-env": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
"integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==",
"license": "MIT"
},
"node_modules/qs": {
"version": "6.13.0",
"resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz",
"integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==",
"license": "BSD-3-Clause",
"dependencies": {
"side-channel": "^1.0.6"
},
"engines": {
"node": ">=0.6"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/range-parser": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
"integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
"license": "MIT",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/raw-body": {
"version": "2.5.2",
"resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz",
"integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==",
"license": "MIT",
"dependencies": {
"bytes": "3.1.2",
"http-errors": "2.0.0",
"iconv-lite": "0.4.24",
"unpipe": "1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "MIT"
},
"node_modules/safer-buffer": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
"license": "MIT"
},
"node_modules/send": {
"version": "0.19.0",
"resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz",
"integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==",
"license": "MIT",
"dependencies": {
"debug": "2.6.9",
"depd": "2.0.0",
"destroy": "1.2.0",
"encodeurl": "~1.0.2",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
"fresh": "0.5.2",
"http-errors": "2.0.0",
"mime": "1.6.0",
"ms": "2.1.3",
"on-finished": "2.4.1",
"range-parser": "~1.2.1",
"statuses": "2.0.1"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/send/node_modules/encodeurl": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
"integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/send/node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
"license": "MIT"
},
"node_modules/serve-static": {
"version": "1.16.2",
"resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz",
"integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==",
"license": "MIT",
"dependencies": {
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"parseurl": "~1.3.3",
"send": "0.19.0"
},
"engines": {
"node": ">= 0.8.0"
}
},
"node_modules/setprototypeof": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz",
"integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==",
"license": "ISC"
},
"node_modules/side-channel": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz",
"integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==",
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0",
"object-inspect": "^1.13.3",
"side-channel-list": "^1.0.0",
"side-channel-map": "^1.0.1",
"side-channel-weakmap": "^1.0.2"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/side-channel-list": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz",
"integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==",
"license": "MIT",
"dependencies": {
"es-errors": "^1.3.0",
"object-inspect": "^1.13.3"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/side-channel-map": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz",
"integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==",
"license": "MIT",
"dependencies": {
"call-bound": "^1.0.2",
"es-errors": "^1.3.0",
"get-intrinsic": "^1.2.5",
"object-inspect": "^1.13.3"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/side-channel-weakmap": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz",
"integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==",
"license": "MIT",
"dependencies": {
"call-bound": "^1.0.2",
"es-errors": "^1.3.0",
"get-intrinsic": "^1.2.5",
"object-inspect": "^1.13.3",
"side-channel-map": "^1.0.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/statuses": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
"integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/toidentifier": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz",
"integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==",
"license": "MIT",
"engines": {
"node": ">=0.6"
}
},
"node_modules/type-is": {
"version": "1.6.18",
"resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
"integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
"license": "MIT",
"dependencies": {
"media-typer": "0.3.0",
"mime-types": "~2.1.24"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/unpipe": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
"integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/utils-merge": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
"integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==",
"license": "MIT",
"engines": {
"node": ">= 0.4.0"
}
},
"node_modules/vary": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
"integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
}
}
}

View File

@@ -0,0 +1,20 @@
{
"name": "meta-server",
"version": "1.0.0",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1",
"start": "node server.js",
"dev": "nodemon server.js"
},
"keywords": [],
"author": "",
"license": "ISC",
"description": "",
"dependencies": {
"axios": "^1.7.9",
"cors": "^2.8.5",
"dotenv": "^16.4.7",
"express": "^4.21.2"
}
}

View File

@@ -0,0 +1,91 @@
const express = require('express');
const router = express.Router();
const {
fetchCampaigns,
fetchAccountInsights,
updateCampaignBudget,
updateCampaignStatus,
} = require('../services/meta.service');
// Get all campaigns with insights
router.get('/campaigns', async (req, res) => {
try {
const { since, until } = req.query;
if (!since || !until) {
return res.status(400).json({ error: 'Date range is required (since, until)' });
}
const campaigns = await fetchCampaigns(since, until);
res.json(campaigns);
} catch (error) {
console.error('Campaign fetch error:', error);
res.status(500).json({
error: 'Failed to fetch campaigns',
details: error.response?.data?.error?.message || error.message,
});
}
});
// Get account insights
router.get('/account-insights', async (req, res) => {
try {
const { since, until } = req.query;
if (!since || !until) {
return res.status(400).json({ error: 'Date range is required (since, until)' });
}
const insights = await fetchAccountInsights(since, until);
res.json(insights);
} catch (error) {
console.error('Account insights fetch error:', error);
res.status(500).json({
error: 'Failed to fetch account insights',
details: error.response?.data?.error?.message || error.message,
});
}
});
// Update campaign budget
router.patch('/campaigns/:campaignId/budget', async (req, res) => {
try {
const { campaignId } = req.params;
const { budget } = req.body;
if (!budget) {
return res.status(400).json({ error: 'Budget is required' });
}
const result = await updateCampaignBudget(campaignId, budget);
res.json(result);
} catch (error) {
console.error('Budget update error:', error);
res.status(500).json({
error: 'Failed to update campaign budget',
details: error.response?.data?.error?.message || error.message,
});
}
});
// Update campaign status (pause/unpause)
router.post('/campaigns/:campaignId/:action', async (req, res) => {
try {
const { campaignId, action } = req.params;
if (!['pause', 'unpause'].includes(action)) {
return res.status(400).json({ error: 'Invalid action. Use "pause" or "unpause"' });
}
const result = await updateCampaignStatus(campaignId, action);
res.json(result);
} catch (error) {
console.error('Status update error:', error);
res.status(500).json({
error: 'Failed to update campaign status',
details: error.response?.data?.error?.message || error.message,
});
}
});
module.exports = router;

View File

@@ -0,0 +1,31 @@
const express = require('express');
const cors = require('cors');
const path = require('path');
require('dotenv').config({
path: path.resolve(__dirname, '.env')
});
const app = express();
const port = process.env.PORT || 3005;
app.use(cors());
app.use(express.json());
// Import routes
const campaignRoutes = require('./routes/campaigns.routes');
// Use routes
app.use('/api/meta', campaignRoutes);
// Error handling middleware
app.use((err, req, res, next) => {
console.error(err.stack);
res.status(500).json({ error: 'Something went wrong!' });
});
// Start server
app.listen(port, () => {
console.log(`Meta API server running on port ${port}`);
});
module.exports = app;

View File

@@ -0,0 +1,99 @@
const { default: axios } = require('axios');
const META_API_VERSION = process.env.META_API_VERSION || 'v21.0';
const META_API_BASE_URL = `https://graph.facebook.com/${META_API_VERSION}`;
const META_ACCESS_TOKEN = process.env.META_ACCESS_TOKEN;
const AD_ACCOUNT_ID = process.env.META_AD_ACCOUNT_ID;
const metaApiRequest = async (endpoint, params = {}) => {
try {
const response = await axios.get(`${META_API_BASE_URL}/${endpoint}`, {
params: {
access_token: META_ACCESS_TOKEN,
time_zone: 'America/New_York',
...params,
},
});
return response.data;
} catch (error) {
console.error('Meta API Error:', {
message: error.message,
response: error.response?.data,
endpoint,
});
throw error;
}
};
const fetchCampaigns = async (since, until) => {
const campaigns = await metaApiRequest(`act_${AD_ACCOUNT_ID}/campaigns`, {
fields: [
'id',
'name',
'status',
'objective',
'daily_budget',
'lifetime_budget',
'adsets{daily_budget,lifetime_budget}',
`insights.time_range({'since':'${since}','until':'${until}'}).level(campaign){
spend,
impressions,
clicks,
ctr,
reach,
frequency,
cpm,
cpc,
actions,
action_values,
cost_per_action_type
}`,
].join(','),
limit: 100,
});
return campaigns.data.filter(c => c.insights?.data?.[0]?.spend > 0);
};
const fetchAccountInsights = async (since, until) => {
const accountInsights = await metaApiRequest(`act_${AD_ACCOUNT_ID}/insights`, {
fields: 'reach,spend,impressions,clicks,ctr,cpm,actions,action_values',
time_range: JSON.stringify({ since, until }),
});
return accountInsights.data[0] || null;
};
const updateCampaignBudget = async (campaignId, budget) => {
try {
const response = await axios.post(`${META_API_BASE_URL}/${campaignId}`, {
access_token: META_ACCESS_TOKEN,
daily_budget: budget * 100, // Convert to cents
});
return response.data;
} catch (error) {
console.error('Update campaign budget error:', error);
throw error;
}
};
const updateCampaignStatus = async (campaignId, action) => {
try {
const status = action === 'pause' ? 'PAUSED' : 'ACTIVE';
const response = await axios.post(`${META_API_BASE_URL}/${campaignId}`, {
access_token: META_ACCESS_TOKEN,
status,
});
return response.data;
} catch (error) {
console.error('Update campaign status error:', error);
throw error;
}
};
module.exports = {
fetchCampaigns,
fetchAccountInsights,
updateCampaignBudget,
updateCampaignStatus,
};
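
An illustrative call sketch for the two mutation helpers above; the campaign ID and budget are placeholders, and the META_ACCESS_TOKEN / META_AD_ACCOUNT_ID environment variables are assumed to be set.

// Illustrative only — campaign ID and dollar amount are placeholders.
const { updateCampaignBudget, updateCampaignStatus } = require('./services/meta.service');

async function demo() {
  // $150/day is sent to the Graph API as daily_budget: 15000 (cents).
  await updateCampaignBudget('1202112345678', 150);

  // 'pause' maps to status PAUSED; 'unpause' maps back to ACTIVE.
  await updateCampaignStatus('1202112345678', 'pause');
}

demo().catch(console.error);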

View File

@@ -0,0 +1,24 @@
{
"name": "dashboard",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"dependencies": {
"dotenv": "^16.4.7"
}
},
"node_modules/dotenv": {
"version": "16.4.7",
"resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz",
"integrity": "sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==",
"license": "BSD-2-Clause",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://dotenvx.com"
}
}
}
}

View File

@@ -0,0 +1,5 @@
{
"dependencies": {
"dotenv": "^16.4.7"
}
}

View File

@@ -0,0 +1,13 @@
# Server Configuration
NODE_ENV=development
TYPEFORM_PORT=3008
# Redis Configuration
REDIS_URL=redis://localhost:6379
# Typeform API Configuration
TYPEFORM_ACCESS_TOKEN=your_typeform_access_token_here
# Optional: Form IDs (if you want to store them in env)
TYPEFORM_FORM_ID_1=your_first_form_id
TYPEFORM_FORM_ID_2=your_second_form_id

File diff suppressed because it is too large

View File

@@ -0,0 +1,20 @@
{
"name": "typeform-server",
"version": "1.0.0",
"description": "Typeform API integration server",
"main": "server.js",
"scripts": {
"start": "node server.js",
"dev": "nodemon server.js"
},
"dependencies": {
"axios": "^1.6.2",
"cors": "^2.8.5",
"dotenv": "^16.3.1",
"express": "^4.18.2",
"redis": "^4.6.11"
},
"devDependencies": {
"nodemon": "^3.0.2"
}
}

View File

@@ -0,0 +1,121 @@
const express = require('express');
const router = express.Router();
const typeformService = require('../services/typeform.service');
// Get form responses
router.get('/forms/:formId/responses', async (req, res) => {
try {
const { formId } = req.params;
const filters = req.query;
console.log(`Fetching responses for form ${formId} with filters:`, filters);
if (!formId) {
return res.status(400).json({
error: 'Missing form ID',
details: 'The form ID parameter is required'
});
}
const data = await typeformService.getFormResponsesWithFilters(formId, filters);
if (!data) {
return res.status(404).json({
error: 'No data found',
details: `No responses found for form ${formId}`
});
}
res.json(data);
} catch (error) {
console.error('Form responses error:', {
formId: req.params.formId,
filters: req.query,
error: error.message,
stack: error.stack,
response: error.response?.data
});
// Handle specific error cases
if (error.response?.status === 401) {
return res.status(401).json({
error: 'Authentication failed',
details: 'Invalid Typeform API credentials'
});
}
if (error.response?.status === 404) {
return res.status(404).json({
error: 'Not found',
details: `Form '${req.params.formId}' not found`
});
}
if (error.response?.status === 400) {
return res.status(400).json({
error: 'Invalid request',
details: error.response?.data?.message || 'The request was invalid',
data: error.response?.data
});
}
res.status(500).json({
error: 'Failed to fetch form responses',
details: error.response?.data?.message || error.message,
data: error.response?.data
});
}
});
// Get form insights
router.get('/forms/:formId/insights', async (req, res) => {
try {
const { formId } = req.params;
if (!formId) {
return res.status(400).json({
error: 'Missing form ID',
details: 'The form ID parameter is required'
});
}
const data = await typeformService.getFormInsights(formId);
if (!data) {
return res.status(404).json({
error: 'No data found',
details: `No insights found for form ${formId}`
});
}
res.json(data);
} catch (error) {
console.error('Form insights error:', {
formId: req.params.formId,
error: error.message,
response: error.response?.data
});
if (error.response?.status === 401) {
return res.status(401).json({
error: 'Authentication failed',
details: 'Invalid Typeform API credentials'
});
}
if (error.response?.status === 404) {
return res.status(404).json({
error: 'Not found',
details: `Form '${req.params.formId}' not found`
});
}
res.status(500).json({
error: 'Failed to fetch form insights',
details: error.response?.data?.message || error.message,
data: error.response?.data
});
}
});
module.exports = router;

View File

@@ -0,0 +1,31 @@
const express = require('express');
const cors = require('cors');
const path = require('path');
require('dotenv').config({
path: path.resolve(__dirname, '.env')
});
const app = express();
const port = process.env.TYPEFORM_PORT || 3008;
app.use(cors());
app.use(express.json());
// Import routes
const typeformRoutes = require('./routes/typeform.routes');
// Use routes
app.use('/api/typeform', typeformRoutes);
// Error handling middleware
app.use((err, req, res, next) => {
console.error(err.stack);
res.status(500).json({ error: 'Something went wrong!' });
});
// Start server
app.listen(port, () => {
console.log(`Typeform API server running on port ${port}`);
});
module.exports = app;

View File

@@ -0,0 +1,142 @@
const axios = require('axios');
const { createClient } = require('redis');
class TypeformService {
constructor() {
this.redis = createClient({
url: process.env.REDIS_URL
});
this.redis.on('error', err => console.error('Redis Client Error:', err));
this.redis.connect().catch(err => console.error('Redis connection error:', err));
const token = process.env.TYPEFORM_ACCESS_TOKEN;
console.log('Initializing Typeform client with token:', token ? `${token.slice(0, 10)}...` : 'missing');
this.apiClient = axios.create({
baseURL: 'https://api.typeform.com',
headers: {
'Authorization': `Bearer ${token}`,
'Content-Type': 'application/json'
}
});
// Test the token
this.testConnection();
}
async testConnection() {
try {
const response = await this.apiClient.get('/forms');
console.log('Typeform connection test successful:', {
status: response.status,
headers: response.headers,
});
} catch (error) {
console.error('Typeform connection test failed:', {
error: error.message,
response: error.response?.data,
status: error.response?.status,
});
}
}
async getFormResponses(formId, params = {}) {
const cacheKey = `typeform:responses:${formId}:${JSON.stringify(params)}`;
try {
// Try Redis first
const cachedData = await this.redis.get(cacheKey);
if (cachedData) {
console.log(`Form responses for ${formId} found in Redis cache`);
return JSON.parse(cachedData);
}
// Fetch from API
const response = await this.apiClient.get(`/forms/${formId}/responses`, { params });
const data = response.data;
// Save to Redis with 5 minute expiry
await this.redis.set(cacheKey, JSON.stringify(data), {
EX: 300 // 5 minutes
});
return data;
} catch (error) {
console.error(`Error fetching form responses for ${formId}:`, {
error: error.message,
params,
response: error.response?.data
});
throw error;
}
}
async getFormInsights(formId) {
const cacheKey = `typeform:insights:${formId}`;
try {
// Try Redis first
const cachedData = await this.redis.get(cacheKey);
if (cachedData) {
console.log(`Form insights for ${formId} found in Redis cache`);
return JSON.parse(cachedData);
}
// Log the request details
console.log(`Fetching insights for form ${formId}...`, {
url: `/insights/${formId}/summary`,
headers: this.apiClient.defaults.headers
});
// Fetch from API
const response = await this.apiClient.get(`/insights/${formId}/summary`);
console.log('Typeform insights response:', {
status: response.status,
headers: response.headers,
data: response.data
});
const data = response.data;
// Save to Redis with 5 minute expiry
await this.redis.set(cacheKey, JSON.stringify(data), {
EX: 300 // 5 minutes
});
return data;
} catch (error) {
console.error(`Error fetching form insights for ${formId}:`, {
error: error.message,
response: error.response?.data,
status: error.response?.status,
headers: error.response?.headers,
requestUrl: `/insights/${formId}/summary`,
requestHeaders: this.apiClient.defaults.headers
});
throw error;
}
}
async getFormResponsesWithFilters(formId, { since, until, pageSize = 25, ...otherParams } = {}) {
try {
const params = {
page_size: pageSize,
...otherParams
};
if (since) {
params.since = new Date(since).toISOString();
}
if (until) {
params.until = new Date(until).toISOString();
}
return await this.getFormResponses(formId, params);
} catch (error) {
console.error('Error in getFormResponsesWithFilters:', error);
throw error;
}
}
}
module.exports = new TypeformService();
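// Minimal usage sketch (illustrative only; the require path is an assumption and
// 'abc12345' is a placeholder form ID). Must run inside an async function:
//   const typeformService = require('./services/typeform.service');
//   const responses = await typeformService.getFormResponsesWithFilters('abc12345', {
//     since: '2025-09-01',
//     until: '2025-09-30',
//     pageSize: 50
//   });
//   const insights = await typeformService.getFormInsights('abc12345');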

View File

@@ -0,0 +1,196 @@
-- Create function for updating timestamps if it doesn't exist
CREATE OR REPLACE FUNCTION update_updated_column()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
-- Create function for updating updated_at timestamps
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
-- Drop tables in reverse order of dependency
DROP TABLE IF EXISTS public.settings_product CASCADE;
DROP TABLE IF EXISTS public.settings_vendor CASCADE;
DROP TABLE IF EXISTS public.settings_global CASCADE;
-- Table Definition: settings_global
CREATE TABLE public.settings_global (
setting_key VARCHAR PRIMARY KEY,
setting_value VARCHAR NOT NULL,
description TEXT,
updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP
);
-- Table Definition: settings_vendor
CREATE TABLE public.settings_vendor (
vendor VARCHAR PRIMARY KEY, -- Matches products.vendor
default_lead_time_days INT,
default_days_of_stock INT,
updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP
);
-- Index for faster lookups if needed (PK usually sufficient)
-- CREATE INDEX idx_settings_vendor_vendor ON public.settings_vendor(vendor);
-- Table Definition: settings_product
CREATE TABLE public.settings_product (
pid INT8 PRIMARY KEY,
lead_time_days INT, -- Overrides vendor/global
days_of_stock INT, -- Overrides vendor/global
safety_stock INT DEFAULT 0, -- Minimum desired stock level
forecast_method VARCHAR DEFAULT 'standard', -- e.g., 'standard', 'seasonal'
exclude_from_forecast BOOLEAN DEFAULT FALSE,
updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT fk_settings_product_pid FOREIGN KEY (pid) REFERENCES public.products(pid) ON DELETE CASCADE ON UPDATE CASCADE
);
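-- Note: update_updated_at_column() is defined above, but no triggers are attached to
-- these settings tables in this file. A sketch of how they could be wired up
-- (an assumption, not part of the original schema):
-- CREATE TRIGGER update_settings_global_updated_at
--     BEFORE UPDATE ON public.settings_global
--     FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
-- CREATE TRIGGER update_settings_vendor_updated_at
--     BEFORE UPDATE ON public.settings_vendor
--     FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
-- CREATE TRIGGER update_settings_product_updated_at
--     BEFORE UPDATE ON public.settings_product
--     FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();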
-- Description: Inserts or updates standard default global settings.
-- Safe to rerun; will update existing keys with these default values.
-- Dependencies: `settings_global` table must exist.
-- Frequency: Run once initially, or rerun if you want to reset global defaults.
INSERT INTO public.settings_global (setting_key, setting_value, description) VALUES
('abc_revenue_threshold_a', '0.80', 'Revenue percentage for Class A (cumulative)'),
('abc_revenue_threshold_b', '0.95', 'Revenue percentage for Class B (cumulative)'),
('abc_calculation_basis', 'revenue_30d', 'Metric for ABC calc (revenue_30d, sales_30d, lifetime_revenue)'),
('abc_calculation_period', '30', 'Days period for ABC calculation if not lifetime'),
('default_forecast_method', 'standard', 'Default forecast method (standard, seasonal)'),
('default_lead_time_days', '14', 'Global default lead time in days'),
('default_days_of_stock', '30', 'Global default days of stock coverage target'),
-- Set default safety stock to 0 units. Can be overridden per product.
-- If you wanted safety stock in days, you'd store 'days' here and calculate units later.
('default_safety_stock_units', '0', 'Global default safety stock in units')
ON CONFLICT (setting_key) DO UPDATE SET
setting_value = EXCLUDED.setting_value,
description = EXCLUDED.description,
updated_at = CURRENT_TIMESTAMP; -- Update timestamp if default value changes
-- Description: Creates placeholder rows in `settings_vendor` for each unique vendor
-- found in the `products` table. Does NOT set specific overrides.
-- Safe to rerun; will NOT overwrite existing vendor settings.
-- Dependencies: `settings_vendor` table must exist, `products` table populated.
-- Frequency: Run once after initial product load, or periodically if new vendors are added.
INSERT INTO public.settings_vendor (
vendor,
default_lead_time_days,
default_days_of_stock
-- updated_at will use its default CURRENT_TIMESTAMP on insert
)
SELECT
DISTINCT p.vendor,
-- Explicitly cast NULL to INTEGER to resolve type mismatch
CAST(NULL AS INTEGER),
CAST(NULL AS INTEGER)
FROM
public.products p
WHERE
p.vendor IS NOT NULL
AND p.vendor <> '' -- Exclude blank vendors if necessary
ON CONFLICT (vendor) DO NOTHING; -- IMPORTANT: Do not overwrite existing vendor settings
SELECT COUNT(*) FROM public.settings_vendor; -- Verify rows were inserted
-- Description: Creates placeholder rows in `settings_product` for each unique product
-- found in the `products` table. Sets basic defaults but no specific overrides.
-- Safe to rerun; will NOT overwrite existing product settings.
-- Dependencies: `settings_product` table must exist, `products` table populated.
-- Frequency: Run once after initial product load, or periodically if new products are added.
INSERT INTO public.settings_product (
pid,
lead_time_days, -- NULL = Inherit from Vendor/Global
days_of_stock, -- NULL = Inherit from Vendor/Global
safety_stock, -- Default to 0 units initially
forecast_method, -- NULL = Inherit from Global ('standard')
exclude_from_forecast -- Default to FALSE
-- updated_at will use its default CURRENT_TIMESTAMP on insert
)
SELECT
p.pid,
CAST(NULL AS INTEGER), -- Explicitly cast NULL to INTEGER
CAST(NULL AS INTEGER), -- Explicitly cast NULL to INTEGER
COALESCE((SELECT setting_value::int FROM settings_global WHERE setting_key = 'default_safety_stock_units'), 0), -- Use global default safety stock units
CAST(NULL AS VARCHAR), -- Cast NULL to VARCHAR for forecast_method (already varchar, but explicit)
FALSE -- Default: Include in forecast
FROM
public.products p
ON CONFLICT (pid) DO NOTHING; -- IMPORTANT: Do not overwrite existing product-specific settings
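-- Effective-settings resolution sketch (illustrative only, not part of the original
-- schema): a product-level value overrides the vendor default, which overrides the
-- global default.
-- SELECT
--     p.pid,
--     COALESCE(
--         sp.lead_time_days,
--         sv.default_lead_time_days,
--         (SELECT setting_value::int FROM public.settings_global WHERE setting_key = 'default_lead_time_days')
--     ) AS effective_lead_time_days,
--     COALESCE(
--         sp.days_of_stock,
--         sv.default_days_of_stock,
--         (SELECT setting_value::int FROM public.settings_global WHERE setting_key = 'default_days_of_stock')
--     ) AS effective_days_of_stock
-- FROM public.products p
-- LEFT JOIN public.settings_product sp ON sp.pid = p.pid
-- LEFT JOIN public.settings_vendor sv ON sv.vendor = p.vendor;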
-- History and status tables
CREATE TABLE IF NOT EXISTS calculate_history (
id BIGSERIAL PRIMARY KEY,
start_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
end_time TIMESTAMP WITH TIME ZONE NULL,
duration_seconds INTEGER,
duration_minutes DECIMAL(10,2) GENERATED ALWAYS AS (duration_seconds::decimal / 60.0) STORED,
total_products INTEGER DEFAULT 0,
total_orders INTEGER DEFAULT 0,
total_purchase_orders INTEGER DEFAULT 0,
processed_products INTEGER DEFAULT 0,
processed_orders INTEGER DEFAULT 0,
processed_purchase_orders INTEGER DEFAULT 0,
status calculation_status DEFAULT 'running',
error_message TEXT,
additional_info JSONB
);
CREATE TABLE IF NOT EXISTS calculate_status (
module_name text PRIMARY KEY,
last_calculation_timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS sync_status (
table_name TEXT PRIMARY KEY,
last_sync_timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
last_sync_id BIGINT
);
CREATE TABLE IF NOT EXISTS import_history (
id BIGSERIAL PRIMARY KEY,
table_name VARCHAR(50) NOT NULL,
start_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
end_time TIMESTAMP WITH TIME ZONE NULL,
duration_seconds INTEGER,
duration_minutes DECIMAL(10,2) GENERATED ALWAYS AS (duration_seconds::decimal / 60.0) STORED,
records_added INTEGER DEFAULT 0,
records_updated INTEGER DEFAULT 0,
records_deleted INTEGER DEFAULT 0,
records_skipped INTEGER DEFAULT 0,
total_processed INTEGER DEFAULT 0,
is_incremental BOOLEAN DEFAULT FALSE,
status calculation_status DEFAULT 'running',
error_message TEXT,
additional_info JSONB
);
-- Create all indexes after tables are fully created
CREATE INDEX IF NOT EXISTS idx_last_calc ON calculate_status(last_calculation_timestamp);
CREATE INDEX IF NOT EXISTS idx_last_sync ON sync_status(last_sync_timestamp);
CREATE INDEX IF NOT EXISTS idx_table_time ON import_history(table_name, start_time);
CREATE INDEX IF NOT EXISTS idx_import_history_status ON import_history(status);
CREATE INDEX IF NOT EXISTS idx_calculate_history_status ON calculate_history(status);
-- Add comments for documentation
COMMENT ON TABLE import_history IS 'Tracks history of data import operations with detailed statistics';
COMMENT ON COLUMN import_history.records_deleted IS 'Number of records deleted during this import';
COMMENT ON COLUMN import_history.records_skipped IS 'Number of records skipped (e.g., unchanged, invalid)';
COMMENT ON COLUMN import_history.total_processed IS 'Total number of records examined/processed, including skipped';
COMMENT ON TABLE calculate_history IS 'Tracks history of metrics calculation runs with performance data';
COMMENT ON COLUMN calculate_history.duration_seconds IS 'Total duration of the calculation in seconds';
COMMENT ON COLUMN calculate_history.additional_info IS 'JSON object containing step timings, row counts, and other detailed metrics';

View File

@@ -1,278 +0,0 @@
-- Configuration tables schema
-- Create function for updating timestamps if it doesn't exist
CREATE OR REPLACE FUNCTION update_updated_column()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
-- Create function for updating updated_at timestamps
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
-- Stock threshold configurations
CREATE TABLE stock_thresholds (
id INTEGER NOT NULL,
category_id BIGINT, -- NULL means default/global threshold
vendor VARCHAR(100), -- NULL means applies to all vendors
critical_days INTEGER NOT NULL DEFAULT 7,
reorder_days INTEGER NOT NULL DEFAULT 14,
overstock_days INTEGER NOT NULL DEFAULT 90,
low_stock_threshold INTEGER NOT NULL DEFAULT 5,
min_reorder_quantity INTEGER NOT NULL DEFAULT 1,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (id),
FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
UNIQUE (category_id, vendor)
);
CREATE TRIGGER update_stock_thresholds_updated
BEFORE UPDATE ON stock_thresholds
FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column();
CREATE INDEX idx_st_metrics ON stock_thresholds(category_id, vendor);
-- Lead time threshold configurations
CREATE TABLE lead_time_thresholds (
id INTEGER NOT NULL,
category_id BIGINT, -- NULL means default/global threshold
vendor VARCHAR(100), -- NULL means applies to all vendors
target_days INTEGER NOT NULL DEFAULT 14,
warning_days INTEGER NOT NULL DEFAULT 21,
critical_days INTEGER NOT NULL DEFAULT 30,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (id),
FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
UNIQUE (category_id, vendor)
);
CREATE TRIGGER update_lead_time_thresholds_updated
BEFORE UPDATE ON lead_time_thresholds
FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column();
-- Sales velocity window configurations
CREATE TABLE sales_velocity_config (
id INTEGER NOT NULL,
category_id BIGINT, -- NULL means default/global threshold
vendor VARCHAR(100), -- NULL means applies to all vendors
daily_window_days INTEGER NOT NULL DEFAULT 30,
weekly_window_days INTEGER NOT NULL DEFAULT 7,
monthly_window_days INTEGER NOT NULL DEFAULT 90,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (id),
FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
UNIQUE (category_id, vendor)
);
CREATE TRIGGER update_sales_velocity_config_updated
BEFORE UPDATE ON sales_velocity_config
FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column();
CREATE INDEX idx_sv_metrics ON sales_velocity_config(category_id, vendor);
-- ABC Classification configurations
CREATE TABLE abc_classification_config (
id INTEGER NOT NULL PRIMARY KEY,
a_threshold DECIMAL(5,2) NOT NULL DEFAULT 20.0,
b_threshold DECIMAL(5,2) NOT NULL DEFAULT 50.0,
classification_period_days INTEGER NOT NULL DEFAULT 90,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
);
CREATE TRIGGER update_abc_classification_config_updated
BEFORE UPDATE ON abc_classification_config
FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column();
-- Safety stock configurations
CREATE TABLE safety_stock_config (
id INTEGER NOT NULL,
category_id BIGINT, -- NULL means default/global threshold
vendor VARCHAR(100), -- NULL means applies to all vendors
coverage_days INTEGER NOT NULL DEFAULT 14,
service_level DECIMAL(5,2) NOT NULL DEFAULT 95.0,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (id),
FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
UNIQUE (category_id, vendor)
);
CREATE TRIGGER update_safety_stock_config_updated
BEFORE UPDATE ON safety_stock_config
FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column();
CREATE INDEX idx_ss_metrics ON safety_stock_config(category_id, vendor);
-- Turnover rate configurations
CREATE TABLE turnover_config (
id INTEGER NOT NULL,
category_id BIGINT, -- NULL means default/global threshold
vendor VARCHAR(100), -- NULL means applies to all vendors
calculation_period_days INTEGER NOT NULL DEFAULT 30,
target_rate DECIMAL(10,2) NOT NULL DEFAULT 1.0,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (id),
FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
UNIQUE (category_id, vendor)
);
CREATE TRIGGER update_turnover_config_updated
BEFORE UPDATE ON turnover_config
FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column();
-- Create table for sales seasonality factors
CREATE TABLE sales_seasonality (
month INTEGER NOT NULL,
seasonality_factor DECIMAL(5,3) DEFAULT 0,
last_updated TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (month),
CONSTRAINT month_range CHECK (month BETWEEN 1 AND 12),
CONSTRAINT seasonality_range CHECK (seasonality_factor BETWEEN -1.0 AND 1.0)
);
CREATE TRIGGER update_sales_seasonality_updated
BEFORE UPDATE ON sales_seasonality
FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column();
-- Insert default global thresholds
INSERT INTO stock_thresholds (id, category_id, vendor, critical_days, reorder_days, overstock_days)
VALUES (1, NULL, NULL, 7, 14, 90)
ON CONFLICT (id) DO UPDATE SET
critical_days = EXCLUDED.critical_days,
reorder_days = EXCLUDED.reorder_days,
overstock_days = EXCLUDED.overstock_days;
INSERT INTO lead_time_thresholds (id, category_id, vendor, target_days, warning_days, critical_days)
VALUES (1, NULL, NULL, 14, 21, 30)
ON CONFLICT (id) DO UPDATE SET
target_days = EXCLUDED.target_days,
warning_days = EXCLUDED.warning_days,
critical_days = EXCLUDED.critical_days;
INSERT INTO sales_velocity_config (id, category_id, vendor, daily_window_days, weekly_window_days, monthly_window_days)
VALUES (1, NULL, NULL, 30, 7, 90)
ON CONFLICT (id) DO UPDATE SET
daily_window_days = EXCLUDED.daily_window_days,
weekly_window_days = EXCLUDED.weekly_window_days,
monthly_window_days = EXCLUDED.monthly_window_days;
INSERT INTO abc_classification_config (id, a_threshold, b_threshold, classification_period_days)
VALUES (1, 20.0, 50.0, 90)
ON CONFLICT (id) DO UPDATE SET
a_threshold = EXCLUDED.a_threshold,
b_threshold = EXCLUDED.b_threshold,
classification_period_days = EXCLUDED.classification_period_days;
INSERT INTO safety_stock_config (id, category_id, vendor, coverage_days, service_level)
VALUES (1, NULL, NULL, 14, 95.0)
ON CONFLICT (id) DO UPDATE SET
coverage_days = EXCLUDED.coverage_days,
service_level = EXCLUDED.service_level;
INSERT INTO turnover_config (id, category_id, vendor, calculation_period_days, target_rate)
VALUES (1, NULL, NULL, 30, 1.0)
ON CONFLICT (id) DO UPDATE SET
calculation_period_days = EXCLUDED.calculation_period_days,
target_rate = EXCLUDED.target_rate;
-- Insert default seasonality factors (neutral)
INSERT INTO sales_seasonality (month, seasonality_factor)
VALUES
(1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0),
(7, 0), (8, 0), (9, 0), (10, 0), (11, 0), (12, 0)
ON CONFLICT (month) DO UPDATE SET
last_updated = CURRENT_TIMESTAMP;
-- View to show thresholds with category names
CREATE OR REPLACE VIEW stock_thresholds_view AS
SELECT
st.*,
c.name as category_name,
CASE
WHEN st.category_id IS NULL AND st.vendor IS NULL THEN 'Global Default'
WHEN st.category_id IS NULL THEN 'Vendor: ' || st.vendor
WHEN st.vendor IS NULL THEN 'Category: ' || c.name
ELSE 'Category: ' || c.name || ' / Vendor: ' || st.vendor
END as threshold_scope
FROM
stock_thresholds st
LEFT JOIN
categories c ON st.category_id = c.cat_id
ORDER BY
CASE
WHEN st.category_id IS NULL AND st.vendor IS NULL THEN 1
WHEN st.category_id IS NULL THEN 2
WHEN st.vendor IS NULL THEN 3
ELSE 4
END,
c.name,
st.vendor;
-- History and status tables
CREATE TABLE IF NOT EXISTS calculate_history (
id BIGSERIAL PRIMARY KEY,
start_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
end_time TIMESTAMP WITH TIME ZONE NULL,
duration_seconds INTEGER,
duration_minutes DECIMAL(10,2) GENERATED ALWAYS AS (duration_seconds::decimal / 60.0) STORED,
total_products INTEGER DEFAULT 0,
total_orders INTEGER DEFAULT 0,
total_purchase_orders INTEGER DEFAULT 0,
processed_products INTEGER DEFAULT 0,
processed_orders INTEGER DEFAULT 0,
processed_purchase_orders INTEGER DEFAULT 0,
status calculation_status DEFAULT 'running',
error_message TEXT,
additional_info JSONB
);
CREATE TABLE IF NOT EXISTS calculate_status (
module_name module_name PRIMARY KEY,
last_calculation_timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS sync_status (
table_name VARCHAR(50) PRIMARY KEY,
last_sync_timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
last_sync_id BIGINT
);
CREATE TABLE IF NOT EXISTS import_history (
id BIGSERIAL PRIMARY KEY,
table_name VARCHAR(50) NOT NULL,
start_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
end_time TIMESTAMP WITH TIME ZONE NULL,
duration_seconds INTEGER,
duration_minutes DECIMAL(10,2) GENERATED ALWAYS AS (duration_seconds::decimal / 60.0) STORED,
records_added INTEGER DEFAULT 0,
records_updated INTEGER DEFAULT 0,
is_incremental BOOLEAN DEFAULT FALSE,
status calculation_status DEFAULT 'running',
error_message TEXT,
additional_info JSONB
);
-- Create all indexes after tables are fully created
CREATE INDEX IF NOT EXISTS idx_last_calc ON calculate_status(last_calculation_timestamp);
CREATE INDEX IF NOT EXISTS idx_last_sync ON sync_status(last_sync_timestamp);
CREATE INDEX IF NOT EXISTS idx_table_time ON import_history(table_name, start_time);

View File

@@ -0,0 +1,344 @@
-- Drop tables in reverse order of dependency
DROP TABLE IF EXISTS public.product_metrics CASCADE;
DROP TABLE IF EXISTS public.daily_product_snapshots CASCADE;
-- Table Definition: daily_product_snapshots
CREATE TABLE public.daily_product_snapshots (
snapshot_date DATE NOT NULL,
pid INT8 NOT NULL,
sku VARCHAR, -- Copied for convenience
-- Inventory Metrics (End of Day / Last Snapshot of Day)
eod_stock_quantity INT NOT NULL DEFAULT 0,
eod_stock_cost NUMERIC(14, 4) NOT NULL DEFAULT 0.00, -- Increased precision
eod_stock_retail NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
eod_stock_gross NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
stockout_flag BOOLEAN NOT NULL DEFAULT FALSE,
-- Sales Metrics (Aggregated for the snapshot_date)
units_sold INT NOT NULL DEFAULT 0,
units_returned INT NOT NULL DEFAULT 0,
gross_revenue NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
discounts NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
returns_revenue NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
net_revenue NUMERIC(14, 4) NOT NULL DEFAULT 0.00, -- gross_revenue - discounts
cogs NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
gross_regular_revenue NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
profit NUMERIC(14, 4) NOT NULL DEFAULT 0.00, -- net_revenue - cogs
-- Receiving Metrics (Aggregated for the snapshot_date)
units_received INT NOT NULL DEFAULT 0,
cost_received NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
calculation_timestamp TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (snapshot_date, pid) -- Composite primary key
-- CONSTRAINT fk_daily_snapshot_pid FOREIGN KEY (pid) REFERENCES public.products(pid) ON DELETE CASCADE ON UPDATE CASCADE -- FK Optional on snapshot table
);
-- Add Indexes for daily_product_snapshots
CREATE INDEX idx_daily_snapshot_pid_date ON public.daily_product_snapshots(pid, snapshot_date); -- Useful for product-specific time series
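-- Rolling-window sketch (illustrative only): how 30-day sales and revenue figures in
-- product_metrics could be derived from these snapshots, assuming revenue is based on
-- net_revenue:
-- SELECT pid,
--        SUM(units_sold)  AS sales_30d,
--        SUM(net_revenue) AS revenue_30d
-- FROM public.daily_product_snapshots
-- WHERE snapshot_date >= CURRENT_DATE - INTERVAL '30 days'
-- GROUP BY pid;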
-- Table Definition: product_metrics
CREATE TABLE public.product_metrics (
pid INT8 PRIMARY KEY,
last_calculated TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
-- Product Info (Copied for convenience/performance)
sku VARCHAR,
title VARCHAR,
brand VARCHAR,
vendor VARCHAR,
image_url VARCHAR, -- (e.g., products.image_175)
is_visible BOOLEAN,
is_replenishable BOOLEAN,
-- Additional product fields
barcode VARCHAR,
harmonized_tariff_code VARCHAR,
vendor_reference VARCHAR,
notions_reference VARCHAR,
line VARCHAR,
subline VARCHAR,
artist VARCHAR,
moq INT,
rating NUMERIC(10, 2),
reviews INT,
weight NUMERIC(14, 4),
length NUMERIC(14, 4),
width NUMERIC(14, 4),
height NUMERIC(14, 4),
country_of_origin VARCHAR,
location VARCHAR,
baskets INT,
notifies INT,
preorder_count INT,
notions_inv_count INT,
-- Current Status (Refreshed Hourly)
current_price NUMERIC(10, 2),
current_regular_price NUMERIC(10, 2),
current_cost_price NUMERIC(10, 4), -- Increased precision for cost
current_landing_cost_price NUMERIC(10, 4), -- Increased precision for cost
current_stock INT NOT NULL DEFAULT 0,
current_stock_cost NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
current_stock_retail NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
current_stock_gross NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
on_order_qty INT NOT NULL DEFAULT 0,
on_order_cost NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
on_order_retail NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
earliest_expected_date DATE,
-- total_received_lifetime INT NOT NULL DEFAULT 0, -- Can calc if needed
-- Historical Dates (Calculated Once/Periodically)
date_created DATE,
date_first_received DATE,
date_last_received DATE,
date_first_sold DATE,
date_last_sold DATE,
age_days INT, -- Calculated based on LEAST(date_created, date_first_sold)
-- Rolling Period Metrics (Refreshed Hourly from daily_product_snapshots)
sales_7d INT, revenue_7d NUMERIC(14, 4),
sales_14d INT, revenue_14d NUMERIC(14, 4),
sales_30d INT, revenue_30d NUMERIC(14, 4),
cogs_30d NUMERIC(14, 4), profit_30d NUMERIC(14, 4),
returns_units_30d INT, returns_revenue_30d NUMERIC(14, 4),
discounts_30d NUMERIC(14, 4),
gross_revenue_30d NUMERIC(14, 4), gross_regular_revenue_30d NUMERIC(14, 4),
stockout_days_30d INT,
sales_365d INT, revenue_365d NUMERIC(14, 4),
avg_stock_units_30d NUMERIC(10, 2), avg_stock_cost_30d NUMERIC(14, 4),
avg_stock_retail_30d NUMERIC(14, 4), avg_stock_gross_30d NUMERIC(14, 4),
received_qty_30d INT, received_cost_30d NUMERIC(14, 4),
-- Lifetime Metrics (Recalculated Hourly/Daily from daily_product_snapshots)
lifetime_sales INT,
lifetime_revenue NUMERIC(16, 4),
lifetime_revenue_quality VARCHAR(10), -- 'exact', 'partial', 'estimated'
-- First Period Metrics (Calculated Once/Periodically from daily_product_snapshots)
first_7_days_sales INT, first_7_days_revenue NUMERIC(14, 4),
first_30_days_sales INT, first_30_days_revenue NUMERIC(14, 4),
first_60_days_sales INT, first_60_days_revenue NUMERIC(14, 4),
first_90_days_sales INT, first_90_days_revenue NUMERIC(14, 4),
-- Calculated KPIs (Refreshed Hourly based on rolling metrics)
asp_30d NUMERIC(10, 2), -- revenue_30d / sales_30d
acp_30d NUMERIC(10, 4), -- cogs_30d / sales_30d
avg_ros_30d NUMERIC(10, 4), -- profit_30d / sales_30d
avg_sales_per_day_30d NUMERIC(10, 2), -- sales_30d / 30.0
avg_sales_per_month_30d NUMERIC(10, 2), -- sales_30d (assuming 30d = 1 month for this metric)
margin_30d NUMERIC(8, 2), -- (profit_30d / revenue_30d) * 100
markup_30d NUMERIC(8, 2), -- (profit_30d / cogs_30d) * 100
gmroi_30d NUMERIC(10, 2), -- profit_30d / avg_stock_cost_30d
stockturn_30d NUMERIC(10, 2), -- sales_30d / avg_stock_units_30d
return_rate_30d NUMERIC(8, 2), -- returns_units_30d / (sales_30d + returns_units_30d) * 100
discount_rate_30d NUMERIC(8, 2), -- discounts_30d / gross_revenue_30d * 100
stockout_rate_30d NUMERIC(8, 2), -- stockout_days_30d / 30.0 * 100
markdown_30d NUMERIC(14, 4), -- gross_regular_revenue_30d - gross_revenue_30d
markdown_rate_30d NUMERIC(8, 2), -- markdown_30d / gross_regular_revenue_30d * 100
sell_through_30d NUMERIC(8, 2), -- sales_30d / (current_stock + sales_30d) * 100
avg_lead_time_days INT, -- Calculated Periodically from purchase_orders
-- Forecasting & Replenishment (Refreshed Hourly)
abc_class CHAR(1), -- Updated Periodically (e.g., Weekly)
sales_velocity_daily NUMERIC(10, 4), -- sales_30d / (30.0 - stockout_days_30d)
config_lead_time INT, -- From settings tables
config_days_of_stock INT, -- From settings tables
config_safety_stock INT, -- From settings_product
planning_period_days INT, -- config_lead_time + config_days_of_stock
lead_time_forecast_units NUMERIC(10, 2), -- sales_velocity_daily * config_lead_time
days_of_stock_forecast_units NUMERIC(10, 2), -- sales_velocity_daily * config_days_of_stock
planning_period_forecast_units NUMERIC(10, 2), -- lead_time_forecast_units + days_of_stock_forecast_units
lead_time_closing_stock NUMERIC(10, 2), -- current_stock + on_order_qty - lead_time_forecast_units
days_of_stock_closing_stock NUMERIC(10, 2), -- lead_time_closing_stock - days_of_stock_forecast_units
replenishment_needed_raw NUMERIC(10, 2), -- planning_period_forecast_units + config_safety_stock - current_stock - on_order_qty
replenishment_units INT, -- CEILING(GREATEST(0, replenishment_needed_raw))
replenishment_cost NUMERIC(14, 4), -- replenishment_units * COALESCE(current_landing_cost_price, current_cost_price)
replenishment_retail NUMERIC(14, 4), -- replenishment_units * current_price
replenishment_profit NUMERIC(14, 4), -- replenishment_units * (current_price - COALESCE(current_landing_cost_price, current_cost_price))
to_order_units INT, -- Apply MOQ/UOM logic to replenishment_units
forecast_lost_sales_units NUMERIC(10, 2), -- GREATEST(0, -lead_time_closing_stock)
forecast_lost_revenue NUMERIC(14, 4), -- forecast_lost_sales_units * current_price
stock_cover_in_days NUMERIC(10, 1), -- current_stock / sales_velocity_daily
po_cover_in_days NUMERIC(10, 1), -- on_order_qty / sales_velocity_daily
sells_out_in_days NUMERIC(10, 1), -- (current_stock + on_order_qty) / sales_velocity_daily
replenish_date DATE, -- Calc based on when stock hits safety stock minus lead time
overstocked_units INT, -- GREATEST(0, current_stock - config_safety_stock - planning_period_forecast_units)
overstocked_cost NUMERIC(14, 4), -- overstocked_units * COALESCE(current_landing_cost_price, current_cost_price)
overstocked_retail NUMERIC(14, 4), -- overstocked_units * current_price
is_old_stock BOOLEAN, -- Based on age, last sold, last received, on_order status
-- Yesterday's Metrics (Refreshed Hourly from daily_product_snapshots)
yesterday_sales INT,
-- Product Status (Calculated from metrics)
status VARCHAR, -- Stores status values like: Critical, Reorder Soon, Healthy, Overstock, At Risk, New
-- Growth Metrics (P3)
sales_growth_30d_vs_prev NUMERIC(10, 2), -- % growth current 30d vs prev 30d
revenue_growth_30d_vs_prev NUMERIC(10, 2), -- % growth current 30d vs prev 30d
sales_growth_yoy NUMERIC(10, 2), -- Year-over-year sales growth %
revenue_growth_yoy NUMERIC(10, 2), -- Year-over-year revenue growth %
-- Demand Variability Metrics (P3)
sales_variance_30d NUMERIC(10, 2), -- Variance of daily sales
sales_std_dev_30d NUMERIC(10, 2), -- Standard deviation of daily sales
sales_cv_30d NUMERIC(10, 2), -- Coefficient of variation
demand_pattern VARCHAR(20), -- 'stable', 'variable', 'sporadic', 'lumpy'
-- Service Level & Fill Rate (P5)
fill_rate_30d NUMERIC(8, 2), -- % of demand fulfilled from stock
stockout_incidents_30d INT, -- Days with stockouts
service_level_30d NUMERIC(8, 2), -- % of days without stockouts
lost_sales_incidents_30d INT, -- Days with potential lost sales
-- Seasonality (P5)
seasonality_index NUMERIC(10, 2), -- Current vs average (100 = average)
seasonal_pattern VARCHAR(20), -- 'none', 'weekly', 'monthly', 'quarterly', 'yearly'
peak_season VARCHAR(20), -- e.g., 'Q4', 'summer', 'holiday'
CONSTRAINT fk_product_metrics_pid FOREIGN KEY (pid) REFERENCES public.products(pid) ON DELETE CASCADE ON UPDATE CASCADE
);
-- Add Indexes for product_metrics (adjust based on common filtering/sorting in frontend)
CREATE INDEX idx_product_metrics_brand ON public.product_metrics(brand);
CREATE INDEX idx_product_metrics_vendor ON public.product_metrics(vendor);
CREATE INDEX idx_product_metrics_sku ON public.product_metrics(sku);
CREATE INDEX idx_product_metrics_abc_class ON public.product_metrics(abc_class);
CREATE INDEX idx_product_metrics_revenue_30d ON public.product_metrics(revenue_30d DESC NULLS LAST); -- Example sorting index
CREATE INDEX idx_product_metrics_sales_30d ON public.product_metrics(sales_30d DESC NULLS LAST); -- Example sorting index
CREATE INDEX idx_product_metrics_current_stock ON public.product_metrics(current_stock);
CREATE INDEX idx_product_metrics_sells_out_in_days ON public.product_metrics(sells_out_in_days ASC NULLS LAST); -- Example sorting index
CREATE INDEX idx_product_metrics_status ON public.product_metrics(status); -- Index for status filtering
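-- KPI computation sketch (illustrative only; the actual refresh job lives elsewhere).
-- Shows how a few of the commented formulas above translate to SQL, with NULLIF
-- guarding against division by zero:
-- UPDATE public.product_metrics SET
--     asp_30d          = revenue_30d / NULLIF(sales_30d, 0),
--     margin_30d       = (profit_30d / NULLIF(revenue_30d, 0)) * 100,
--     gmroi_30d        = profit_30d / NULLIF(avg_stock_cost_30d, 0),
--     sell_through_30d = (sales_30d::numeric / NULLIF(current_stock + sales_30d, 0)) * 100,
--     last_calculated  = NOW();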
-- Add new vendor, category, and brand metrics tables
-- Drop tables in reverse order if they exist
DROP TABLE IF EXISTS public.brand_metrics CASCADE;
DROP TABLE IF EXISTS public.vendor_metrics CASCADE;
DROP TABLE IF EXISTS public.category_metrics CASCADE;
-- ========= Category Metrics =========
CREATE TABLE public.category_metrics (
category_id INT8 PRIMARY KEY, -- Foreign key to categories.cat_id
category_name VARCHAR, -- Denormalized for convenience
category_type INT2, -- Denormalized for convenience
parent_id INT8, -- Denormalized for convenience
last_calculated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- ROLLED-UP METRICS (includes this category + all descendants)
-- Counts & Basic Info
product_count INT NOT NULL DEFAULT 0, -- Total products linked
active_product_count INT NOT NULL DEFAULT 0, -- Visible products linked
replenishable_product_count INT NOT NULL DEFAULT 0,-- Replenishable products linked
-- Current Stock Value (approximated using current product costs/prices)
current_stock_units INT NOT NULL DEFAULT 0,
current_stock_cost NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
current_stock_retail NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
-- Rolling Period Aggregates (Summed from product_metrics)
sales_7d INT NOT NULL DEFAULT 0, revenue_7d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
sales_30d INT NOT NULL DEFAULT 0, revenue_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
profit_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00, cogs_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
sales_365d INT NOT NULL DEFAULT 0, revenue_365d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
lifetime_sales INT NOT NULL DEFAULT 0, lifetime_revenue NUMERIC(18, 4) NOT NULL DEFAULT 0.00,
-- DIRECT METRICS (only products directly in this category)
direct_product_count INT NOT NULL DEFAULT 0, -- Products directly in this category
direct_active_product_count INT NOT NULL DEFAULT 0, -- Visible products directly in this category
direct_replenishable_product_count INT NOT NULL DEFAULT 0,-- Replenishable products directly in this category
-- Direct Current Stock Value
direct_current_stock_units INT NOT NULL DEFAULT 0,
direct_stock_cost NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
direct_stock_retail NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
-- Direct Rolling Period Aggregates
direct_sales_7d INT NOT NULL DEFAULT 0, direct_revenue_7d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
direct_sales_30d INT NOT NULL DEFAULT 0, direct_revenue_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
direct_profit_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00, direct_cogs_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
direct_sales_365d INT NOT NULL DEFAULT 0, direct_revenue_365d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
direct_lifetime_sales INT NOT NULL DEFAULT 0, direct_lifetime_revenue NUMERIC(18, 4) NOT NULL DEFAULT 0.00,
-- Calculated KPIs (Based on 30d aggregates) - Apply to rolled-up metrics
avg_margin_30d NUMERIC(7, 3), -- (profit / revenue) * 100
stock_turn_30d NUMERIC(10, 3), -- sales_units / avg_stock_units (Needs avg stock calc)
sales_growth_30d_vs_prev NUMERIC(10, 2), -- % growth in sales units
revenue_growth_30d_vs_prev NUMERIC(10, 2), -- % growth in revenue
CONSTRAINT fk_category_metrics_cat_id FOREIGN KEY (category_id) REFERENCES public.categories(cat_id) ON DELETE CASCADE ON UPDATE CASCADE
);
CREATE INDEX idx_category_metrics_name ON public.category_metrics(category_name);
CREATE INDEX idx_category_metrics_type ON public.category_metrics(category_type);
-- ========= Vendor Metrics =========
CREATE TABLE public.vendor_metrics (
vendor_name VARCHAR PRIMARY KEY, -- Matches products.vendor
last_calculated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Counts & Basic Info
product_count INT NOT NULL DEFAULT 0, -- Total products from this vendor
active_product_count INT NOT NULL DEFAULT 0, -- Visible products
replenishable_product_count INT NOT NULL DEFAULT 0,-- Replenishable products
-- Current Stock Value (approximated)
current_stock_units INT NOT NULL DEFAULT 0,
current_stock_cost NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
current_stock_retail NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
-- On Order Value
on_order_units INT NOT NULL DEFAULT 0,
on_order_cost NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
-- PO Performance (Simplified)
po_count_365d INT NOT NULL DEFAULT 0, -- Count of distinct POs created in last year
avg_lead_time_days INT, -- Calculated from received POs historically
-- Rolling Period Aggregates (Summed from product_metrics)
sales_7d INT NOT NULL DEFAULT 0, revenue_7d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
sales_30d INT NOT NULL DEFAULT 0, revenue_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
profit_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00, cogs_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
sales_365d INT NOT NULL DEFAULT 0, revenue_365d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
lifetime_sales INT NOT NULL DEFAULT 0, lifetime_revenue NUMERIC(18, 4) NOT NULL DEFAULT 0.00,
-- Calculated KPIs (Based on 30d aggregates)
avg_margin_30d NUMERIC(14, 4), -- (profit / revenue) * 100
sales_growth_30d_vs_prev NUMERIC(10, 2), -- % growth in sales units
    revenue_growth_30d_vs_prev NUMERIC(10, 2) -- % growth in revenue (no trailing comma: last column before closing paren)
-- Add more KPIs if needed (e.g., avg product value, sell-through rate for vendor)
);
CREATE INDEX idx_vendor_metrics_active_count ON public.vendor_metrics(active_product_count);
-- ========= Brand Metrics =========
CREATE TABLE public.brand_metrics (
brand_name VARCHAR PRIMARY KEY, -- Matches products.brand (use 'Unbranded' for NULLs)
last_calculated TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Counts & Basic Info
product_count INT NOT NULL DEFAULT 0, -- Total products of this brand
active_product_count INT NOT NULL DEFAULT 0, -- Visible products
replenishable_product_count INT NOT NULL DEFAULT 0,-- Replenishable products
-- Current Stock Value (approximated)
current_stock_units INT NOT NULL DEFAULT 0,
current_stock_cost NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
current_stock_retail NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
-- Rolling Period Aggregates (Summed from product_metrics)
sales_7d INT NOT NULL DEFAULT 0, revenue_7d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
sales_30d INT NOT NULL DEFAULT 0, revenue_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
profit_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00, cogs_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
sales_365d INT NOT NULL DEFAULT 0, revenue_365d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
lifetime_sales INT NOT NULL DEFAULT 0, lifetime_revenue NUMERIC(18, 4) NOT NULL DEFAULT 0.00,
-- Calculated KPIs (Based on 30d aggregates)
avg_margin_30d NUMERIC(7, 3), -- (profit / revenue) * 100
sales_growth_30d_vs_prev NUMERIC(10, 2), -- % growth in sales units
    revenue_growth_30d_vs_prev NUMERIC(10, 2) -- % growth in revenue (no trailing comma: last column before closing paren)
-- Add more KPIs if needed (e.g., avg product value, sell-through rate for brand)
);
CREATE INDEX idx_brand_metrics_active_count ON public.brand_metrics(active_product_count);

View File

@@ -4,7 +4,12 @@ SET session_replication_role = 'replica'; -- Disable foreign key checks tempora
-- Create function for updating timestamps
CREATE OR REPLACE FUNCTION update_updated_column() RETURNS TRIGGER AS $func$
BEGIN
NEW.updated = CURRENT_TIMESTAMP;
-- Check which table is being updated and use the appropriate column
IF TG_TABLE_NAME = 'categories' THEN
NEW.updated_at = CURRENT_TIMESTAMP;
ELSIF TG_TABLE_NAME IN ('products', 'orders', 'purchase_orders', 'receivings') THEN
NEW.updated = CURRENT_TIMESTAMP;
END IF;
RETURN NEW;
END;
$func$ language plpgsql;
@@ -12,48 +17,48 @@ $func$ language plpgsql;
-- Create tables
CREATE TABLE products (
pid BIGINT NOT NULL,
title VARCHAR(255) NOT NULL,
title TEXT NOT NULL,
description TEXT,
SKU VARCHAR(50) NOT NULL,
sku TEXT NOT NULL,
created_at TIMESTAMP WITH TIME ZONE,
first_received TIMESTAMP WITH TIME ZONE,
stock_quantity INTEGER DEFAULT 0,
preorder_count INTEGER DEFAULT 0,
notions_inv_count INTEGER DEFAULT 0,
price DECIMAL(10, 3) NOT NULL,
regular_price DECIMAL(10, 3) NOT NULL,
cost_price DECIMAL(10, 3),
landing_cost_price DECIMAL(10, 3),
barcode VARCHAR(50),
harmonized_tariff_code VARCHAR(20),
price NUMERIC(14, 4) NOT NULL,
regular_price NUMERIC(14, 4) NOT NULL,
cost_price NUMERIC(14, 4),
landing_cost_price NUMERIC(14, 4),
barcode TEXT,
harmonized_tariff_code TEXT,
updated_at TIMESTAMP WITH TIME ZONE,
visible BOOLEAN DEFAULT true,
managing_stock BOOLEAN DEFAULT true,
replenishable BOOLEAN DEFAULT true,
vendor VARCHAR(100),
vendor_reference VARCHAR(100),
notions_reference VARCHAR(100),
permalink VARCHAR(255),
vendor TEXT,
vendor_reference TEXT,
notions_reference TEXT,
permalink TEXT,
categories TEXT,
image VARCHAR(255),
image_175 VARCHAR(255),
image_full VARCHAR(255),
brand VARCHAR(100),
line VARCHAR(100),
subline VARCHAR(100),
artist VARCHAR(100),
image TEXT,
image_175 TEXT,
image_full TEXT,
brand TEXT,
line TEXT,
subline TEXT,
artist TEXT,
options TEXT,
tags TEXT,
moq INTEGER DEFAULT 1,
uom INTEGER DEFAULT 1,
rating DECIMAL(10,2) DEFAULT 0.00,
rating NUMERIC(14, 4) DEFAULT 0.00,
reviews INTEGER DEFAULT 0,
weight DECIMAL(10,3),
length DECIMAL(10,3),
width DECIMAL(10,3),
height DECIMAL(10,3),
country_of_origin VARCHAR(5),
location VARCHAR(50),
weight NUMERIC(14, 4),
length NUMERIC(14, 4),
width NUMERIC(14, 4),
height NUMERIC(14, 4),
country_of_origin TEXT,
location TEXT,
total_sold INTEGER DEFAULT 0,
baskets INTEGER DEFAULT 0,
notifies INTEGER DEFAULT 0,
@@ -69,25 +74,25 @@ CREATE TRIGGER update_products_updated
EXECUTE FUNCTION update_updated_column();
-- Create indexes for products table
CREATE INDEX idx_products_sku ON products(SKU);
CREATE INDEX idx_products_sku ON products(sku);
CREATE INDEX idx_products_vendor ON products(vendor);
CREATE INDEX idx_products_brand ON products(brand);
CREATE INDEX idx_products_location ON products(location);
CREATE INDEX idx_products_total_sold ON products(total_sold);
CREATE INDEX idx_products_date_last_sold ON products(date_last_sold);
CREATE INDEX idx_products_visible ON products(visible);
CREATE INDEX idx_products_replenishable ON products(replenishable);
CREATE INDEX idx_products_updated ON products(updated);
-- Create categories table with hierarchy support
CREATE TABLE categories (
cat_id BIGINT PRIMARY KEY,
name VARCHAR(100) NOT NULL,
name TEXT NOT NULL,
type SMALLINT NOT NULL,
parent_id BIGINT,
description TEXT,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
status VARCHAR(20) DEFAULT 'active',
FOREIGN KEY (parent_id) REFERENCES categories(cat_id)
updated TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
status TEXT DEFAULT 'active',
FOREIGN KEY (parent_id) REFERENCES categories(cat_id) ON DELETE SET NULL
);
-- Create trigger for categories
@@ -101,6 +106,7 @@ COMMENT ON COLUMN categories.type IS '10=section, 11=category, 12=subcategory, 1
CREATE INDEX idx_categories_parent ON categories(parent_id);
CREATE INDEX idx_categories_type ON categories(type);
CREATE INDEX idx_categories_status ON categories(status);
CREATE INDEX idx_categories_name ON categories(name);
CREATE INDEX idx_categories_name_type ON categories(name, type);
-- Create product_categories junction table
@@ -113,28 +119,28 @@ CREATE TABLE product_categories (
);
CREATE INDEX idx_product_categories_category ON product_categories(cat_id);
CREATE INDEX idx_product_categories_product ON product_categories(pid);
-- Create orders table with its indexes
CREATE TABLE orders (
id BIGSERIAL PRIMARY KEY,
order_number VARCHAR(50) NOT NULL,
order_number TEXT NOT NULL,
pid BIGINT NOT NULL,
SKU VARCHAR(50) NOT NULL,
date DATE NOT NULL,
price DECIMAL(10,3) NOT NULL,
sku TEXT NOT NULL,
date TIMESTAMP WITH TIME ZONE NOT NULL,
price NUMERIC(14, 4) NOT NULL,
quantity INTEGER NOT NULL,
discount DECIMAL(10,3) DEFAULT 0.000,
tax DECIMAL(10,3) DEFAULT 0.000,
discount NUMERIC(14, 4) DEFAULT 0.0000,
tax NUMERIC(14, 4) DEFAULT 0.0000,
tax_included BOOLEAN DEFAULT false,
shipping DECIMAL(10,3) DEFAULT 0.000,
costeach DECIMAL(10,3) DEFAULT 0.000,
customer VARCHAR(50) NOT NULL,
customer_name VARCHAR(100),
status VARCHAR(20) DEFAULT 'pending',
shipping NUMERIC(14, 4) DEFAULT 0.0000,
costeach NUMERIC(14, 4) DEFAULT 0.0000,
customer TEXT NOT NULL,
customer_name TEXT,
status TEXT DEFAULT 'pending',
canceled BOOLEAN DEFAULT false,
updated TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
UNIQUE (order_number, pid)
UNIQUE (order_number, pid),
FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE RESTRICT
);
-- Create trigger for orders
@@ -145,36 +151,34 @@ CREATE TRIGGER update_orders_updated
CREATE INDEX idx_orders_number ON orders(order_number);
CREATE INDEX idx_orders_pid ON orders(pid);
CREATE INDEX idx_orders_sku ON orders(sku);
CREATE INDEX idx_orders_customer ON orders(customer);
CREATE INDEX idx_orders_date ON orders(date);
CREATE INDEX idx_orders_status ON orders(status);
CREATE INDEX idx_orders_metrics ON orders(pid, date, canceled);
CREATE INDEX idx_orders_pid_date ON orders(pid, date);
CREATE INDEX idx_orders_updated ON orders(updated);
-- Create purchase_orders table with its indexes
-- This table now focuses solely on purchase order intent, not receivings
CREATE TABLE purchase_orders (
id BIGSERIAL PRIMARY KEY,
po_id VARCHAR(50) NOT NULL,
vendor VARCHAR(100) NOT NULL,
date DATE NOT NULL,
po_id TEXT NOT NULL,
vendor TEXT NOT NULL,
date TIMESTAMP WITH TIME ZONE NOT NULL,
expected_date DATE,
pid BIGINT NOT NULL,
sku VARCHAR(50) NOT NULL,
name VARCHAR(100) NOT NULL,
cost_price DECIMAL(10, 3) NOT NULL,
po_cost_price DECIMAL(10, 3) NOT NULL,
status SMALLINT DEFAULT 1,
receiving_status SMALLINT DEFAULT 1,
sku TEXT NOT NULL,
name TEXT NOT NULL,
po_cost_price NUMERIC(14, 4) NOT NULL,
status TEXT DEFAULT 'created',
notes TEXT,
long_note TEXT,
ordered INTEGER NOT NULL,
received INTEGER DEFAULT 0,
received_date DATE,
last_received_date DATE,
received_by VARCHAR(100),
receiving_history JSONB,
supplier_id INTEGER,
date_created TIMESTAMP WITH TIME ZONE,
date_ordered TIMESTAMP WITH TIME ZONE,
updated TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (pid) REFERENCES products(pid),
FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE,
UNIQUE (po_id, pid)
);
@@ -185,22 +189,116 @@ CREATE TRIGGER update_purchase_orders_updated
EXECUTE FUNCTION update_updated_column();
COMMENT ON COLUMN purchase_orders.name IS 'Product name from products.description';
COMMENT ON COLUMN purchase_orders.po_cost_price IS 'Original cost from PO, before receiving adjustments';
COMMENT ON COLUMN purchase_orders.status IS '0=canceled,1=created,10=electronically_ready_send,11=ordered,12=preordered,13=electronically_sent,15=receiving_started,50=done';
COMMENT ON COLUMN purchase_orders.receiving_status IS '0=canceled,1=created,30=partial_received,40=full_received,50=paid';
COMMENT ON COLUMN purchase_orders.receiving_history IS 'Array of receiving records with qty, date, cost, receiving_id, and alt_po flag';
COMMENT ON COLUMN purchase_orders.po_cost_price IS 'Original cost from PO';
COMMENT ON COLUMN purchase_orders.status IS 'canceled, created, electronically_ready_send, ordered, preordered, electronically_sent, receiving_started, done';
CREATE INDEX idx_po_id ON purchase_orders(po_id);
CREATE INDEX idx_po_sku ON purchase_orders(sku);
CREATE INDEX idx_po_vendor ON purchase_orders(vendor);
CREATE INDEX idx_po_status ON purchase_orders(status);
CREATE INDEX idx_po_receiving_status ON purchase_orders(receiving_status);
CREATE INDEX idx_po_metrics ON purchase_orders(pid, date, status, ordered, received);
CREATE INDEX idx_po_metrics_receiving ON purchase_orders(pid, date, receiving_status, received_date);
CREATE INDEX idx_po_product_date ON purchase_orders(pid, date);
CREATE INDEX idx_po_product_status ON purchase_orders(pid, status);
CREATE INDEX idx_po_expected_date ON purchase_orders(expected_date);
CREATE INDEX idx_po_pid_status ON purchase_orders(pid, status);
CREATE INDEX idx_po_pid_date ON purchase_orders(pid, date);
CREATE INDEX idx_po_updated ON purchase_orders(updated);
CREATE INDEX idx_po_supplier_id ON purchase_orders(supplier_id);
-- Create receivings table to track actual receipt of goods
CREATE TABLE receivings (
id BIGSERIAL PRIMARY KEY,
receiving_id TEXT NOT NULL,
pid BIGINT NOT NULL,
sku TEXT NOT NULL,
name TEXT NOT NULL,
vendor TEXT,
qty_each INTEGER NOT NULL,
qty_each_orig INTEGER,
cost_each NUMERIC(14, 5) NOT NULL,
cost_each_orig NUMERIC(14, 5),
received_by INTEGER,
received_by_name TEXT,
received_date TIMESTAMP WITH TIME ZONE NOT NULL,
receiving_created_date TIMESTAMP WITH TIME ZONE,
supplier_id INTEGER,
status TEXT DEFAULT 'created',
updated TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE,
UNIQUE (receiving_id, pid)
);
-- Create trigger for receivings
CREATE TRIGGER update_receivings_updated
BEFORE UPDATE ON receivings
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();
COMMENT ON COLUMN receivings.status IS 'canceled, created, partial_received, full_received, paid';
COMMENT ON COLUMN receivings.qty_each_orig IS 'Original quantity from the source system';
COMMENT ON COLUMN receivings.cost_each_orig IS 'Original cost from the source system';
COMMENT ON COLUMN receivings.vendor IS 'Vendor name, same as in purchase_orders';
CREATE INDEX idx_receivings_id ON receivings(receiving_id);
CREATE INDEX idx_receivings_pid ON receivings(pid);
CREATE INDEX idx_receivings_sku ON receivings(sku);
CREATE INDEX idx_receivings_status ON receivings(status);
CREATE INDEX idx_receivings_received_date ON receivings(received_date);
CREATE INDEX idx_receivings_supplier_id ON receivings(supplier_id);
CREATE INDEX idx_receivings_vendor ON receivings(vendor);
CREATE INDEX idx_receivings_updated ON receivings(updated);
SET session_replication_role = 'origin'; -- Re-enable foreign key checks
-- Create views for common calculations
-- product_sales_trends view moved to metrics-schema.sql
-- -- Historical data tables imported from production
-- CREATE TABLE imported_product_current_prices (
-- price_id BIGSERIAL PRIMARY KEY,
-- pid BIGINT NOT NULL,
-- qty_buy SMALLINT NOT NULL,
-- is_min_qty_buy BOOLEAN NOT NULL,
-- price_each NUMERIC(10,3) NOT NULL,
-- qty_limit SMALLINT NOT NULL,
-- no_promo BOOLEAN NOT NULL,
-- checkout_offer BOOLEAN NOT NULL,
-- active BOOLEAN NOT NULL,
-- date_active TIMESTAMP WITH TIME ZONE,
-- date_deactive TIMESTAMP WITH TIME ZONE,
-- updated TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
-- );
-- CREATE INDEX idx_imported_product_current_prices_pid ON imported_product_current_prices(pid, active, qty_buy);
-- CREATE INDEX idx_imported_product_current_prices_checkout ON imported_product_current_prices(checkout_offer, active);
-- CREATE INDEX idx_imported_product_current_prices_deactive ON imported_product_current_prices(date_deactive, active);
-- CREATE INDEX idx_imported_product_current_prices_active ON imported_product_current_prices(date_active, active);
-- CREATE TABLE imported_daily_inventory (
-- date DATE NOT NULL,
-- pid BIGINT NOT NULL,
-- amountsold SMALLINT NOT NULL DEFAULT 0,
-- times_sold SMALLINT NOT NULL DEFAULT 0,
-- qtyreceived SMALLINT NOT NULL DEFAULT 0,
-- price NUMERIC(7,2) NOT NULL DEFAULT 0,
-- costeach NUMERIC(7,2) NOT NULL DEFAULT 0,
-- stamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
-- updated TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
-- PRIMARY KEY (date, pid)
-- );
-- CREATE INDEX idx_imported_daily_inventory_pid ON imported_daily_inventory(pid);
-- CREATE TABLE imported_product_stat_history (
-- pid BIGINT NOT NULL,
-- date DATE NOT NULL,
-- score NUMERIC(10,2) NOT NULL,
-- score2 NUMERIC(10,2) NOT NULL,
-- qty_in_baskets SMALLINT NOT NULL,
-- qty_sold SMALLINT NOT NULL,
-- notifies_set SMALLINT NOT NULL,
-- visibility_score NUMERIC(10,2) NOT NULL,
-- health_score VARCHAR(5) NOT NULL,
-- sold_view_score NUMERIC(6,3) NOT NULL,
-- updated TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
-- PRIMARY KEY (pid, date)
-- );
-- CREATE INDEX idx_imported_product_stat_history_date ON imported_product_stat_history(date);

View File

@@ -23,6 +23,56 @@ CREATE TABLE IF NOT EXISTS templates (
UNIQUE(company, product_type)
);
-- AI Prompts table for storing validation prompts
CREATE TABLE IF NOT EXISTS ai_prompts (
id SERIAL PRIMARY KEY,
prompt_text TEXT NOT NULL,
prompt_type TEXT NOT NULL CHECK (prompt_type IN ('general', 'company_specific', 'system')),
company TEXT,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT unique_company_prompt UNIQUE (company),
CONSTRAINT company_required_for_specific CHECK (
(prompt_type = 'general' AND company IS NULL) OR
(prompt_type = 'system' AND company IS NULL) OR
(prompt_type = 'company_specific' AND company IS NOT NULL)
)
);
-- Create a unique partial index to ensure only one general prompt
CREATE UNIQUE INDEX IF NOT EXISTS idx_unique_general_prompt
ON ai_prompts (prompt_type)
WHERE prompt_type = 'general';
-- Create a unique partial index to ensure only one system prompt
CREATE UNIQUE INDEX IF NOT EXISTS idx_unique_system_prompt
ON ai_prompts (prompt_type)
WHERE prompt_type = 'system';
-- Reusable Images table for storing persistent images
CREATE TABLE IF NOT EXISTS reusable_images (
id SERIAL PRIMARY KEY,
name TEXT NOT NULL,
filename TEXT NOT NULL,
file_path TEXT NOT NULL,
image_url TEXT NOT NULL,
is_global BOOLEAN NOT NULL DEFAULT false,
company TEXT,
mime_type TEXT,
file_size INTEGER,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT company_required_for_non_global CHECK (
(is_global = true AND company IS NULL) OR
(is_global = false AND company IS NOT NULL)
)
);
-- Create index on company for efficient querying
CREATE INDEX IF NOT EXISTS idx_reusable_images_company ON reusable_images(company);
-- Create index on is_global for efficient querying
CREATE INDEX IF NOT EXISTS idx_reusable_images_is_global ON reusable_images(is_global);
-- AI Validation Performance Tracking
CREATE TABLE IF NOT EXISTS ai_validation_performance (
id SERIAL PRIMARY KEY,
@@ -50,4 +100,16 @@ $$ language 'plpgsql';
CREATE TRIGGER update_templates_updated_at
BEFORE UPDATE ON templates
FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column();
-- Trigger to automatically update the updated_at column for ai_prompts
CREATE TRIGGER update_ai_prompts_updated_at
BEFORE UPDATE ON ai_prompts
FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column();
-- Trigger to automatically update the updated_at column for reusable_images
CREATE TRIGGER update_reusable_images_updated_at
BEFORE UPDATE ON reusable_images
FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column();

View File

@@ -0,0 +1,426 @@
const path = require('path');
const fs = require('fs');
const progress = require('../scripts/metrics-new/utils/progress'); // Assuming progress utils are here
const { getConnection, closePool } = require('../scripts/metrics-new/utils/db'); // Assuming db utils are here
const os = require('os'); // For detecting number of CPU cores
// --- Configuration ---
const BATCH_SIZE_DAYS = 1; // Process 1 day per database function call
const SQL_FUNCTION_FILE = path.resolve(__dirname, 'backfill_historical_snapshots.sql'); // Correct path
const LOG_PROGRESS_INTERVAL_MS = 5000; // Update console progress roughly every 5 seconds
const HISTORY_TYPE = 'backfill_snapshots'; // Identifier for history table
const MAX_WORKERS = Math.max(1, Math.floor(os.cpus().length / 2)); // Use half of available CPU cores
const USE_PARALLEL = false; // Set to true to enable parallel processing
const PG_STATEMENT_TIMEOUT_MS = 1800000; // 30 minutes max per query
// --- Cancellation Handling ---
let isCancelled = false;
let runningQueryPromise = null; // To potentially track the active query
function requestCancellation() {
if (!isCancelled) {
isCancelled = true;
console.warn('\nCancellation requested. Finishing current batch then stopping...');
// Note: We are NOT forcefully cancelling the backend query anymore.
}
}
process.on('SIGINT', requestCancellation); // Handle Ctrl+C
process.on('SIGTERM', requestCancellation); // Handle termination signals
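// Cooperative-cancellation sketch (the actual batch loop is further down this file and
// not shown here; this is only an assumption about how the flag is consumed, and
// runOneDayBatch is a hypothetical helper name):
//   for (let batch = currentBatchNum; batch <= totalBatches; batch++) {
//     if (isCancelled) break;        // stop between batches, never mid-query
//     await runOneDayBatch(batch);
//   }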
// --- Main Backfill Function ---
async function backfillSnapshots(cmdStartDate, cmdEndDate, cmdStartBatch = 1) {
let connection;
const overallStartTime = Date.now();
let calculateHistoryId = null;
let processedDaysTotal = 0; // Track total days processed across all batches executed in this run
let currentBatchNum = cmdStartBatch > 0 ? cmdStartBatch : 1;
let totalBatches = 0; // Initialize totalBatches
let totalDays = 0; // Initialize totalDays
console.log(`Starting snapshot backfill process...`);
console.log(`SQL Function definition file: ${SQL_FUNCTION_FILE}`);
if (!fs.existsSync(SQL_FUNCTION_FILE)) {
console.error(`FATAL: SQL file not found at ${SQL_FUNCTION_FILE}`);
process.exit(1); // Exit early if file doesn't exist
}
try {
// Set up a connection with higher memory limits
connection = await getConnection({
// Add performance-related settings
application_name: 'backfill_snapshots',
statement_timeout: PG_STATEMENT_TIMEOUT_MS, // 30 min timeout per statement
// These parameters may need to be configured in your database:
// work_mem: '1GB',
// maintenance_work_mem: '2GB',
// temp_buffers: '1GB',
});
console.log('Database connection acquired.');
// --- Ensure Function Exists ---
console.log('Ensuring database function is up-to-date...');
try {
const sqlFunctionDef = fs.readFileSync(SQL_FUNCTION_FILE, 'utf8');
if (!sqlFunctionDef.includes('CREATE OR REPLACE FUNCTION backfill_daily_snapshots_range_final')) {
throw new Error(`SQL file ${SQL_FUNCTION_FILE} does not seem to contain the function definition.`);
}
await connection.query(sqlFunctionDef); // Execute the whole file
console.log('Database function `backfill_daily_snapshots_range_final` created/updated.');
// Add performance query hints to the database
await connection.query(`
-- Analyze tables for better query planning
ANALYZE public.products;
ANALYZE public.imported_daily_inventory;
ANALYZE public.imported_product_stat_history;
ANALYZE public.daily_product_snapshots;
ANALYZE public.imported_product_current_prices;
`).catch(err => {
// Non-fatal if analyze fails
console.warn('Failed to analyze tables (non-fatal):', err.message);
});
} catch (err) {
console.error(`Error processing SQL function file ${SQL_FUNCTION_FILE}:`, err);
throw new Error(`Failed to create or replace DB function: ${err.message}`);
}
// --- Prepare History Record ---
console.log('Preparing calculation history record...');
// Ensure history table exists (optional, could be done elsewhere)
await connection.query(`
CREATE TABLE IF NOT EXISTS public.calculate_history (
id SERIAL PRIMARY KEY,
start_time TIMESTAMPTZ NOT NULL DEFAULT NOW(),
end_time TIMESTAMPTZ,
duration_seconds INTEGER,
status VARCHAR(20) NOT NULL, -- e.g., 'running', 'completed', 'failed', 'cancelled'
error_message TEXT,
additional_info JSONB -- Store type, file, batch info etc.
);
`);
// Mark previous runs of this type as potentially failed if they were left 'running'
await connection.query(`
UPDATE public.calculate_history
SET status = 'failed', error_message = 'Interrupted by new run.'
WHERE status = 'running' AND additional_info->>'type' = $1;
`, [HISTORY_TYPE]);
// Create new history record
const historyResult = await connection.query(`
INSERT INTO public.calculate_history (start_time, status, additional_info)
VALUES (NOW(), 'running', jsonb_build_object('type', $1::text, 'sql_file', $2::text, 'start_batch', $3::integer))
RETURNING id;
`, [HISTORY_TYPE, path.basename(SQL_FUNCTION_FILE), cmdStartBatch]);
calculateHistoryId = historyResult.rows[0].id;
console.log(`Calculation history record created with ID: ${calculateHistoryId}`);
// --- Determine Date Range ---
console.log('Determining date range...');
let effectiveStartDate, effectiveEndDate;
// Use command-line dates if provided, otherwise query DB
if (cmdStartDate) {
effectiveStartDate = cmdStartDate;
} else {
const minDateResult = await connection.query(`
SELECT LEAST(
COALESCE((SELECT MIN(date) FROM public.imported_daily_inventory WHERE date > '1970-01-01'), CURRENT_DATE),
COALESCE((SELECT MIN(date) FROM public.imported_product_stat_history WHERE date > '1970-01-01'), CURRENT_DATE)
)::date as min_date;
`);
effectiveStartDate = minDateResult.rows[0]?.min_date || new Date().toISOString().split('T')[0]; // Fallback
console.log(`Auto-detected start date: ${effectiveStartDate}`);
}
if (cmdEndDate) {
effectiveEndDate = cmdEndDate;
} else {
const maxDateResult = await connection.query(`
SELECT GREATEST(
COALESCE((SELECT MAX(date) FROM public.imported_daily_inventory WHERE date < CURRENT_DATE), '1970-01-01'::date),
COALESCE((SELECT MAX(date) FROM public.imported_product_stat_history WHERE date < CURRENT_DATE), '1970-01-01'::date)
)::date as max_date;
`);
// Ensure end date is not today or in the future
effectiveEndDate = maxDateResult.rows[0]?.max_date || new Date(Date.now() - 86400000).toISOString().split('T')[0]; // Default yesterday
if (new Date(effectiveEndDate) >= new Date(new Date().toISOString().split('T')[0])) {
effectiveEndDate = new Date(Date.now() - 86400000).toISOString().split('T')[0]; // Set to yesterday if >= today
}
console.log(`Auto-detected end date: ${effectiveEndDate}`);
}
// Validate dates
const dStart = new Date(effectiveStartDate);
const dEnd = new Date(effectiveEndDate);
if (isNaN(dStart.getTime()) || isNaN(dEnd.getTime()) || dStart > dEnd) {
throw new Error(`Invalid date range: Start "${effectiveStartDate}", End "${effectiveEndDate}"`);
}
// --- Batch Processing ---
totalDays = Math.ceil((dEnd - dStart) / (1000 * 60 * 60 * 24)) + 1; // Inclusive
totalBatches = Math.ceil(totalDays / BATCH_SIZE_DAYS);
console.log(`Target Date Range: ${effectiveStartDate} to ${effectiveEndDate} (${totalDays} days)`);
console.log(`Total Batches: ${totalBatches} (Batch Size: ${BATCH_SIZE_DAYS} days)`);
console.log(`Starting from Batch: ${currentBatchNum}`);
// Initial progress update
progress.outputProgress({
status: 'running',
operation: 'Starting Batch Processing',
currentBatch: currentBatchNum,
totalBatches: totalBatches,
totalDays: totalDays,
elapsed: '0s',
remaining: 'Calculating...',
rate: 0,
historyId: calculateHistoryId // Include history ID in the object
});
while (currentBatchNum <= totalBatches && !isCancelled) {
const batchOffset = (currentBatchNum - 1) * BATCH_SIZE_DAYS;
const batchStartDate = new Date(dStart);
batchStartDate.setDate(dStart.getDate() + batchOffset);
const batchEndDate = new Date(batchStartDate);
batchEndDate.setDate(batchStartDate.getDate() + BATCH_SIZE_DAYS - 1);
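// e.g. with BATCH_SIZE_DAYS = 3, a batch starting 2024-01-01 ends 2024-01-03 (inclusive) before clamping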
// Clamp batch end date to the overall effective end date
if (batchEndDate > dEnd) {
batchEndDate.setTime(dEnd.getTime());
}
const batchStartDateStr = batchStartDate.toISOString().split('T')[0];
const batchEndDateStr = batchEndDate.toISOString().split('T')[0];
const batchStartTime = Date.now();
console.log(`\n--- Processing Batch ${currentBatchNum} / ${totalBatches} ---`);
console.log(` Dates: ${batchStartDateStr} to ${batchEndDateStr}`);
// Execute the function for the batch
try {
progress.outputProgress({
status: 'running',
operation: `Executing DB function for batch ${currentBatchNum}...`,
currentBatch: currentBatchNum,
totalBatches: totalBatches,
totalDays: totalDays,
elapsed: progress.formatElapsedTime(overallStartTime),
remaining: 'Executing...',
rate: 0,
historyId: calculateHistoryId
});
// Performance improvement: Add batch processing hint
await connection.query('SET LOCAL enable_parallel_append = on; SET LOCAL enable_parallel_hash = on; SET LOCAL max_parallel_workers_per_gather = 4;');
// Store promise in case we need to try and cancel (though not implemented forcefully)
runningQueryPromise = connection.query(
`SELECT backfill_daily_snapshots_range_final($1::date, $2::date);`,
[batchStartDateStr, batchEndDateStr]
);
await runningQueryPromise; // Wait for the function call to complete
runningQueryPromise = null; // Clear the promise
const batchDurationMs = Date.now() - batchStartTime;
const daysInThisBatch = Math.ceil((batchEndDate - batchStartDate) / (1000 * 60 * 60 * 24)) + 1;
processedDaysTotal += daysInThisBatch;
console.log(` Batch ${currentBatchNum} completed in ${progress.formatElapsedTime(batchStartTime)}.`);
// --- Update Progress & History ---
const overallElapsedSec = Math.round((Date.now() - overallStartTime) / 1000);
progress.outputProgress({
status: 'running',
operation: `Completed batch ${currentBatchNum}`,
currentBatch: currentBatchNum,
totalBatches: totalBatches,
totalDays: totalDays,
processedDays: processedDaysTotal,
elapsed: progress.formatElapsedTime(overallStartTime),
remaining: progress.estimateRemaining(overallStartTime, processedDaysTotal, totalDays),
rate: progress.calculateRate(overallStartTime, processedDaysTotal),
batchDuration: progress.formatElapsedTime(batchStartTime),
historyId: calculateHistoryId
});
// Save checkpoint in history
await connection.query(`
UPDATE public.calculate_history
SET additional_info = jsonb_set(additional_info, '{last_completed_batch}', $1::jsonb)
|| jsonb_build_object('last_processed_date', $2::text)
WHERE id = $3::integer;
`, [JSON.stringify(currentBatchNum), batchEndDateStr, calculateHistoryId]);
} catch (batchError) {
console.error(`\n--- ERROR in Batch ${currentBatchNum} (${batchStartDateStr} to ${batchEndDateStr}) ---`);
console.error(' Database Error:', batchError.message);
console.error(' DB Error Code:', batchError.code);
// Log detailed error to history and re-throw to stop the process
await connection.query(`
UPDATE public.calculate_history
SET status = 'failed',
end_time = NOW(),
duration_seconds = $1::integer,
error_message = $2::text,
additional_info = additional_info || jsonb_build_object('failed_batch', $3::integer, 'failed_date_range', $4::text)
WHERE id = $5::integer;
`, [
Math.round((Date.now() - overallStartTime) / 1000),
`Batch ${currentBatchNum} failed: ${batchError.message} (Code: ${batchError.code || 'N/A'})`,
currentBatchNum,
`${batchStartDateStr} to ${batchEndDateStr}`,
calculateHistoryId
]);
throw batchError; // Stop execution
}
currentBatchNum++;
// Optional delay between batches
// await new Promise(resolve => setTimeout(resolve, 500));
} // End while loop
// --- Final Outcome ---
const finalStatus = isCancelled ? 'cancelled' : 'completed';
const finalMessage = isCancelled ? `Calculation stopped after completing batch ${currentBatchNum - 1}.` : 'Historical snapshots backfill completed successfully.';
const finalDurationSec = Math.round((Date.now() - overallStartTime) / 1000);
console.log(`\n--- Backfill ${finalStatus.toUpperCase()} ---`);
console.log(finalMessage);
console.log(`Total duration: ${progress.formatElapsedTime(overallStartTime)}`);
// Update history record
await connection.query(`
UPDATE public.calculate_history SET status = $1, end_time = NOW(), duration_seconds = $2::integer, error_message = $3
WHERE id = $4::integer;
`, [finalStatus, finalDurationSec, (isCancelled ? 'User cancelled' : null), calculateHistoryId]);
if (!isCancelled) {
progress.clearProgress(); // Clear progress state only on successful completion
} else {
progress.outputProgress({ // Final cancelled status update
status: 'cancelled',
operation: finalMessage,
currentBatch: currentBatchNum - 1,
totalBatches: totalBatches,
totalDays: totalDays,
processedDays: processedDaysTotal,
elapsed: progress.formatElapsedTime(overallStartTime),
remaining: 'Cancelled',
rate: 0,
historyId: calculateHistoryId
});
}
return { success: true, status: finalStatus, message: finalMessage, duration: finalDurationSec };
} catch (error) {
console.error('\n--- Backfill encountered an unrecoverable error ---');
console.error(error.message);
const finalDurationSec = Math.round((Date.now() - overallStartTime) / 1000);
// Update history if possible
if (connection && calculateHistoryId) {
try {
await connection.query(`
UPDATE public.calculate_history
SET status = $1, end_time = NOW(), duration_seconds = $2::integer, error_message = $3::text
WHERE id = $4::integer;
`, [
isCancelled ? 'cancelled' : 'failed',
finalDurationSec,
error.message,
calculateHistoryId
]);
} catch (histError) {
console.error("Failed to update history record with error state:", histError);
}
} else {
console.error("Could not update history record (no ID or connection).");
}
// FIX: Use initialized value or a default if loop never started
const batchNumForError = currentBatchNum > cmdStartBatch ? currentBatchNum - 1 : cmdStartBatch - 1;
// Update progress.outputProgress call to match actual function signature
try {
// Create progress data object
const progressData = {
status: 'failed',
operation: 'Backfill failed',
message: error.message,
currentBatch: batchNumForError,
totalBatches: totalBatches,
totalDays: totalDays,
processedDays: processedDaysTotal,
elapsed: progress.formatElapsedTime(overallStartTime),
remaining: 'Failed',
rate: 0,
// Include history ID in progress data if needed
historyId: calculateHistoryId
};
// Call with single object parameter (not separate historyId)
progress.outputProgress(progressData);
} catch (progressError) {
console.error('Failed to report progress:', progressError);
}
return { success: false, status: 'failed', error: error.message, duration: finalDurationSec };
} finally {
if (connection) {
console.log('Releasing database connection.');
connection.release();
}
// Close pool only if this script is meant to be standalone
// If part of a larger app, the app should manage pool closure
// console.log('Closing database pool.');
// await closePool();
}
}
// --- Script Execution ---
// Parse command-line arguments
const args = process.argv.slice(2);
let cmdStartDateArg, cmdEndDateArg, cmdStartBatchArg = 1; // Default start batch is 1
for (let i = 0; i < args.length; i++) {
if (args[i] === '--start-date' && args[i+1]) cmdStartDateArg = args[++i];
else if (args[i] === '--end-date' && args[i+1]) cmdEndDateArg = args[++i];
else if (args[i] === '--start-batch' && args[i+1]) cmdStartBatchArg = parseInt(args[++i], 10);
}
if (isNaN(cmdStartBatchArg) || cmdStartBatchArg < 1) {
console.warn(`Invalid --start-batch value. Defaulting to 1.`);
cmdStartBatchArg = 1;
}
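// Example invocation (script file name assumed; adjust to wherever this file lives):
//   node backfill_historical_snapshots.js --start-date 2023-01-01 --end-date 2023-06-30
//   node backfill_historical_snapshots.js --start-batch 42   # resume an interrupted run
// Omitting --start-date/--end-date lets the script auto-detect the range from the imported history tables.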
// Run the backfill process
backfillSnapshots(cmdStartDateArg, cmdEndDateArg, cmdStartBatchArg)
.then(result => {
if (result.success) {
console.log(`\n${result.message} (Duration: ${result.duration}s)`);
process.exitCode = 0; // Success
} else {
console.error(`\n❌ Backfill failed: ${result.error || 'Unknown error'} (Duration: ${result.duration}s)`);
process.exitCode = 1; // Failure
}
})
.catch(err => {
console.error('\n❌ Unexpected error during backfill execution:', err);
process.exitCode = 1; // Failure
})
.finally(async () => {
// Ensure pool is closed if run standalone
console.log('Backfill script finished. Closing pool.');
await closePool(); // Make sure closePool exists and works in your db utils
process.exit(process.exitCode); // Exit with appropriate code
});
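
Each completed batch writes last_completed_batch and last_processed_date into calculate_history.additional_info, which is what makes --start-batch resumable. A minimal sketch of reading that checkpoint back, reusing the same db utility module the script requires above (the helper itself is hypothetical, not part of the script):

const { getConnection, closePool } = require('../scripts/metrics-new/utils/db');

// Look up the most recent backfill run so an operator can restart with --start-batch <last + 1>.
async function findResumePoint() {
  const connection = await getConnection();
  try {
    const result = await connection.query(`
      SELECT id,
             additional_info->>'last_completed_batch' AS last_completed_batch,
             additional_info->>'last_processed_date'  AS last_processed_date
      FROM public.calculate_history
      WHERE additional_info->>'type' = 'backfill_snapshots'
      ORDER BY start_time DESC
      LIMIT 1;
    `);
    return result.rows[0] || null; // null when no backfill run has been recorded yet
  } finally {
    connection.release();
    await closePool();
  }
}

findResumePoint().then(checkpoint => console.log(checkpoint));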


@@ -0,0 +1,161 @@
-- Description: Backfills the daily_product_snapshots table using imported historical unit data
-- (daily inventory/stats) and historical price data (current prices table).
-- - Uses imported daily sales/receipt UNIT counts for accuracy.
-- - ESTIMATES historical stock levels using a forward calculation.
-- - APPROXIMATES historical REVENUE using looked-up historical base prices.
-- - APPROXIMATES historical COGS, PROFIT, and STOCK VALUE using CURRENT product costs/prices.
-- Run ONCE after importing historical data and before initial product_metrics population.
-- Dependencies: Core import tables (products), imported history tables (imported_daily_inventory,
-- imported_product_stat_history, imported_product_current_prices),
-- daily_product_snapshots table must exist.
-- Frequency: Run ONCE.
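-- NOTE: Because COGS, profit, and stock valuation use CURRENT product costs/prices, the backfilled
-- margin and stock-value figures will shift if product costs are later updated; treat them as
-- approximations rather than audited history.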
CREATE OR REPLACE FUNCTION backfill_daily_snapshots_range_final(
_start_date DATE,
_end_date DATE
)
RETURNS VOID AS $$
DECLARE
_current_processing_date DATE := _start_date;
_batch_start_time TIMESTAMPTZ;
_row_count INTEGER;
BEGIN
RAISE NOTICE 'Starting FINAL historical snapshot backfill from % to %.', _start_date, _end_date;
RAISE NOTICE 'Using historical units and historical prices (for revenue approximation).';
RAISE NOTICE 'WARNING: Historical COGS, Profit, and Stock Value use CURRENT product costs/prices.';
-- Ensure end date is not in the future
IF _end_date >= CURRENT_DATE THEN
_end_date := CURRENT_DATE - INTERVAL '1 day';
RAISE NOTICE 'Adjusted end date to % to avoid conflict with hourly script.', _end_date;
END IF;
-- Performance: Create temporary table with product info to avoid repeated lookups
CREATE TEMP TABLE IF NOT EXISTS temp_product_info AS
SELECT
pid,
sku,
COALESCE(landing_cost_price, cost_price, 0.00) as effective_cost_price,
COALESCE(price, 0.00) as current_price,
COALESCE(regular_price, 0.00) as current_regular_price
FROM public.products;
-- Performance: Create index on temporary table
CREATE INDEX IF NOT EXISTS temp_product_info_pid_idx ON temp_product_info(pid);
ANALYZE temp_product_info;
RAISE NOTICE 'Created temporary product info table with % products', (SELECT COUNT(*) FROM temp_product_info);
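-- NOTE: Each day's estimated stock depends on the previous day's snapshot (PreviousStock CTE below),
-- so dates in this loop must be processed strictly in order.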
WHILE _current_processing_date <= _end_date LOOP
_batch_start_time := clock_timestamp();
RAISE NOTICE 'Processing date: %', _current_processing_date;
-- Get Daily Transaction Unit Info from imported history
WITH DailyHistoryUnits AS (
SELECT
pids.pid,
-- Prioritize daily_inventory, fallback to product_stat_history for sold qty
COALESCE(di.amountsold, ps.qty_sold, 0)::integer as units_sold_today,
COALESCE(di.qtyreceived, 0)::integer as units_received_today
FROM
(SELECT DISTINCT pid FROM temp_product_info) pids -- Ensure all products are considered
LEFT JOIN public.imported_daily_inventory di
ON pids.pid = di.pid AND di.date = _current_processing_date
LEFT JOIN public.imported_product_stat_history ps
ON pids.pid = ps.pid AND ps.date = _current_processing_date
-- Removed WHERE clause to ensure snapshots are created even for days with 0 activity,
-- allowing stock carry-over; the outer INSERT's WHERE clause decides which products actually get a row.
),
HistoricalPrice AS (
-- Find the base price (qty_buy=1) active on the processing date
SELECT DISTINCT ON (pid)
pid,
price_each
FROM public.imported_product_current_prices
WHERE
qty_buy = 1
-- Use TIMESTAMPTZ comparison logic:
AND date_active <= (_current_processing_date + interval '1 day' - interval '1 second') -- Active sometime on or before end of processing day
AND (date_deactive IS NULL OR date_deactive > _current_processing_date) -- Not deactivated before start of processing day
-- Assuming 'active' flag isn't needed if dates are correct; add 'AND active != 0' if necessary
ORDER BY
pid, date_active DESC -- Get the most recently activated price
),
PreviousStock AS (
-- Get the estimated stock from the PREVIOUS day snapshot
SELECT pid, eod_stock_quantity
FROM public.daily_product_snapshots
WHERE snapshot_date = _current_processing_date - INTERVAL '1 day'
)
-- Insert into the daily snapshots table
INSERT INTO public.daily_product_snapshots (
snapshot_date, pid, sku,
eod_stock_quantity, eod_stock_cost, eod_stock_retail, eod_stock_gross, stockout_flag,
units_sold, units_returned,
gross_revenue, discounts, returns_revenue,
net_revenue, cogs, gross_regular_revenue, profit,
units_received, cost_received,
calculation_timestamp
)
SELECT
_current_processing_date AS snapshot_date,
p.pid,
p.sku,
-- Estimated EOD Stock (using historical daily units)
-- Handle potential NULL from joins with COALESCE 0
COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0) AS estimated_eod_stock,
-- Valued Stock (using estimated stock and CURRENT prices/costs - APPROXIMATION)
GREATEST(0, COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0)) * p.effective_cost_price AS eod_stock_cost,
GREATEST(0, COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0)) * p.current_price AS eod_stock_retail, -- Stock retail uses current price
GREATEST(0, COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0)) * p.current_regular_price AS eod_stock_gross, -- Stock gross uses current regular price
-- Stockout Flag (based on estimated stock)
(COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0)) <= 0 AS stockout_flag,
-- Today's Unit Aggregates from History
COALESCE(dh.units_sold_today, 0) as units_sold,
0 AS units_returned, -- Placeholder: Cannot determine returns from daily summary
-- Monetary Values using looked-up Historical Price and CURRENT Cost/RegPrice
COALESCE(dh.units_sold_today, 0) * COALESCE(hp.price_each, p.current_price) AS gross_revenue, -- Approx Revenue
0 AS discounts, -- Placeholder
0 AS returns_revenue, -- Placeholder
COALESCE(dh.units_sold_today, 0) * COALESCE(hp.price_each, p.current_price) AS net_revenue, -- Approx Net Revenue
COALESCE(dh.units_sold_today, 0) * p.effective_cost_price AS cogs, -- Approx COGS (uses CURRENT cost)
COALESCE(dh.units_sold_today, 0) * p.current_regular_price AS gross_regular_revenue, -- Approx Gross Regular Revenue
-- Approx Profit
(COALESCE(dh.units_sold_today, 0) * COALESCE(hp.price_each, p.current_price)) - (COALESCE(dh.units_sold_today, 0) * p.effective_cost_price) AS profit,
COALESCE(dh.units_received_today, 0) as units_received,
-- Estimate received cost using CURRENT product cost
COALESCE(dh.units_received_today, 0) * p.effective_cost_price AS cost_received, -- Approx
clock_timestamp() -- Timestamp of this specific calculation
FROM temp_product_info p -- Use the temp table for better performance
LEFT JOIN PreviousStock ps ON p.pid = ps.pid
LEFT JOIN DailyHistoryUnits dh ON p.pid = dh.pid -- Join today's historical activity
LEFT JOIN HistoricalPrice hp ON p.pid = hp.pid -- Join the looked-up historical price
-- Optimization: Only process products with activity or previous stock
WHERE (dh.units_sold_today > 0 OR dh.units_received_today > 0 OR COALESCE(ps.eod_stock_quantity, 0) > 0)
ON CONFLICT (snapshot_date, pid) DO NOTHING; -- Avoid errors if rerunning parts, but prefer clean runs
GET DIAGNOSTICS _row_count = ROW_COUNT;
RAISE NOTICE 'Processed %: Inserted/Skipped % rows. Duration: %',
_current_processing_date,
_row_count,
clock_timestamp() - _batch_start_time;
_current_processing_date := _current_processing_date + INTERVAL '1 day';
END LOOP;
-- Clean up temporary tables
DROP TABLE IF EXISTS temp_product_info;
RAISE NOTICE 'Finished FINAL historical snapshot backfill.';
END;
$$ LANGUAGE plpgsql;
-- Example usage:
-- SELECT backfill_daily_snapshots_range_final('2023-01-01'::date, '2023-12-31'::date);
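-- Optional sanity check after a run (column names as inserted above):
-- SELECT snapshot_date, COUNT(*) AS products, SUM(units_sold) AS units_sold, SUM(net_revenue) AS net_revenue
-- FROM public.daily_product_snapshots
-- GROUP BY snapshot_date
-- ORDER BY snapshot_date;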


@@ -57,18 +57,20 @@ const TEMP_TABLES = [
'temp_daily_sales',
'temp_product_stats',
'temp_category_sales',
'temp_category_stats'
'temp_category_stats',
'temp_beginning_inventory',
'temp_monthly_inventory'
];
// Add cleanup function for temporary tables
async function cleanupTemporaryTables(connection) {
try {
// Drop each temporary table if it exists
for (const table of TEMP_TABLES) {
await connection.query(`DROP TEMPORARY TABLE IF EXISTS ${table}`);
await connection.query(`DROP TABLE IF EXISTS ${table}`);
}
} catch (error) {
logError(error, 'Error cleaning up temporary tables');
throw error; // Re-throw to be handled by the caller
} catch (err) {
console.error('Error cleaning up temporary tables:', err);
}
}
@@ -86,22 +88,42 @@ let isCancelled = false;
function cancelCalculation() {
isCancelled = true;
global.clearProgress();
// Format as SSE event
const event = {
progress: {
status: 'cancelled',
operation: 'Calculation cancelled',
current: 0,
total: 0,
elapsed: null,
remaining: null,
rate: 0,
timestamp: Date.now()
}
console.log('Calculation has been cancelled by user');
// Force-terminate any query that's been running for more than 5 seconds
try {
const connection = getConnection();
connection.then(async (conn) => {
try {
// Identify and terminate long-running queries from our application
await conn.query(`
SELECT pg_cancel_backend(pid)
FROM pg_stat_activity
WHERE query_start < now() - interval '5 seconds'
AND application_name LIKE '%node%'
AND query NOT LIKE '%pg_cancel_backend%'
`);
// Clean up any temporary tables
await cleanupTemporaryTables(conn);
// Release connection
conn.release();
} catch (err) {
console.error('Error during force cancellation:', err);
conn.release();
}
}).catch(err => {
console.error('Could not get connection for cancellation:', err);
});
} catch (err) {
console.error('Failed to terminate running queries:', err);
}
return {
success: true,
message: 'Calculation has been cancelled'
};
process.stdout.write(JSON.stringify(event) + '\n');
process.exit(0);
}
// Handle SIGTERM signal for cancellation
@@ -119,6 +141,15 @@ async function calculateMetrics() {
let totalPurchaseOrders = 0;
let calculateHistoryId;
// Set a maximum execution time (30 minutes)
const MAX_EXECUTION_TIME = 30 * 60 * 1000;
const timeout = setTimeout(() => {
console.error(`Calculation timed out after ${MAX_EXECUTION_TIME/1000} seconds, forcing termination`);
// Call cancel and force exit
cancelCalculation();
process.exit(1);
}, MAX_EXECUTION_TIME);
try {
// Clean up any previously running calculations
connection = await getConnection();
@@ -127,24 +158,24 @@ async function calculateMetrics() {
SET
status = 'cancelled',
end_time = NOW(),
duration_seconds = TIMESTAMPDIFF(SECOND, start_time, NOW()),
duration_seconds = EXTRACT(EPOCH FROM (NOW() - start_time))::INTEGER,
error_message = 'Previous calculation was not completed properly'
WHERE status = 'running'
`);
// Get counts from all relevant tables
const [[productCount], [orderCount], [poCount]] = await Promise.all([
const [productCountResult, orderCountResult, poCountResult] = await Promise.all([
connection.query('SELECT COUNT(*) as total FROM products'),
connection.query('SELECT COUNT(*) as total FROM orders'),
connection.query('SELECT COUNT(*) as total FROM purchase_orders')
]);
totalProducts = productCount.total;
totalOrders = orderCount.total;
totalPurchaseOrders = poCount.total;
totalProducts = parseInt(productCountResult.rows[0].total);
totalOrders = parseInt(orderCountResult.rows[0].total);
totalPurchaseOrders = parseInt(poCountResult.rows[0].total);
// Create history record for this calculation
const [historyResult] = await connection.query(`
const historyResult = await connection.query(`
INSERT INTO calculate_history (
start_time,
status,
@@ -155,19 +186,19 @@ async function calculateMetrics() {
) VALUES (
NOW(),
'running',
?,
?,
?,
JSON_OBJECT(
'skip_product_metrics', ?,
'skip_time_aggregates', ?,
'skip_financial_metrics', ?,
'skip_vendor_metrics', ?,
'skip_category_metrics', ?,
'skip_brand_metrics', ?,
'skip_sales_forecasts', ?
$1,
$2,
$3,
jsonb_build_object(
'skip_product_metrics', ($4::int > 0),
'skip_time_aggregates', ($5::int > 0),
'skip_financial_metrics', ($6::int > 0),
'skip_vendor_metrics', ($7::int > 0),
'skip_category_metrics', ($8::int > 0),
'skip_brand_metrics', ($9::int > 0),
'skip_sales_forecasts', ($10::int > 0)
)
)
) RETURNING id
`, [
totalProducts,
totalOrders,
@@ -180,8 +211,7 @@ async function calculateMetrics() {
SKIP_BRAND_METRICS,
SKIP_SALES_FORECASTS
]);
calculateHistoryId = historyResult.insertId;
connection.release();
calculateHistoryId = historyResult.rows[0].id;
// Add debug logging for the progress functions
console.log('Debug - Progress functions:', {
@@ -199,6 +229,8 @@ async function calculateMetrics() {
throw err;
}
// Release the connection before getting a new one
connection.release();
isCancelled = false;
connection = await getConnection();
@@ -234,10 +266,10 @@ async function calculateMetrics() {
await connection.query(`
UPDATE calculate_history
SET
processed_products = ?,
processed_orders = ?,
processed_purchase_orders = ?
WHERE id = ?
processed_products = $1,
processed_orders = $2,
processed_purchase_orders = $3
WHERE id = $4
`, [safeProducts, safeOrders, safePurchaseOrders, calculateHistoryId]);
};
@@ -359,216 +391,6 @@ async function calculateMetrics() {
console.log('Skipping sales forecasts calculation');
}
// Calculate ABC classification
outputProgress({
status: 'running',
operation: 'Starting ABC classification',
current: processedProducts || 0,
total: totalProducts || 0,
elapsed: formatElapsedTime(startTime),
remaining: estimateRemaining(startTime, processedProducts || 0, totalProducts || 0),
rate: calculateRate(startTime, processedProducts || 0),
percentage: (((processedProducts || 0) / (totalProducts || 1)) * 100).toFixed(1),
timing: {
start_time: new Date(startTime).toISOString(),
end_time: new Date().toISOString(),
elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
}
});
if (isCancelled) return {
processedProducts: processedProducts || 0,
processedOrders: processedOrders || 0,
processedPurchaseOrders: 0,
success: false
};
const [abcConfig] = await connection.query('SELECT a_threshold, b_threshold FROM abc_classification_config WHERE id = 1');
const abcThresholds = abcConfig[0] || { a_threshold: 20, b_threshold: 50 };
// First, create and populate the rankings table with an index
await connection.query('DROP TEMPORARY TABLE IF EXISTS temp_revenue_ranks');
await connection.query(`
CREATE TEMPORARY TABLE temp_revenue_ranks (
pid BIGINT NOT NULL,
total_revenue DECIMAL(10,3),
rank_num INT,
total_count INT,
PRIMARY KEY (pid),
INDEX (rank_num)
) ENGINE=MEMORY
`);
outputProgress({
status: 'running',
operation: 'Creating revenue rankings',
current: processedProducts || 0,
total: totalProducts || 0,
elapsed: formatElapsedTime(startTime),
remaining: estimateRemaining(startTime, processedProducts || 0, totalProducts || 0),
rate: calculateRate(startTime, processedProducts || 0),
percentage: (((processedProducts || 0) / (totalProducts || 1)) * 100).toFixed(1),
timing: {
start_time: new Date(startTime).toISOString(),
end_time: new Date().toISOString(),
elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
}
});
if (isCancelled) return {
processedProducts: processedProducts || 0,
processedOrders: processedOrders || 0,
processedPurchaseOrders: 0,
success: false
};
await connection.query(`
INSERT INTO temp_revenue_ranks
SELECT
pid,
total_revenue,
@rank := @rank + 1 as rank_num,
@total_count := @rank as total_count
FROM (
SELECT pid, total_revenue
FROM product_metrics
WHERE total_revenue > 0
ORDER BY total_revenue DESC
) ranked,
(SELECT @rank := 0) r
`);
// Get total count for percentage calculation
const [rankingCount] = await connection.query('SELECT MAX(rank_num) as total_count FROM temp_revenue_ranks');
const totalCount = rankingCount[0].total_count || 1;
const max_rank = totalCount; // Store max_rank for use in classification
outputProgress({
status: 'running',
operation: 'Updating ABC classifications',
current: processedProducts || 0,
total: totalProducts || 0,
elapsed: formatElapsedTime(startTime),
remaining: estimateRemaining(startTime, processedProducts || 0, totalProducts || 0),
rate: calculateRate(startTime, processedProducts || 0),
percentage: (((processedProducts || 0) / (totalProducts || 1)) * 100).toFixed(1),
timing: {
start_time: new Date(startTime).toISOString(),
end_time: new Date().toISOString(),
elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
}
});
if (isCancelled) return {
processedProducts: processedProducts || 0,
processedOrders: processedOrders || 0,
processedPurchaseOrders: 0,
success: false
};
// ABC classification progress tracking
let abcProcessedCount = 0;
const batchSize = 5000;
let lastProgressUpdate = Date.now();
const progressUpdateInterval = 1000; // Update every second
while (true) {
if (isCancelled) return {
processedProducts: Number(processedProducts) || 0,
processedOrders: Number(processedOrders) || 0,
processedPurchaseOrders: 0,
success: false
};
// First get a batch of PIDs that need updating
const [pids] = await connection.query(`
SELECT pm.pid
FROM product_metrics pm
LEFT JOIN temp_revenue_ranks tr ON pm.pid = tr.pid
WHERE pm.abc_class IS NULL
OR pm.abc_class !=
CASE
WHEN tr.rank_num IS NULL THEN 'C'
WHEN (tr.rank_num / ?) * 100 <= ? THEN 'A'
WHEN (tr.rank_num / ?) * 100 <= ? THEN 'B'
ELSE 'C'
END
LIMIT ?
`, [max_rank, abcThresholds.a_threshold,
max_rank, abcThresholds.b_threshold,
batchSize]);
if (pids.length === 0) {
break;
}
// Then update just those PIDs
const [result] = await connection.query(`
UPDATE product_metrics pm
LEFT JOIN temp_revenue_ranks tr ON pm.pid = tr.pid
SET pm.abc_class =
CASE
WHEN tr.rank_num IS NULL THEN 'C'
WHEN (tr.rank_num / ?) * 100 <= ? THEN 'A'
WHEN (tr.rank_num / ?) * 100 <= ? THEN 'B'
ELSE 'C'
END,
pm.last_calculated_at = NOW()
WHERE pm.pid IN (?)
`, [max_rank, abcThresholds.a_threshold,
max_rank, abcThresholds.b_threshold,
pids.map(row => row.pid)]);
abcProcessedCount += result.affectedRows;
// Calculate progress ensuring valid numbers
const currentProgress = Math.floor(totalProducts * (0.99 + (abcProcessedCount / (totalCount || 1)) * 0.01));
processedProducts = Number(currentProgress) || processedProducts || 0;
// Only update progress at most once per second
const now = Date.now();
if (now - lastProgressUpdate >= progressUpdateInterval) {
const progress = ensureValidProgress(processedProducts, totalProducts);
outputProgress({
status: 'running',
operation: 'ABC classification progress',
current: progress.current,
total: progress.total,
elapsed: formatElapsedTime(startTime),
remaining: estimateRemaining(startTime, progress.current, progress.total),
rate: calculateRate(startTime, progress.current),
percentage: progress.percentage,
timing: {
start_time: new Date(startTime).toISOString(),
end_time: new Date().toISOString(),
elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
}
});
lastProgressUpdate = now;
}
// Update database progress
await updateProgress(processedProducts, processedOrders, processedPurchaseOrders);
// Small delay between batches to allow other transactions
await new Promise(resolve => setTimeout(resolve, 100));
}
// Clean up
await connection.query('DROP TEMPORARY TABLE IF EXISTS temp_revenue_ranks');
const endTime = Date.now();
const totalElapsedSeconds = Math.round((endTime - startTime) / 1000);
// Update calculate_status for ABC classification
await connection.query(`
INSERT INTO calculate_status (module_name, last_calculation_timestamp)
VALUES ('abc_classification', NOW())
ON DUPLICATE KEY UPDATE last_calculation_timestamp = NOW()
`);
// Final progress update with guaranteed valid numbers
const finalProgress = ensureValidProgress(totalProducts, totalProducts);
@@ -578,14 +400,14 @@ async function calculateMetrics() {
operation: 'Metrics calculation complete',
current: finalProgress.current,
total: finalProgress.total,
elapsed: formatElapsedTime(startTime),
elapsed: global.formatElapsedTime(startTime),
remaining: '0s',
rate: calculateRate(startTime, finalProgress.current),
rate: global.calculateRate(startTime, finalProgress.current),
percentage: '100',
timing: {
start_time: new Date(startTime).toISOString(),
end_time: new Date().toISOString(),
elapsed_seconds: totalElapsedSeconds
elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
}
});
@@ -601,13 +423,13 @@ async function calculateMetrics() {
UPDATE calculate_history
SET
end_time = NOW(),
duration_seconds = ?,
processed_products = ?,
processed_orders = ?,
processed_purchase_orders = ?,
duration_seconds = $1,
processed_products = $2,
processed_orders = $3,
processed_purchase_orders = $4,
status = 'completed'
WHERE id = ?
`, [totalElapsedSeconds,
WHERE id = $5
`, [Math.round((Date.now() - startTime) / 1000),
finalStats.processedProducts,
finalStats.processedOrders,
finalStats.processedPurchaseOrders,
@@ -616,6 +438,11 @@ async function calculateMetrics() {
// Clear progress file on successful completion
global.clearProgress();
return {
success: true,
message: 'Calculation completed successfully',
duration: Math.round((Date.now() - startTime) / 1000)
};
} catch (error) {
const endTime = Date.now();
const totalElapsedSeconds = Math.round((endTime - startTime) / 1000);
@@ -625,13 +452,13 @@ async function calculateMetrics() {
UPDATE calculate_history
SET
end_time = NOW(),
duration_seconds = ?,
processed_products = ?,
processed_orders = ?,
processed_purchase_orders = ?,
status = ?,
error_message = ?
WHERE id = ?
duration_seconds = $1,
processed_products = $2,
processed_orders = $3,
processed_purchase_orders = $4,
status = $5,
error_message = $6
WHERE id = $7
`, [
totalElapsedSeconds,
processedProducts || 0, // Ensure we have a valid number
@@ -677,17 +504,38 @@ async function calculateMetrics() {
}
throw error;
} finally {
// Clear the timeout to prevent forced termination
clearTimeout(timeout);
// Always clean up and release connection
if (connection) {
// Ensure temporary tables are cleaned up
await cleanupTemporaryTables(connection);
connection.release();
try {
await cleanupTemporaryTables(connection);
connection.release();
} catch (err) {
console.error('Error in final cleanup:', err);
}
}
// Close the connection pool when we're done
await closePool();
}
} catch (error) {
success = false;
logError(error, 'Error in metrics calculation');
console.error('Error in metrics calculation', error);
try {
if (connection) {
await connection.query(`
UPDATE calculate_history
SET
status = 'failed',
end_time = NOW(),
duration_seconds = EXTRACT(EPOCH FROM (NOW() - start_time))::INTEGER,
error_message = $1
WHERE id = $2
`, [error.message.substring(0, 500), calculateHistoryId]);
}
} catch (updateError) {
console.error('Error updating calculation history:', updateError);
}
throw error;
}
}
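
The hunks above port the metrics script from MySQL-flavoured queries to PostgreSQL, and two mechanical changes repeat throughout: positional placeholders move from ? to $1, $2, …, and results are read from a rows array instead of being destructured. A compact sketch of the pattern, reusing the project's getConnection helper (driver names mysql2 and node-postgres are assumed, as the diff implies):

const { getConnection } = require('../scripts/metrics-new/utils/db');

async function countProducts() {
  const connection = await getConnection();
  try {
    // Old MySQL style (mysql2): const [rows] = await connection.query('SELECT COUNT(*) AS total FROM products');
    //                           const total = rows[0].total;
    // PostgreSQL style (node-postgres): numbered placeholders and a result object with .rows
    const result = await connection.query('SELECT COUNT(*) AS total FROM products WHERE price > $1', [0]);
    return parseInt(result.rows[0].total, 10); // COUNT(*) comes back as a string in node-postgres
  } finally {
    connection.release();
  }
}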


@@ -0,0 +1,242 @@
-- -- Configuration tables schema
-- -- Stock threshold configurations
-- CREATE TABLE stock_thresholds (
-- id INTEGER NOT NULL,
-- category_id BIGINT, -- NULL means default/global threshold
-- vendor VARCHAR(100), -- NULL means applies to all vendors
-- critical_days INTEGER NOT NULL DEFAULT 7,
-- reorder_days INTEGER NOT NULL DEFAULT 14,
-- overstock_days INTEGER NOT NULL DEFAULT 90,
-- low_stock_threshold INTEGER NOT NULL DEFAULT 5,
-- min_reorder_quantity INTEGER NOT NULL DEFAULT 1,
-- created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
-- updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
-- PRIMARY KEY (id),
-- FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
-- UNIQUE (category_id, vendor)
-- );
-- CREATE TRIGGER update_stock_thresholds_updated
-- BEFORE UPDATE ON stock_thresholds
-- FOR EACH ROW
-- EXECUTE FUNCTION update_updated_at_column();
-- CREATE INDEX idx_st_metrics ON stock_thresholds(category_id, vendor);
-- -- Lead time threshold configurations
-- CREATE TABLE lead_time_thresholds (
-- id INTEGER NOT NULL,
-- category_id BIGINT, -- NULL means default/global threshold
-- vendor VARCHAR(100), -- NULL means applies to all vendors
-- target_days INTEGER NOT NULL DEFAULT 14,
-- warning_days INTEGER NOT NULL DEFAULT 21,
-- critical_days INTEGER NOT NULL DEFAULT 30,
-- created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
-- updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
-- PRIMARY KEY (id),
-- FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
-- UNIQUE (category_id, vendor)
-- );
-- CREATE TRIGGER update_lead_time_thresholds_updated
-- BEFORE UPDATE ON lead_time_thresholds
-- FOR EACH ROW
-- EXECUTE FUNCTION update_updated_at_column();
-- -- Sales velocity window configurations
-- CREATE TABLE sales_velocity_config (
-- id INTEGER NOT NULL,
-- category_id BIGINT, -- NULL means default/global threshold
-- vendor VARCHAR(100), -- NULL means applies to all vendors
-- daily_window_days INTEGER NOT NULL DEFAULT 30,
-- weekly_window_days INTEGER NOT NULL DEFAULT 7,
-- monthly_window_days INTEGER NOT NULL DEFAULT 90,
-- created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
-- updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
-- PRIMARY KEY (id),
-- FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
-- UNIQUE (category_id, vendor)
-- );
-- CREATE TRIGGER update_sales_velocity_config_updated
-- BEFORE UPDATE ON sales_velocity_config
-- FOR EACH ROW
-- EXECUTE FUNCTION update_updated_at_column();
-- CREATE INDEX idx_sv_metrics ON sales_velocity_config(category_id, vendor);
-- -- ABC Classification configurations
-- CREATE TABLE abc_classification_config (
-- id INTEGER NOT NULL PRIMARY KEY,
-- a_threshold DECIMAL(5,2) NOT NULL DEFAULT 20.0,
-- b_threshold DECIMAL(5,2) NOT NULL DEFAULT 50.0,
-- classification_period_days INTEGER NOT NULL DEFAULT 90,
-- created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
-- updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
-- );
-- CREATE TRIGGER update_abc_classification_config_updated
-- BEFORE UPDATE ON abc_classification_config
-- FOR EACH ROW
-- EXECUTE FUNCTION update_updated_at_column();
-- -- Safety stock configurations
-- CREATE TABLE safety_stock_config (
-- id INTEGER NOT NULL,
-- category_id BIGINT, -- NULL means default/global threshold
-- vendor VARCHAR(100), -- NULL means applies to all vendors
-- coverage_days INTEGER NOT NULL DEFAULT 14,
-- service_level DECIMAL(5,2) NOT NULL DEFAULT 95.0,
-- created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
-- updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
-- PRIMARY KEY (id),
-- FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
-- UNIQUE (category_id, vendor)
-- );
-- CREATE TRIGGER update_safety_stock_config_updated
-- BEFORE UPDATE ON safety_stock_config
-- FOR EACH ROW
-- EXECUTE FUNCTION update_updated_at_column();
-- CREATE INDEX idx_ss_metrics ON safety_stock_config(category_id, vendor);
-- -- Turnover rate configurations
-- CREATE TABLE turnover_config (
-- id INTEGER NOT NULL,
-- category_id BIGINT, -- NULL means default/global threshold
-- vendor VARCHAR(100), -- NULL means applies to all vendors
-- calculation_period_days INTEGER NOT NULL DEFAULT 30,
-- target_rate DECIMAL(10,2) NOT NULL DEFAULT 1.0,
-- created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
-- updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
-- PRIMARY KEY (id),
-- FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
-- UNIQUE (category_id, vendor)
-- );
-- CREATE TRIGGER update_turnover_config_updated
-- BEFORE UPDATE ON turnover_config
-- FOR EACH ROW
-- EXECUTE FUNCTION update_updated_at_column();
-- -- Create table for sales seasonality factors
-- CREATE TABLE sales_seasonality (
-- month INTEGER NOT NULL,
-- seasonality_factor DECIMAL(5,3) DEFAULT 0,
-- last_updated TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
-- PRIMARY KEY (month),
-- CONSTRAINT month_range CHECK (month BETWEEN 1 AND 12),
-- CONSTRAINT seasonality_range CHECK (seasonality_factor BETWEEN -1.0 AND 1.0)
-- );
-- CREATE TRIGGER update_sales_seasonality_updated
-- BEFORE UPDATE ON sales_seasonality
-- FOR EACH ROW
-- EXECUTE FUNCTION update_updated_at_column();
-- -- Create table for financial calculation parameters
-- CREATE TABLE financial_calc_config (
-- id INTEGER NOT NULL PRIMARY KEY,
-- order_cost DECIMAL(10,2) NOT NULL DEFAULT 25.00, -- The fixed cost per purchase order (used in EOQ)
-- holding_rate DECIMAL(10,4) NOT NULL DEFAULT 0.25, -- The annual inventory holding cost as a percentage of unit cost (used in EOQ)
-- service_level_z_score DECIMAL(10,4) NOT NULL DEFAULT 1.96, -- Z-score for ~95% service level (used in Safety Stock)
-- min_reorder_qty INTEGER NOT NULL DEFAULT 1, -- Minimum reorder quantity
-- default_reorder_qty INTEGER NOT NULL DEFAULT 5, -- Default reorder quantity when sales data is insufficient
-- default_safety_stock INTEGER NOT NULL DEFAULT 5, -- Default safety stock when sales data is insufficient
-- created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
-- updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
-- );
-- CREATE TRIGGER update_financial_calc_config_updated
-- BEFORE UPDATE ON financial_calc_config
-- FOR EACH ROW
-- EXECUTE FUNCTION update_updated_at_column();
-- -- Insert default global thresholds
-- INSERT INTO stock_thresholds (id, category_id, vendor, critical_days, reorder_days, overstock_days)
-- VALUES (1, NULL, NULL, 7, 14, 90)
-- ON CONFLICT (id) DO UPDATE SET
-- critical_days = EXCLUDED.critical_days,
-- reorder_days = EXCLUDED.reorder_days,
-- overstock_days = EXCLUDED.overstock_days;
-- INSERT INTO lead_time_thresholds (id, category_id, vendor, target_days, warning_days, critical_days)
-- VALUES (1, NULL, NULL, 14, 21, 30)
-- ON CONFLICT (id) DO UPDATE SET
-- target_days = EXCLUDED.target_days,
-- warning_days = EXCLUDED.warning_days,
-- critical_days = EXCLUDED.critical_days;
-- INSERT INTO sales_velocity_config (id, category_id, vendor, daily_window_days, weekly_window_days, monthly_window_days)
-- VALUES (1, NULL, NULL, 30, 7, 90)
-- ON CONFLICT (id) DO UPDATE SET
-- daily_window_days = EXCLUDED.daily_window_days,
-- weekly_window_days = EXCLUDED.weekly_window_days,
-- monthly_window_days = EXCLUDED.monthly_window_days;
-- INSERT INTO abc_classification_config (id, a_threshold, b_threshold, classification_period_days)
-- VALUES (1, 20.0, 50.0, 90)
-- ON CONFLICT (id) DO UPDATE SET
-- a_threshold = EXCLUDED.a_threshold,
-- b_threshold = EXCLUDED.b_threshold,
-- classification_period_days = EXCLUDED.classification_period_days;
-- INSERT INTO safety_stock_config (id, category_id, vendor, coverage_days, service_level)
-- VALUES (1, NULL, NULL, 14, 95.0)
-- ON CONFLICT (id) DO UPDATE SET
-- coverage_days = EXCLUDED.coverage_days,
-- service_level = EXCLUDED.service_level;
-- INSERT INTO turnover_config (id, category_id, vendor, calculation_period_days, target_rate)
-- VALUES (1, NULL, NULL, 30, 1.0)
-- ON CONFLICT (id) DO UPDATE SET
-- calculation_period_days = EXCLUDED.calculation_period_days,
-- target_rate = EXCLUDED.target_rate;
-- -- Insert default seasonality factors (neutral)
-- INSERT INTO sales_seasonality (month, seasonality_factor)
-- VALUES
-- (1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0),
-- (7, 0), (8, 0), (9, 0), (10, 0), (11, 0), (12, 0)
-- ON CONFLICT (month) DO UPDATE SET
-- last_updated = CURRENT_TIMESTAMP;
-- -- Insert default values
-- INSERT INTO financial_calc_config (id, order_cost, holding_rate, service_level_z_score, min_reorder_qty, default_reorder_qty, default_safety_stock)
-- VALUES (1, 25.00, 0.25, 1.96, 1, 5, 5)
-- ON CONFLICT (id) DO UPDATE SET
-- order_cost = EXCLUDED.order_cost,
-- holding_rate = EXCLUDED.holding_rate,
-- service_level_z_score = EXCLUDED.service_level_z_score,
-- min_reorder_qty = EXCLUDED.min_reorder_qty,
-- default_reorder_qty = EXCLUDED.default_reorder_qty,
-- default_safety_stock = EXCLUDED.default_safety_stock;
-- -- View to show thresholds with category names
-- CREATE OR REPLACE VIEW stock_thresholds_view AS
-- SELECT
-- st.*,
-- c.name as category_name,
-- CASE
-- WHEN st.category_id IS NULL AND st.vendor IS NULL THEN 'Global Default'
-- WHEN st.category_id IS NULL THEN 'Vendor: ' || st.vendor
-- WHEN st.vendor IS NULL THEN 'Category: ' || c.name
-- ELSE 'Category: ' || c.name || ' / Vendor: ' || st.vendor
-- END as threshold_scope
-- FROM
-- stock_thresholds st
-- LEFT JOIN
-- categories c ON st.category_id = c.cat_id
-- ORDER BY
-- CASE
-- WHEN st.category_id IS NULL AND st.vendor IS NULL THEN 1
-- WHEN st.category_id IS NULL THEN 2
-- WHEN st.vendor IS NULL THEN 3
-- ELSE 4
-- END,
-- c.name,
-- st.vendor;

Some files were not shown because too many files have changed in this diff.