Compare commits
184 Commits
169407a729...merge-dash
| Author | SHA1 | Date |
|---|---|---|
|  | 763aa4f74b |  |
|  | 520ff5bd74 |  |
|  | 8496bbc4ee |  |
|  | 38f6688f10 |  |
|  | fcfe7e2fab |  |
|  | 2e3e81a02b |  |
|  | 8606a90e34 |  |
|  | a97819f4a6 |  |
|  | dd82c624d8 |  |
|  | 7999e1e64a |  |
|  | 12a0f540b3 |  |
|  | e793cb0cc5 |  |
|  | b2330dee22 |  |
|  | 00501704df |  |
|  | 4cb41a7e4c |  |
|  | d05d27494d |  |
|  | 4ed734e5c0 |  |
|  | 1e3be5d4cb |  |
|  | 8dd852dd6a |  |
|  | eeff5817ea |  |
|  | 1b19feb172 |  |
|  | 80ff8124ec |  |
|  | 8508bfac93 |  |
|  | ac14179bd2 |  |
|  | 00249f7c33 |  |
|  | f271f3aae4 |  |
|  | 43f76e4ac0 |  |
|  | 92ff80fba2 |  |
|  | a4c1a19d2e |  |
|  | c9b656d34b |  |
|  | d081a60662 |  |
|  | 4021fe487d |  |
|  | 4552fa4862 |  |
|  | 2601a04211 |  |
|  | 6051b849d6 |  |
|  | dbd0232285 |  |
|  | 1b9f01d101 |  |
|  | a9dbbbf824 |  |
|  | 97296946f1 |  |
|  | 5035dda733 |  |
|  | 796a2e5d1f |  |
|  | 047122a620 |  |
|  | 4c4359908c |  |
|  | 54cc4be1e3 |  |
|  | f4854423ab |  |
|  | 0796518e26 |  |
|  | 7aa494aaad |  |
|  | 1e0be3f86e |  |
|  | a068a253cd |  |
|  | 087ec710f6 |  |
|  | 957c7b5eb1 |  |
|  | 8b8845b423 |  |
|  | e5c4f617c5 |  |
|  | 8e19e6cd74 |  |
|  | 749907bd30 |  |
|  | 108181c63d |  |
|  | 5dd779cb4a |  |
|  | 7b0e792d03 |  |
|  | 517bbe72f4 |  |
|  | 87d4b9e804 |  |
|  | 75da2c6772 |  |
|  | 00a02aa788 |  |
|  | 114018080a |  |
|  | 228ae8b2a9 |  |
|  | dd4b3f7145 |  |
|  | 7eb4077224 |  |
|  | d60a8cbc6e |  |
|  | 1fcbf54989 |  |
|  | ce75496770 |  |
|  | 7eae4a0b29 |  |
|  | f421154c1d |  |
|  | 03dc119a15 |  |
|  | 1963bee00c |  |
|  | 387e7e5e73 |  |
|  | a51a48ce89 |  |
|  | aacb3a2fd0 |  |
|  | 35d2f0df7c |  |
|  | 7d46ebd6ba |  |
|  | 1496aa57b1 |  |
|  | fc9ef2f0d7 |  |
|  | af067f7360 |  |
|  | 949b543d1f |  |
|  | 8fdb68fb19 |  |
|  | 136f767309 |  |
|  | aa9664c459 |  |
|  | f60f0b1b5c |  |
|  | 676cd44d9d |  |
|  | 1d081bb218 |  |
|  | 52ae7e10aa |  |
|  | 153bbecc44 |  |
|  | cb46970808 |  |
|  | 97fa7f3495 |  |
|  | a88dbb8486 |  |
|  | d0a83c04ca |  |
|  | f95c1f2d43 |  |
|  | 0ef27a3229 |  |
|  | 0f89373d11 |  |
|  | f55d35e301 |  |
|  | 1aee18a025 |  |
|  | 0068d77ad9 |  |
|  | b69182e2c7 |  |
|  | 1c8709f520 |  |
|  | de1408bd58 |  |
|  | c295c330ff |  |
|  | 7cc723ce83 |  |
|  | c3c48669ad |  |
|  | 78a0018940 |  |
|  | 851cc3c4cc |  |
|  | 74454cdc7f |  |
|  | 31c838197a |  |
|  | 45fa583ce8 |  |
|  | c96f514bcd |  |
|  | 6a5e6d2bfb |  |
|  | 875d0b8f55 |  |
|  | b15387041b |  |
|  | 60cdb1cee3 |  |
|  | 52fd47a921 |  |
|  | b723ec3c0f |  |
|  | 68ca7e93a1 |  |
|  | bc5607f48c |  |
|  | 36a5186c17 |  |
|  | 05bac73c45 |  |
|  | 7a43428e76 |  |
|  | e21da8330e |  |
|  | 56c3f0534d |  |
|  | 98e3b89d46 |  |
|  | 8271c9f95a |  |
|  | f7bdefb0a3 |  |
|  | e0a7787139 |  |
|  | c1159f518c |  |
|  | a19a8ba412 |  |
|  | bb455b3c37 |  |
|  | ca35a67e9f |  |
|  | 88f1853b09 |  |
|  | 3ca72674af |  |
|  | c185d4e3ca |  |
|  | 2d62cac5f7 |  |
|  | e3361cf098 |  |
|  | 41f7f33746 |  |
|  | 8141fafb34 |  |
|  | 42af434bd7 |  |
|  | fbb200c4ee |  |
|  | b96a9f412a |  |
|  | 6b101a91f6 |  |
|  | 2df5428712 |  |
|  | 5d7e05172d |  |
|  | 41058ff5c6 |  |
|  | 54a87ca3dc |  |
|  | 6bf93d33ea |  |
|  | 441a2c74ad |  |
|  | f628774267 |  |
|  | 3f16413769 |  |
|  | 959a64aebc |  |
|  | 694014934c |  |
|  | cff176e7a3 |  |
|  | 7f7e6fdd1f |  |
|  | 45a52cbc33 |  |
|  | bba7362641 |  |
|  | 468f85c45d |  |
|  | 24e2d01ccc |  |
|  | 43d7775d08 |  |
|  | 527dec4d49 |  |
|  | fe70b56d24 |  |
|  | ed62f03ba0 |  |
|  | e034e83198 |  |
|  | 110f4ec332 |  |
|  | 5bf265ed46 |  |
|  | 528fe7c024 |  |
|  | 08be0658cb |  |
|  | f823841b15 |  |
|  | 9ce3793067 |  |
|  | 89d4605577 |  |
|  | 675a0fc374 |  |
|  | ca2653ea1a |  |
|  | a8d3fd8033 |  |
|  | 702b956ff1 |  |
|  | 9b8577f258 |  |
|  | 9623681a15 |  |
|  | cc22fd8c35 |  |
|  | 0ef1b6100e |  |
|  | a519746ccb |  |
|  | f29dd8ef8b |  |
|  | f2a5c06005 |  |
|  | fb9f959fe5 |  |
.VSCodeCounter/2025-03-17_16-24-17/details.md (new file, 41 lines)
@@ -0,0 +1,41 @@
# Details

Date : 2025-03-17 16:24:17

Directory /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew

Total : 26 files, 6193 codes, 1008 comments, 1017 blanks, all 8218 lines

[Summary](results.md) / Details / [Diff Summary](diff.md) / [Diff Details](diff-details.md)

## Files
| filename | language | code | comment | blank | total |
| :--- | :--- | ---: | ---: | ---: | ---: |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/README.md](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/README.md) | Markdown | 39 | 0 | 19 | 58 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/AiValidationDialogs.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/AiValidationDialogs.tsx) | TypeScript JSX | 230 | 10 | 8 | 248 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/BaseCellContent.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/BaseCellContent.tsx) | TypeScript JSX | 18 | 0 | 3 | 21 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/SearchableTemplateSelect.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/SearchableTemplateSelect.tsx) | TypeScript JSX | 273 | 19 | 37 | 329 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationCell.tsx) | TypeScript JSX | 374 | 42 | 44 | 460 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationContainer.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationContainer.tsx) | TypeScript JSX | 730 | 126 | 106 | 962 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationTable.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationTable.tsx) | TypeScript JSX | 499 | 48 | 54 | 601 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/CheckboxCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/CheckboxCell.tsx) | TypeScript JSX | 112 | 12 | 21 | 145 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/InputCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/InputCell.tsx) | TypeScript JSX | 232 | 31 | 32 | 295 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultiSelectCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultiSelectCell.tsx) | TypeScript JSX | 407 | 56 | 52 | 515 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultilineInput.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultilineInput.tsx) | TypeScript JSX | 193 | 23 | 22 | 238 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/SelectCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/SelectCell.tsx) | TypeScript JSX | 289 | 36 | 31 | 356 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useAiValidation.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useAiValidation.tsx) | TypeScript JSX | 500 | 75 | 89 | 664 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useProductLinesFetching.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useProductLinesFetching.tsx) | TypeScript JSX | 248 | 69 | 74 | 391 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useTemplates.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useTemplates.tsx) | TypeScript JSX | 204 | 26 | 33 | 263 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useUpcValidation.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useUpcValidation.tsx) | TypeScript JSX | 209 | 49 | 50 | 308 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidation.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidation.tsx) | TypeScript JSX | 219 | 39 | 47 | 305 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidationState.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidationState.tsx) | TypeScript JSX | 1,060 | 228 | 229 | 1,517 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/index.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/index.tsx) | TypeScript JSX | 20 | 6 | 2 | 28 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/types.ts](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/types.ts) | TypeScript | 4 | 0 | 1 | 5 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/types/index.ts](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/types/index.ts) | TypeScript | 16 | 4 | 4 | 24 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/dataMutations.ts](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/dataMutations.ts) | TypeScript | 124 | 4 | 14 | 142 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/errorUtils.ts](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/errorUtils.ts) | TypeScript | 21 | 15 | 5 | 41 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/upcValidation.ts](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/upcValidation.ts) | TypeScript | 43 | 24 | 7 | 74 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validation-helper.js](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validation-helper.js) | JavaScript | 28 | 7 | 9 | 44 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validationUtils.ts](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validationUtils.ts) | TypeScript | 101 | 59 | 24 | 184 |

[Summary](results.md) / Details / [Diff Summary](diff.md) / [Diff Details](diff-details.md)
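These `.VSCodeCounter` reports are generated output, so the table above can be sanity-checked mechanically: every file row should satisfy code + comment + blank = total, and the per-column sums should reproduce the `Total : 26 files, 6193 codes, ...` line. A minimal TypeScript sketch of such a check (the file path and the ad-hoc row parsing are assumptions for illustration, not part of the generated report):

```typescript
import { readFileSync } from "node:fs";

// Parse a markdown table row like
// "| [name](link) | TypeScript | 124 | 4 | 14 | 142 |" into [code, comment, blank, total].
// Header and alignment rows produce NaN cells and are filtered out.
function parseRow(line: string): number[] | null {
  const cells = line.split("|").map((c) => c.trim()).filter((c) => c.length > 0);
  if (cells.length !== 6) return null;
  const nums = cells.slice(2).map((c) => Number(c.replace(/,/g, "")));
  return nums.every(Number.isFinite) ? nums : null;
}

const md = readFileSync(".VSCodeCounter/2025-03-17_16-24-17/details.md", "utf8");
const rows = md.split("\n").map(parseRow).filter((r): r is number[] => r !== null);

// Per-row invariant: code + comment + blank = total.
for (const [code, comment, blank, total] of rows) {
  if (code + comment + blank !== total) {
    throw new Error(`row does not add up: ${code} + ${comment} + ${blank} != ${total}`);
  }
}

// Column sums should match the "Total :" line above.
const colSum = (i: number) => rows.reduce((acc, r) => acc + r[i], 0);
console.log(rows.length, colSum(0), colSum(1), colSum(2), colSum(3));
// expect: 26 6193 1008 1017 8218
```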
.VSCodeCounter/2025-03-17_16-24-17/diff-details.md (new file, 20 lines)
@@ -0,0 +1,20 @@
# Diff Details

Date : 2025-03-17 16:24:17

Directory /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew

Total : 5 files, -358 codes, -15 comments, -33 blanks, all -406 lines

[Summary](results.md) / [Details](details.md) / [Diff Summary](diff.md) / Diff Details

## Files
| filename | language | code | comment | blank | total |
| :--- | :--- | ---: | ---: | ---: | ---: |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/SaveTemplateDialog.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/SaveTemplateDialog.tsx) | TypeScript JSX | -83 | 0 | -4 | -87 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/TemplateManager.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/TemplateManager.tsx) | TypeScript JSX | -193 | -4 | -15 | -212 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationContainer.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationContainer.tsx) | TypeScript JSX | -241 | -68 | -72 | -381 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useFilters.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useFilters.tsx) | TypeScript JSX | -89 | -12 | -16 | -117 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useProductLinesFetching.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useProductLinesFetching.tsx) | TypeScript JSX | 248 | 69 | 74 | 391 |

[Summary](results.md) / [Details](details.md) / [Diff Summary](diff.md) / Diff Details
.VSCodeCounter/2025-03-17_16-24-17/diff.md (new file, 23 lines)
@@ -0,0 +1,23 @@
# Diff Summary

Date : 2025-03-17 16:24:17

Directory /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew

Total : 5 files, -358 codes, -15 comments, -33 blanks, all -406 lines

[Summary](results.md) / [Details](details.md) / Diff Summary / [Diff Details](diff-details.md)

## Languages
| language | files | code | comment | blank | total |
| :--- | ---: | ---: | ---: | ---: | ---: |
| TypeScript JSX | 5 | -358 | -15 | -33 | -406 |

## Directories
| path | files | code | comment | blank | total |
| :--- | ---: | ---: | ---: | ---: | ---: |
| . | 5 | -358 | -15 | -33 | -406 |
| components | 3 | -517 | -72 | -91 | -680 |
| hooks | 2 | 159 | 57 | 58 | 274 |

[Summary](results.md) / [Details](details.md) / Diff Summary / [Diff Details](diff-details.md)
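A note on reading these diff reports: negative values are lines removed relative to the previous scan, and each `Total :` line is the signed sum of the per-file rows, e.g. the code column of the diff-details table above: (-83) + (-193) + (-241) + (-89) + 248 = -358. A one-line check, with the values hand-copied from that table (illustrative only):

```typescript
// Per-file code deltas hand-copied from 2025-03-17 diff-details.md above.
const codeDeltas = [-83, -193, -241, -89, 248];
console.log(codeDeltas.reduce((a, b) => a + b, 0)); // -358, matching the Total line
```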
.VSCodeCounter/2025-03-17_16-24-17/diff.txt (new file, 31 lines)
@@ -0,0 +1,31 @@
Date : 2025-03-17 16:24:17
Directory : /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew
Total : 5 files, -358 codes, -15 comments, -33 blanks, all -406 lines

Languages
+----------------+------------+------------+------------+------------+------------+
| language | files | code | comment | blank | total |
+----------------+------------+------------+------------+------------+------------+
| TypeScript JSX | 5 | -358 | -15 | -33 | -406 |
+----------------+------------+------------+------------+------------+------------+

Directories
+-------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+------------+------------+
| path | files | code | comment | blank | total |
+-------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+------------+------------+
| . | 5 | -358 | -15 | -33 | -406 |
| components | 3 | -517 | -72 | -91 | -680 |
| hooks | 2 | 159 | 57 | 58 | 274 |
+-------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+------------+------------+

Files
+-------------------------------------------------------------------------------------------------------------------------------------+----------------+------------+------------+------------+------------+
| filename | language | code | comment | blank | total |
+-------------------------------------------------------------------------------------------------------------------------------------+----------------+------------+------------+------------+------------+
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/SaveTemplateDialog.tsx | TypeScript JSX | -83 | 0 | -4 | -87 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/TemplateManager.tsx | TypeScript JSX | -193 | -4 | -15 | -212 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationContainer.tsx | TypeScript JSX | -241 | -68 | -72 | -381 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useFilters.tsx | TypeScript JSX | -89 | -12 | -16 | -117 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useProductLinesFetching.tsx | TypeScript JSX | 248 | 69 | 74 | 391 |
| Total | | -358 | -15 | -33 | -406 |
+-------------------------------------------------------------------------------------------------------------------------------------+----------------+------------+------------+------------+------------+
.VSCodeCounter/2025-03-17_16-24-17/results.json (new file, 1 line)
File diff suppressed because one or more lines are too long
.VSCodeCounter/2025-03-17_16-24-17/results.md (new file, 31 lines)
@@ -0,0 +1,31 @@
# Summary

Date : 2025-03-17 16:24:17

Directory /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew

Total : 26 files, 6193 codes, 1008 comments, 1017 blanks, all 8218 lines

Summary / [Details](details.md) / [Diff Summary](diff.md) / [Diff Details](diff-details.md)

## Languages
| language | files | code | comment | blank | total |
| :--- | ---: | ---: | ---: | ---: | ---: |
| TypeScript JSX | 18 | 5,817 | 895 | 934 | 7,646 |
| TypeScript | 6 | 309 | 106 | 55 | 470 |
| Markdown | 1 | 39 | 0 | 19 | 58 |
| JavaScript | 1 | 28 | 7 | 9 | 44 |

## Directories
| path | files | code | comment | blank | total |
| :--- | ---: | ---: | ---: | ---: | ---: |
| . | 26 | 6,193 | 1,008 | 1,017 | 8,218 |
| . (Files) | 3 | 63 | 6 | 22 | 91 |
| components | 11 | 3,357 | 403 | 410 | 4,170 |
| components (Files) | 6 | 2,124 | 245 | 252 | 2,621 |
| components/cells | 5 | 1,233 | 158 | 158 | 1,549 |
| hooks | 6 | 2,440 | 486 | 522 | 3,448 |
| types | 1 | 16 | 4 | 4 | 24 |
| utils | 5 | 317 | 109 | 59 | 485 |

Summary / [Details](details.md) / [Diff Summary](diff.md) / [Diff Details](diff-details.md)
.VSCodeCounter/2025-03-17_16-24-17/results.txt (new file, 60 lines)
@@ -0,0 +1,60 @@
Date : 2025-03-17 16:24:17
Directory : /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew
Total : 26 files, 6193 codes, 1008 comments, 1017 blanks, all 8218 lines

Languages
+----------------+------------+------------+------------+------------+------------+
| language | files | code | comment | blank | total |
+----------------+------------+------------+------------+------------+------------+
| TypeScript JSX | 18 | 5,817 | 895 | 934 | 7,646 |
| TypeScript | 6 | 309 | 106 | 55 | 470 |
| Markdown | 1 | 39 | 0 | 19 | 58 |
| JavaScript | 1 | 28 | 7 | 9 | 44 |
+----------------+------------+------------+------------+------------+------------+

Directories
+------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+------------+------------+
| path | files | code | comment | blank | total |
+------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+------------+------------+
| . | 26 | 6,193 | 1,008 | 1,017 | 8,218 |
| . (Files) | 3 | 63 | 6 | 22 | 91 |
| components | 11 | 3,357 | 403 | 410 | 4,170 |
| components (Files) | 6 | 2,124 | 245 | 252 | 2,621 |
| components/cells | 5 | 1,233 | 158 | 158 | 1,549 |
| hooks | 6 | 2,440 | 486 | 522 | 3,448 |
| types | 1 | 16 | 4 | 4 | 24 |
| utils | 5 | 317 | 109 | 59 | 485 |
+------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+------------+------------+

Files
+------------------------------------------------------------------------------------------------------------------------------------------+----------------+------------+------------+------------+------------+
| filename | language | code | comment | blank | total |
+------------------------------------------------------------------------------------------------------------------------------------------+----------------+------------+------------+------------+------------+
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/README.md | Markdown | 39 | 0 | 19 | 58 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/AiValidationDialogs.tsx | TypeScript JSX | 230 | 10 | 8 | 248 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/BaseCellContent.tsx | TypeScript JSX | 18 | 0 | 3 | 21 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/SearchableTemplateSelect.tsx | TypeScript JSX | 273 | 19 | 37 | 329 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationCell.tsx | TypeScript JSX | 374 | 42 | 44 | 460 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationContainer.tsx | TypeScript JSX | 730 | 126 | 106 | 962 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationTable.tsx | TypeScript JSX | 499 | 48 | 54 | 601 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/CheckboxCell.tsx | TypeScript JSX | 112 | 12 | 21 | 145 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/InputCell.tsx | TypeScript JSX | 232 | 31 | 32 | 295 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultiSelectCell.tsx | TypeScript JSX | 407 | 56 | 52 | 515 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultilineInput.tsx | TypeScript JSX | 193 | 23 | 22 | 238 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/SelectCell.tsx | TypeScript JSX | 289 | 36 | 31 | 356 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useAiValidation.tsx | TypeScript JSX | 500 | 75 | 89 | 664 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useProductLinesFetching.tsx | TypeScript JSX | 248 | 69 | 74 | 391 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useTemplates.tsx | TypeScript JSX | 204 | 26 | 33 | 263 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useUpcValidation.tsx | TypeScript JSX | 209 | 49 | 50 | 308 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidation.tsx | TypeScript JSX | 219 | 39 | 47 | 305 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidationState.tsx | TypeScript JSX | 1,060 | 228 | 229 | 1,517 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/index.tsx | TypeScript JSX | 20 | 6 | 2 | 28 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/types.ts | TypeScript | 4 | 0 | 1 | 5 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/types/index.ts | TypeScript | 16 | 4 | 4 | 24 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/dataMutations.ts | TypeScript | 124 | 4 | 14 | 142 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/errorUtils.ts | TypeScript | 21 | 15 | 5 | 41 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/upcValidation.ts | TypeScript | 43 | 24 | 7 | 74 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validation-helper.js | JavaScript | 28 | 7 | 9 | 44 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validationUtils.ts | TypeScript | 101 | 59 | 24 | 184 |
| Total | | 6,193 | 1,008 | 1,017 | 8,218 |
+------------------------------------------------------------------------------------------------------------------------------------------+----------------+------------+------------+------------+------------+
.VSCodeCounter/2025-03-18_12-39-04/details.md (new file, 42 lines)
@@ -0,0 +1,42 @@
# Details

Date : 2025-03-18 12:39:04

Directory /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew

Total : 27 files, 6925 codes, 1247 comments, 1248 blanks, all 9420 lines

[Summary](results.md) / Details / [Diff Summary](diff.md) / [Diff Details](diff-details.md)

## Files
| filename | language | code | comment | blank | total |
| :--- | :--- | ---: | ---: | ---: | ---: |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/README.md](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/README.md) | Markdown | 39 | 0 | 19 | 58 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/AiValidationDialogs.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/AiValidationDialogs.tsx) | TypeScript JSX | 230 | 10 | 8 | 248 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/BaseCellContent.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/BaseCellContent.tsx) | TypeScript JSX | 18 | 0 | 3 | 21 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/SearchableTemplateSelect.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/SearchableTemplateSelect.tsx) | TypeScript JSX | 273 | 19 | 37 | 329 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/UpcValidationTableAdapter.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/UpcValidationTableAdapter.tsx) | TypeScript JSX | 113 | 17 | 10 | 140 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationCell.tsx) | TypeScript JSX | 377 | 49 | 54 | 480 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationContainer.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationContainer.tsx) | TypeScript JSX | 969 | 182 | 158 | 1,309 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationTable.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationTable.tsx) | TypeScript JSX | 509 | 50 | 57 | 616 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/CheckboxCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/CheckboxCell.tsx) | TypeScript JSX | 112 | 12 | 21 | 145 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/InputCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/InputCell.tsx) | TypeScript JSX | 233 | 34 | 33 | 300 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultiSelectCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultiSelectCell.tsx) | TypeScript JSX | 420 | 66 | 59 | 545 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultilineInput.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultilineInput.tsx) | TypeScript JSX | 193 | 23 | 22 | 238 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/SelectCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/SelectCell.tsx) | TypeScript JSX | 227 | 36 | 32 | 295 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useAiValidation.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useAiValidation.tsx) | TypeScript JSX | 500 | 75 | 89 | 664 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useProductLinesFetching.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useProductLinesFetching.tsx) | TypeScript JSX | 264 | 75 | 81 | 420 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useTemplates.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useTemplates.tsx) | TypeScript JSX | 204 | 26 | 33 | 263 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useUpcValidation.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useUpcValidation.tsx) | TypeScript JSX | 337 | 88 | 92 | 517 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidation.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidation.tsx) | TypeScript JSX | 360 | 78 | 85 | 523 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidationState.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidationState.tsx) | TypeScript JSX | 1,190 | 288 | 289 | 1,767 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/index.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/index.tsx) | TypeScript JSX | 20 | 6 | 2 | 28 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/types.ts](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/types.ts) | TypeScript | 4 | 0 | 1 | 5 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/types/index.ts](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/types/index.ts) | TypeScript | 16 | 4 | 4 | 24 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/dataMutations.ts](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/dataMutations.ts) | TypeScript | 124 | 4 | 14 | 142 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/errorUtils.ts](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/errorUtils.ts) | TypeScript | 21 | 15 | 5 | 41 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/upcValidation.ts](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/upcValidation.ts) | TypeScript | 43 | 24 | 7 | 74 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validation-helper.js](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validation-helper.js) | JavaScript | 28 | 7 | 9 | 44 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validationUtils.ts](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validationUtils.ts) | TypeScript | 101 | 59 | 24 | 184 |

[Summary](results.md) / Details / [Diff Summary](diff.md) / [Diff Details](diff-details.md)
.VSCodeCounter/2025-03-18_12-39-04/diff-details.md (new file, 26 lines)
@@ -0,0 +1,26 @@
# Diff Details

Date : 2025-03-18 12:39:04

Directory /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew

Total : 11 files, 732 codes, 239 comments, 231 blanks, all 1202 lines

[Summary](results.md) / [Details](details.md) / [Diff Summary](diff.md) / Diff Details

## Files
| filename | language | code | comment | blank | total |
| :--- | :--- | ---: | ---: | ---: | ---: |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/UpcValidationTableAdapter.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/UpcValidationTableAdapter.tsx) | TypeScript JSX | 113 | 17 | 10 | 140 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationCell.tsx) | TypeScript JSX | 3 | 7 | 10 | 20 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationContainer.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationContainer.tsx) | TypeScript JSX | 239 | 56 | 52 | 347 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationTable.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationTable.tsx) | TypeScript JSX | 10 | 2 | 3 | 15 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/InputCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/InputCell.tsx) | TypeScript JSX | 1 | 3 | 1 | 5 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultiSelectCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultiSelectCell.tsx) | TypeScript JSX | 13 | 10 | 7 | 30 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/SelectCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/SelectCell.tsx) | TypeScript JSX | -62 | 0 | 1 | -61 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useProductLinesFetching.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useProductLinesFetching.tsx) | TypeScript JSX | 16 | 6 | 7 | 29 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useUpcValidation.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useUpcValidation.tsx) | TypeScript JSX | 128 | 39 | 42 | 209 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidation.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidation.tsx) | TypeScript JSX | 141 | 39 | 38 | 218 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidationState.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidationState.tsx) | TypeScript JSX | 130 | 60 | 60 | 250 |

[Summary](results.md) / [Details](details.md) / [Diff Summary](diff.md) / Diff Details
.VSCodeCounter/2025-03-18_12-39-04/diff.md (new file, 25 lines)
@@ -0,0 +1,25 @@
# Diff Summary

Date : 2025-03-18 12:39:04

Directory /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew

Total : 11 files, 732 codes, 239 comments, 231 blanks, all 1202 lines

[Summary](results.md) / [Details](details.md) / Diff Summary / [Diff Details](diff-details.md)

## Languages
| language | files | code | comment | blank | total |
| :--- | ---: | ---: | ---: | ---: | ---: |
| TypeScript JSX | 11 | 732 | 239 | 231 | 1,202 |

## Directories
| path | files | code | comment | blank | total |
| :--- | ---: | ---: | ---: | ---: | ---: |
| . | 11 | 732 | 239 | 231 | 1,202 |
| components | 7 | 317 | 95 | 84 | 496 |
| components (Files) | 4 | 365 | 82 | 75 | 522 |
| components/cells | 3 | -48 | 13 | 9 | -26 |
| hooks | 4 | 415 | 144 | 147 | 706 |

[Summary](results.md) / [Details](details.md) / Diff Summary / [Diff Details](diff-details.md)
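Each diff summary is simply the newer snapshot's totals minus the older one's: the 2025-03-18 details report above gives 6925 / 1247 / 1248 / 9420, the 2025-03-17 one gives 6193 / 1008 / 1017 / 8218, and the element-wise difference is exactly this table's Total line. A quick check with hand-copied values (illustrative only):

```typescript
// Snapshot totals [code, comment, blank, all], hand-copied from the two summaries above.
const mar17 = [6193, 1008, 1017, 8218];
const mar18 = [6925, 1247, 1248, 9420];
console.log(mar18.map((v, i) => v - mar17[i])); // [ 732, 239, 231, 1202 ]
```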
.VSCodeCounter/2025-03-18_12-39-04/diff.txt (new file, 39 lines)
@@ -0,0 +1,39 @@
Date : 2025-03-18 12:39:04
Directory : /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew
Total : 11 files, 732 codes, 239 comments, 231 blanks, all 1202 lines

Languages
+----------------+------------+------------+------------+------------+------------+
| language | files | code | comment | blank | total |
+----------------+------------+------------+------------+------------+------------+
| TypeScript JSX | 11 | 732 | 239 | 231 | 1,202 |
+----------------+------------+------------+------------+------------+------------+

Directories
+-------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+------------+------------+
| path | files | code | comment | blank | total |
+-------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+------------+------------+
| . | 11 | 732 | 239 | 231 | 1,202 |
| components | 7 | 317 | 95 | 84 | 496 |
| components (Files) | 4 | 365 | 82 | 75 | 522 |
| components/cells | 3 | -48 | 13 | 9 | -26 |
| hooks | 4 | 415 | 144 | 147 | 706 |
+-------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+------------+------------+

Files
+-------------------------------------------------------------------------------------------------------------------------------------------+----------------+------------+------------+------------+------------+
| filename | language | code | comment | blank | total |
+-------------------------------------------------------------------------------------------------------------------------------------------+----------------+------------+------------+------------+------------+
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/UpcValidationTableAdapter.tsx | TypeScript JSX | 113 | 17 | 10 | 140 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationCell.tsx | TypeScript JSX | 3 | 7 | 10 | 20 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationContainer.tsx | TypeScript JSX | 239 | 56 | 52 | 347 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationTable.tsx | TypeScript JSX | 10 | 2 | 3 | 15 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/InputCell.tsx | TypeScript JSX | 1 | 3 | 1 | 5 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultiSelectCell.tsx | TypeScript JSX | 13 | 10 | 7 | 30 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/SelectCell.tsx | TypeScript JSX | -62 | 0 | 1 | -61 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useProductLinesFetching.tsx | TypeScript JSX | 16 | 6 | 7 | 29 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useUpcValidation.tsx | TypeScript JSX | 128 | 39 | 42 | 209 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidation.tsx | TypeScript JSX | 141 | 39 | 38 | 218 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidationState.tsx | TypeScript JSX | 130 | 60 | 60 | 250 |
| Total | | 732 | 239 | 231 | 1,202 |
+-------------------------------------------------------------------------------------------------------------------------------------------+----------------+------------+------------+------------+------------+
.VSCodeCounter/2025-03-18_12-39-04/results.json (new file, 1 line)
File diff suppressed because one or more lines are too long
.VSCodeCounter/2025-03-18_12-39-04/results.md (new file, 31 lines)
@@ -0,0 +1,31 @@
# Summary

Date : 2025-03-18 12:39:04

Directory /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew

Total : 27 files, 6925 codes, 1247 comments, 1248 blanks, all 9420 lines

Summary / [Details](details.md) / [Diff Summary](diff.md) / [Diff Details](diff-details.md)

## Languages
| language | files | code | comment | blank | total |
| :--- | ---: | ---: | ---: | ---: | ---: |
| TypeScript JSX | 19 | 6,549 | 1,134 | 1,165 | 8,848 |
| TypeScript | 6 | 309 | 106 | 55 | 470 |
| Markdown | 1 | 39 | 0 | 19 | 58 |
| JavaScript | 1 | 28 | 7 | 9 | 44 |

## Directories
| path | files | code | comment | blank | total |
| :--- | ---: | ---: | ---: | ---: | ---: |
| . | 27 | 6,925 | 1,247 | 1,248 | 9,420 |
| . (Files) | 3 | 63 | 6 | 22 | 91 |
| components | 12 | 3,674 | 498 | 494 | 4,666 |
| components (Files) | 7 | 2,489 | 327 | 327 | 3,143 |
| components/cells | 5 | 1,185 | 171 | 167 | 1,523 |
| hooks | 6 | 2,855 | 630 | 669 | 4,154 |
| types | 1 | 16 | 4 | 4 | 24 |
| utils | 5 | 317 | 109 | 59 | 485 |

Summary / [Details](details.md) / [Diff Summary](diff.md) / [Diff Details](diff-details.md)
.VSCodeCounter/2025-03-18_12-39-04/results.txt (new file, 61 lines)
@@ -0,0 +1,61 @@
Date : 2025-03-18 12:39:04
Directory : /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew
Total : 27 files, 6925 codes, 1247 comments, 1248 blanks, all 9420 lines

Languages
+----------------+------------+------------+------------+------------+------------+
| language | files | code | comment | blank | total |
+----------------+------------+------------+------------+------------+------------+
| TypeScript JSX | 19 | 6,549 | 1,134 | 1,165 | 8,848 |
| TypeScript | 6 | 309 | 106 | 55 | 470 |
| Markdown | 1 | 39 | 0 | 19 | 58 |
| JavaScript | 1 | 28 | 7 | 9 | 44 |
+----------------+------------+------------+------------+------------+------------+

Directories
+-------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+------------+------------+
| path | files | code | comment | blank | total |
+-------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+------------+------------+
| . | 27 | 6,925 | 1,247 | 1,248 | 9,420 |
| . (Files) | 3 | 63 | 6 | 22 | 91 |
| components | 12 | 3,674 | 498 | 494 | 4,666 |
| components (Files) | 7 | 2,489 | 327 | 327 | 3,143 |
| components/cells | 5 | 1,185 | 171 | 167 | 1,523 |
| hooks | 6 | 2,855 | 630 | 669 | 4,154 |
| types | 1 | 16 | 4 | 4 | 24 |
| utils | 5 | 317 | 109 | 59 | 485 |
+-------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+------------+------------+

Files
+-------------------------------------------------------------------------------------------------------------------------------------------+----------------+------------+------------+------------+------------+
| filename | language | code | comment | blank | total |
+-------------------------------------------------------------------------------------------------------------------------------------------+----------------+------------+------------+------------+------------+
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/README.md | Markdown | 39 | 0 | 19 | 58 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/AiValidationDialogs.tsx | TypeScript JSX | 230 | 10 | 8 | 248 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/BaseCellContent.tsx | TypeScript JSX | 18 | 0 | 3 | 21 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/SearchableTemplateSelect.tsx | TypeScript JSX | 273 | 19 | 37 | 329 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/UpcValidationTableAdapter.tsx | TypeScript JSX | 113 | 17 | 10 | 140 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationCell.tsx | TypeScript JSX | 377 | 49 | 54 | 480 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationContainer.tsx | TypeScript JSX | 969 | 182 | 158 | 1,309 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationTable.tsx | TypeScript JSX | 509 | 50 | 57 | 616 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/CheckboxCell.tsx | TypeScript JSX | 112 | 12 | 21 | 145 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/InputCell.tsx | TypeScript JSX | 233 | 34 | 33 | 300 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultiSelectCell.tsx | TypeScript JSX | 420 | 66 | 59 | 545 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultilineInput.tsx | TypeScript JSX | 193 | 23 | 22 | 238 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/SelectCell.tsx | TypeScript JSX | 227 | 36 | 32 | 295 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useAiValidation.tsx | TypeScript JSX | 500 | 75 | 89 | 664 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useProductLinesFetching.tsx | TypeScript JSX | 264 | 75 | 81 | 420 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useTemplates.tsx | TypeScript JSX | 204 | 26 | 33 | 263 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useUpcValidation.tsx | TypeScript JSX | 337 | 88 | 92 | 517 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidation.tsx | TypeScript JSX | 360 | 78 | 85 | 523 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidationState.tsx | TypeScript JSX | 1,190 | 288 | 289 | 1,767 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/index.tsx | TypeScript JSX | 20 | 6 | 2 | 28 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/types.ts | TypeScript | 4 | 0 | 1 | 5 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/types/index.ts | TypeScript | 16 | 4 | 4 | 24 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/dataMutations.ts | TypeScript | 124 | 4 | 14 | 142 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/errorUtils.ts | TypeScript | 21 | 15 | 5 | 41 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/upcValidation.ts | TypeScript | 43 | 24 | 7 | 74 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validation-helper.js | JavaScript | 28 | 7 | 9 | 44 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validationUtils.ts | TypeScript | 101 | 59 | 24 | 184 |
| Total | | 6,925 | 1,247 | 1,248 | 9,420 |
+-------------------------------------------------------------------------------------------------------------------------------------------+----------------+------------+------------+------------+------------+
.VSCodeCounter/2025-03-18_13-49-23/details.md (new file, 42 lines)
@@ -0,0 +1,42 @@
# Details

Date : 2025-03-18 13:49:23

Directory /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew

Total : 27 files, 6961 codes, 1254 comments, 1252 blanks, all 9467 lines

[Summary](results.md) / Details / [Diff Summary](diff.md) / [Diff Details](diff-details.md)

## Files
| filename | language | code | comment | blank | total |
| :--- | :--- | ---: | ---: | ---: | ---: |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/README.md](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/README.md) | Markdown | 39 | 0 | 19 | 58 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/AiValidationDialogs.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/AiValidationDialogs.tsx) | TypeScript JSX | 230 | 10 | 8 | 248 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/BaseCellContent.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/BaseCellContent.tsx) | TypeScript JSX | 18 | 0 | 3 | 21 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/SearchableTemplateSelect.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/SearchableTemplateSelect.tsx) | TypeScript JSX | 273 | 19 | 37 | 329 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/UpcValidationTableAdapter.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/UpcValidationTableAdapter.tsx) | TypeScript JSX | 113 | 17 | 10 | 140 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationCell.tsx) | TypeScript JSX | 395 | 51 | 55 | 501 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationContainer.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationContainer.tsx) | TypeScript JSX | 969 | 182 | 158 | 1,309 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationTable.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationTable.tsx) | TypeScript JSX | 527 | 55 | 60 | 642 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/CheckboxCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/CheckboxCell.tsx) | TypeScript JSX | 112 | 12 | 21 | 145 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/InputCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/InputCell.tsx) | TypeScript JSX | 233 | 34 | 33 | 300 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultiSelectCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultiSelectCell.tsx) | TypeScript JSX | 420 | 66 | 59 | 545 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultilineInput.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultilineInput.tsx) | TypeScript JSX | 193 | 23 | 22 | 238 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/SelectCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/SelectCell.tsx) | TypeScript JSX | 227 | 36 | 32 | 295 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useAiValidation.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useAiValidation.tsx) | TypeScript JSX | 500 | 75 | 89 | 664 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useProductLinesFetching.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useProductLinesFetching.tsx) | TypeScript JSX | 264 | 75 | 81 | 420 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useTemplates.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useTemplates.tsx) | TypeScript JSX | 204 | 26 | 33 | 263 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useUpcValidation.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useUpcValidation.tsx) | TypeScript JSX | 337 | 88 | 92 | 517 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidation.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidation.tsx) | TypeScript JSX | 360 | 78 | 85 | 523 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidationState.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidationState.tsx) | TypeScript JSX | 1,190 | 288 | 289 | 1,767 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/index.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/index.tsx) | TypeScript JSX | 20 | 6 | 2 | 28 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/types.ts](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/types.ts) | TypeScript | 4 | 0 | 1 | 5 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/types/index.ts](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/types/index.ts) | TypeScript | 16 | 4 | 4 | 24 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/dataMutations.ts](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/dataMutations.ts) | TypeScript | 124 | 4 | 14 | 142 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/errorUtils.ts](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/errorUtils.ts) | TypeScript | 21 | 15 | 5 | 41 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/upcValidation.ts](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/upcValidation.ts) | TypeScript | 43 | 24 | 7 | 74 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validation-helper.js](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validation-helper.js) | JavaScript | 28 | 7 | 9 | 44 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validationUtils.ts](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validationUtils.ts) | TypeScript | 101 | 59 | 24 | 184 |

[Summary](results.md) / Details / [Diff Summary](diff.md) / [Diff Details](diff-details.md)
17 .VSCodeCounter/2025-03-18_13-49-23/diff-details.md Normal file
@@ -0,0 +1,17 @@
# Diff Details

Date : 2025-03-18 13:49:23

Directory /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew

Total : 2 files, 36 codes, 7 comments, 4 blanks, all 47 lines

[Summary](results.md) / [Details](details.md) / [Diff Summary](diff.md) / Diff Details

## Files

| filename | language | code | comment | blank | total |
| :--- | :--- | ---: | ---: | ---: | ---: |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationCell.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationCell.tsx) | TypeScript JSX | 18 | 2 | 1 | 21 |
| [inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationTable.tsx](/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationTable.tsx) | TypeScript JSX | 18 | 5 | 3 | 26 |

[Summary](results.md) / [Details](details.md) / [Diff Summary](diff.md) / Diff Details
22 .VSCodeCounter/2025-03-18_13-49-23/diff.md Normal file
@@ -0,0 +1,22 @@
# Diff Summary

Date : 2025-03-18 13:49:23

Directory /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew

Total : 2 files, 36 codes, 7 comments, 4 blanks, all 47 lines

[Summary](results.md) / [Details](details.md) / Diff Summary / [Diff Details](diff-details.md)

## Languages

| language | files | code | comment | blank | total |
| :--- | ---: | ---: | ---: | ---: | ---: |
| TypeScript JSX | 2 | 36 | 7 | 4 | 47 |

## Directories

| path | files | code | comment | blank | total |
| :--- | ---: | ---: | ---: | ---: | ---: |
| . | 2 | 36 | 7 | 4 | 47 |
| components | 2 | 36 | 7 | 4 | 47 |

[Summary](results.md) / [Details](details.md) / Diff Summary / [Diff Details](diff-details.md)
27 .VSCodeCounter/2025-03-18_13-49-23/diff.txt Normal file
@@ -0,0 +1,27 @@
Date : 2025-03-18 13:49:23
Directory : /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew
Total : 2 files, 36 codes, 7 comments, 4 blanks, all 47 lines

Languages
+----------------+------------+------------+------------+------------+------------+
| language | files | code | comment | blank | total |
+----------------+------------+------------+------------+------------+------------+
| TypeScript JSX | 2 | 36 | 7 | 4 | 47 |
+----------------+------------+------------+------------+------------+------------+

Directories
+---------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+------------+------------+
| path | files | code | comment | blank | total |
+---------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+------------+------------+
| . | 2 | 36 | 7 | 4 | 47 |
| components | 2 | 36 | 7 | 4 | 47 |
+---------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+------------+------------+

Files
+---------------------------------------------------------------------------------------------------------------------------------+----------------+------------+------------+------------+------------+
| filename | language | code | comment | blank | total |
+---------------------------------------------------------------------------------------------------------------------------------+----------------+------------+------------+------------+------------+
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationCell.tsx | TypeScript JSX | 18 | 2 | 1 | 21 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationTable.tsx | TypeScript JSX | 18 | 5 | 3 | 26 |
| Total | | 36 | 7 | 4 | 47 |
+---------------------------------------------------------------------------------------------------------------------------------+----------------+------------+------------+------------+------------+
1 .VSCodeCounter/2025-03-18_13-49-23/results.json Normal file
File diff suppressed because one or more lines are too long
31 .VSCodeCounter/2025-03-18_13-49-23/results.md Normal file
@@ -0,0 +1,31 @@
# Summary

Date : 2025-03-18 13:49:23

Directory /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew

Total : 27 files, 6961 codes, 1254 comments, 1252 blanks, all 9467 lines

Summary / [Details](details.md) / [Diff Summary](diff.md) / [Diff Details](diff-details.md)

## Languages

| language | files | code | comment | blank | total |
| :--- | ---: | ---: | ---: | ---: | ---: |
| TypeScript JSX | 19 | 6,585 | 1,141 | 1,169 | 8,895 |
| TypeScript | 6 | 309 | 106 | 55 | 470 |
| Markdown | 1 | 39 | 0 | 19 | 58 |
| JavaScript | 1 | 28 | 7 | 9 | 44 |

## Directories

| path | files | code | comment | blank | total |
| :--- | ---: | ---: | ---: | ---: | ---: |
| . | 27 | 6,961 | 1,254 | 1,252 | 9,467 |
| . (Files) | 3 | 63 | 6 | 22 | 91 |
| components | 12 | 3,710 | 505 | 498 | 4,713 |
| components (Files) | 7 | 2,525 | 334 | 331 | 3,190 |
| components/cells | 5 | 1,185 | 171 | 167 | 1,523 |
| hooks | 6 | 2,855 | 630 | 669 | 4,154 |
| types | 1 | 16 | 4 | 4 | 24 |
| utils | 5 | 317 | 109 | 59 | 485 |

Summary / [Details](details.md) / [Diff Summary](diff.md) / [Diff Details](diff-details.md)
61 .VSCodeCounter/2025-03-18_13-49-23/results.txt Normal file
@@ -0,0 +1,61 @@
Date : 2025-03-18 13:49:23
Directory : /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew
Total : 27 files, 6961 codes, 1254 comments, 1252 blanks, all 9467 lines

Languages
+----------------+------------+------------+------------+------------+------------+
| language | files | code | comment | blank | total |
+----------------+------------+------------+------------+------------+------------+
| TypeScript JSX | 19 | 6,585 | 1,141 | 1,169 | 8,895 |
| TypeScript | 6 | 309 | 106 | 55 | 470 |
| Markdown | 1 | 39 | 0 | 19 | 58 |
| JavaScript | 1 | 28 | 7 | 9 | 44 |
+----------------+------------+------------+------------+------------+------------+

Directories
+-------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+------------+------------+
| path | files | code | comment | blank | total |
+-------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+------------+------------+
| . | 27 | 6,961 | 1,254 | 1,252 | 9,467 |
| . (Files) | 3 | 63 | 6 | 22 | 91 |
| components | 12 | 3,710 | 505 | 498 | 4,713 |
| components (Files) | 7 | 2,525 | 334 | 331 | 3,190 |
| components/cells | 5 | 1,185 | 171 | 167 | 1,523 |
| hooks | 6 | 2,855 | 630 | 669 | 4,154 |
| types | 1 | 16 | 4 | 4 | 24 |
| utils | 5 | 317 | 109 | 59 | 485 |
+-------------------------------------------------------------------------------------------------------------------------------------------+------------+------------+------------+------------+------------+

Files
+-------------------------------------------------------------------------------------------------------------------------------------------+----------------+------------+------------+------------+------------+
| filename | language | code | comment | blank | total |
+-------------------------------------------------------------------------------------------------------------------------------------------+----------------+------------+------------+------------+------------+
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/README.md | Markdown | 39 | 0 | 19 | 58 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/AiValidationDialogs.tsx | TypeScript JSX | 230 | 10 | 8 | 248 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/BaseCellContent.tsx | TypeScript JSX | 18 | 0 | 3 | 21 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/SearchableTemplateSelect.tsx | TypeScript JSX | 273 | 19 | 37 | 329 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/UpcValidationTableAdapter.tsx | TypeScript JSX | 113 | 17 | 10 | 140 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationCell.tsx | TypeScript JSX | 395 | 51 | 55 | 501 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationContainer.tsx | TypeScript JSX | 969 | 182 | 158 | 1,309 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationTable.tsx | TypeScript JSX | 527 | 55 | 60 | 642 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/CheckboxCell.tsx | TypeScript JSX | 112 | 12 | 21 | 145 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/InputCell.tsx | TypeScript JSX | 233 | 34 | 33 | 300 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultiSelectCell.tsx | TypeScript JSX | 420 | 66 | 59 | 545 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultilineInput.tsx | TypeScript JSX | 193 | 23 | 22 | 238 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/SelectCell.tsx | TypeScript JSX | 227 | 36 | 32 | 295 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useAiValidation.tsx | TypeScript JSX | 500 | 75 | 89 | 664 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useProductLinesFetching.tsx | TypeScript JSX | 264 | 75 | 81 | 420 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useTemplates.tsx | TypeScript JSX | 204 | 26 | 33 | 263 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useUpcValidation.tsx | TypeScript JSX | 337 | 88 | 92 | 517 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidation.tsx | TypeScript JSX | 360 | 78 | 85 | 523 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidationState.tsx | TypeScript JSX | 1,190 | 288 | 289 | 1,767 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/index.tsx | TypeScript JSX | 20 | 6 | 2 | 28 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/types.ts | TypeScript | 4 | 0 | 1 | 5 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/types/index.ts | TypeScript | 16 | 4 | 4 | 24 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/dataMutations.ts | TypeScript | 124 | 4 | 14 | 142 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/errorUtils.ts | TypeScript | 21 | 15 | 5 | 41 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/upcValidation.ts | TypeScript | 43 | 24 | 7 | 74 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validation-helper.js | JavaScript | 28 | 7 | 9 | 44 |
| /Users/matt/Dev/inventory/inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validationUtils.ts | TypeScript | 101 | 59 | 24 | 184 |
| Total | | 6,961 | 1,254 | 1,252 | 9,467 |
+-------------------------------------------------------------------------------------------------------------------------------------------+----------------+------------+------------+------------+------------+
15 .gitignore vendored
@@ -50,6 +50,11 @@ dashboard-server/meta-server/._package-lock.json
dashboard-server/meta-server/._services
*.tsbuildinfo

uploads/*
uploads/**/*
**/uploads/*
**/uploads/**/*

# CSV data files
*.csv
csv/*
@@ -59,3 +64,13 @@ csv/**/*
!csv/.gitkeep
inventory/tsconfig.tsbuildinfo
inventory-server/scripts/.fuse_hidden00000fa20000000a

.VSCodeCounter/
.VSCodeCounter/*
.VSCodeCounter/**/*

*/chat/db-convert/db/*
*/chat/db-convert/mongo_converter_env/*

# Ignore compiled Vite config to avoid duplication
vite.config.js
172 docs/PERMISSIONS.md Normal file
@@ -0,0 +1,172 @@
# Permission System Documentation

This document outlines the permission system implemented in the Inventory Manager application.

## Permission Structure

Permissions follow this naming convention:

- Page access: `access:{page_name}`
- Actions: `{action}:{resource}`

Examples:
- `access:products` - Can access the Products page
- `create:products` - Can create new products
- `edit:users` - Can edit user accounts

## Permission Components

### PermissionGuard

The core component that conditionally renders content based on permissions.

```tsx
<PermissionGuard
  permission="create:products"
  fallback={<p>No permission</p>}
>
  <button>Create Product</button>
</PermissionGuard>
```

Options:
- `permission`: Single permission code
- `anyPermissions`: Array of permissions (ANY match grants access)
- `allPermissions`: Array of permissions (ALL required)
- `adminOnly`: For admin-only sections
- `page`: Page name (checks `access:{page}` permission)
- `fallback`: Content to show if permission check fails
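For orientation, a minimal sketch of how a guard like this could be built on top of the `usePermissions` hook described below; the prop handling and the import path are assumptions inferred from the options above, not the actual source:

```tsx
import { ReactNode } from "react";
import { usePermissions } from "@/hooks/usePermissions"; // hypothetical import path

interface PermissionGuardProps {
  permission?: string;
  anyPermissions?: string[];
  allPermissions?: string[];
  adminOnly?: boolean;
  page?: string;
  fallback?: ReactNode;
  children: ReactNode;
}

export function PermissionGuard({
  permission,
  anyPermissions,
  allPermissions,
  adminOnly,
  page,
  fallback = null,
  children,
}: PermissionGuardProps) {
  const { hasPermission, hasPageAccess, isAdmin } = usePermissions();

  // Admins pass every check; everyone else must satisfy each supplied option.
  const allowed =
    isAdmin ||
    (!adminOnly &&
      (!permission || hasPermission(permission)) &&
      (!anyPermissions || anyPermissions.some((p) => hasPermission(p))) &&
      (!allPermissions || allPermissions.every((p) => hasPermission(p))) &&
      (!page || hasPageAccess(page)));

  return <>{allowed ? children : fallback}</>;
}
```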
### PermissionProtectedRoute

Protects entire pages based on page access permissions.

```tsx
<Route path="/products" element={
  <PermissionProtectedRoute page="products">
    <Products />
  </PermissionProtectedRoute>
} />
```

### ProtectedSection

Protects sections within a page based on action permissions.

```tsx
<ProtectedSection page="products" action="create">
  <button>Add Product</button>
</ProtectedSection>
```

### PermissionButton

Button that automatically handles permissions.

```tsx
<PermissionButton
  page="products"
  action="create"
  onClick={handleCreateProduct}
>
  Add Product
</PermissionButton>
```

### SettingsSection

Specific component for settings with built-in permission checks.

```tsx
<SettingsSection
  title="System Settings"
  description="Configure global settings"
  permission="edit:system_settings"
>
  {/* Settings content */}
</SettingsSection>
```

## Permission Hooks

### usePermissions

Core hook for checking any permission.

```tsx
const { hasPermission, hasPageAccess, isAdmin } = usePermissions();

if (hasPermission('delete:products')) {
  // Can delete products
}
```

### usePagePermission

Specialized hook for page-level permissions.

```tsx
const { canView, canCreate, canEdit, canDelete } = usePagePermission('products');

if (canEdit()) {
  // Can edit products
}
```
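Given the `{action}:{resource}` naming convention, `usePagePermission` can be a thin wrapper over `usePermissions`. A plausible sketch (the import path is an assumption; the returned names match the example above):

```tsx
import { usePermissions } from "@/hooks/usePermissions"; // hypothetical import path

export function usePagePermission(page: string) {
  const { hasPermission, hasPageAccess } = usePermissions();

  return {
    canView: () => hasPageAccess(page),               // checks `access:{page}`
    canCreate: () => hasPermission(`create:${page}`),
    canEdit: () => hasPermission(`edit:${page}`),
    canDelete: () => hasPermission(`delete:${page}`),
  };
}
```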
## Database Schema

Permissions are stored in the database:
- `permissions` table: Stores all available permissions
- `user_permissions` junction table: Maps permissions to users

Admin users automatically have all permissions.
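On the server, a check against this schema reduces to an admin shortcut plus a junction-table lookup. A sketch using `pg`; every table and column name beyond the two documented tables (`users.is_admin`, `permissions.code`, the junction columns) is an assumption:

```typescript
import { Pool } from "pg";

const pool = new Pool();

// True if the user is an admin or has been granted the permission code.
async function userHasPermission(userId: number, code: string): Promise<boolean> {
  const { rows } = await pool.query(
    `SELECT 1
       FROM users u
       LEFT JOIN user_permissions up ON up.user_id = u.id
       LEFT JOIN permissions p ON p.id = up.permission_id AND p.code = $2
      WHERE u.id = $1
        AND (u.is_admin OR p.id IS NOT NULL)
      LIMIT 1`,
    [userId, code]
  );
  return rows.length > 0;
}
```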
## Common Permission Codes

| Code | Description |
|------|-------------|
| `access:dashboard` | Access to Dashboard page |
| `access:products` | Access to Products page |
| `create:products` | Create new products |
| `edit:products` | Edit existing products |
| `delete:products` | Delete products |
| `view:users` | View user accounts |
| `edit:users` | Edit user accounts |
| `manage:permissions` | Assign permissions to users |

## Implementation Examples

### Page Protection

In `App.tsx`:
```tsx
<Route path="/products" element={
  <PermissionProtectedRoute page="products">
    <Products />
  </PermissionProtectedRoute>
} />
```

### Component Level Protection

```tsx
const { canEdit } = usePagePermission('products');

function handleEdit() {
  if (!canEdit()) {
    toast.error("You don't have permission");
    return;
  }
  // Edit logic
}
```

### UI Element Protection

```tsx
<PermissionButton
  page="products"
  action="delete"
  onClick={handleDelete}
>
  Delete
</PermissionButton>
```
396 docs/ValidationStep-Refactoring-Plan.md Normal file
@@ -0,0 +1,396 @@
# ValidationStep Component Refactoring Plan

## Overview

This document outlines a comprehensive plan to refactor the current ValidationStep component (4000+ lines) into a more maintainable, modular structure. The new implementation will be developed alongside the existing component without modifying the original code. Once completed, the previous step in the workflow will offer the option to continue to either the original ValidationStep or the new implementation.

## Table of Contents

1. [Current Component Analysis](#current-component-analysis)
2. [New Architecture Design](#new-architecture-design)
3. [Component Structure](#component-structure)
4. [State Management](#state-management)
5. [Key Features Implementation](#key-features-implementation)
6. [Integration Plan](#integration-plan)
7. [Testing Strategy](#testing-strategy)
8. [Project Timeline](#project-timeline)
9. [Design Principles](#design-principles)
10. [Appendix: Function Reference](#appendix-function-reference)

## Current Component Analysis

The current ValidationStep component has several issues:

- **Size**: At over 4000 lines, it's difficult to maintain and understand
- **Multiple responsibilities**: Handles validation, UI rendering, template management, and more
- **Special cases**: Contains numerous special case handlers and exceptions
- **Complex state management**: State is distributed across multiple useState calls
- **Tightly coupled concerns**: UI, validation logic, and business rules are intertwined

### Key Features to Preserve

1. **Data Validation**
   - Field-level validation (required, regex, unique)
   - Row-level validation (supplier, company fields)
   - UPC validation with API integration
   - AI-assisted validation

2. **Template Management**
   - Saving, loading, and applying templates
   - Template-based validation

3. **UI Components**
   - Editable table with specialized cell renderers
   - Error display and management
   - Filtering and sorting capabilities
   - Status indicators and progress tracking

4. **Special Field Handling**
   - Input fields with price formatting
   - Multi-input fields with separator configuration
   - Select fields with dropdown options
   - Checkbox fields with boolean value mapping
   - UPC fields with specialized validation

5. **User Interaction Flows**
   - Tab and keyboard navigation
   - Bulk operations (select all, apply template)
   - Row validation on value change
   - Error reporting and display

## New Architecture Design

The new architecture will follow these principles:

1. **Separation of Concerns**
   - UI rendering separate from business logic
   - Validation logic isolated from state management
   - Clear interfaces between components

2. **Composable Components**
   - Small, focused components with single responsibilities
   - Reusable pattern for different field types

3. **Centralized State Management**
   - Custom hooks for state management
   - Clear data flow patterns
   - Reduced prop drilling

4. **Consistent Error Handling**
   - Standardized error structure
   - Predictable error propagation
   - User-friendly error display

5. **Performance Optimization**
   - Virtualized table rendering
   - Memoization of expensive computations
   - Deferred validation for better user experience

## Component Structure

The new ValidationStepNew folder has the following structure:

```
ValidationStepNew/
├── index.tsx                     # Main entry point that composes all pieces
├── components/                   # UI Components
│   ├── ValidationContainer.tsx   # Main wrapper component
│   ├── ValidationTable.tsx       # Table implementation
│   ├── ValidationCell.tsx        # Cell component
│   ├── ValidationSidebar.tsx     # Sidebar with controls
│   ├── ValidationToolbar.tsx     # Top toolbar (removed as unnecessary)
│   ├── TemplateManager.tsx       # Template management
│   ├── FilterPanel.tsx           # Filtering interface (integrated into Container)
│   └── cells/                    # Specialized cell renderers
│       ├── InputCell.tsx
│       ├── SelectCell.tsx
│       ├── MultiInputCell.tsx
│       └── CheckboxCell.tsx
├── hooks/                        # Custom hooks
│   ├── useValidationState.tsx    # Main state management
│   ├── useTemplates.tsx          # Template-related logic (integrated into ValidationState)
│   ├── useFilters.tsx            # Filtering logic (integrated into ValidationState)
│   └── useUpcValidation.tsx      # UPC-specific validation
└── utils/                        # Utility functions
    ├── validationUtils.ts        # Validation helper functions
    ├── formatters.ts             # Value formatting utilities
    └── constants.ts              # Constant values and configuration
```

### Component Responsibilities

#### ValidationContainer
- Main container component
- Coordinates between subcomponents
- Manages global state
- Handles navigation events (next, back)
- Contains filter controls

#### ValidationTable
- Displays the data in tabular form
- Manages selection state
- Handles keyboard navigation
- Integrates with TanStack Table
- Displays properly styled rows and cells

#### ValidationCell
- Factory component that renders appropriate cell type
- Manages cell-level state
- Handles validation errors display
- Manages edit mode
- Shows consistent error indicators

#### TemplateManager
- Handles template selection UI
- Provides template save/load functionality
- Manages template application to rows

#### Cell Components
- **InputCell**: Handles text input with multiline and price support
- **MultiInputCell**: Handles multiple values with separator configuration
- **SelectCell**: Command/popover component for single selection
- **CheckboxCell**: Boolean value selection with mapping support

## State Management

### Core State Interface

```typescript
interface ValidationState<T extends string> {
  // Core data
  data: RowData<T>[];
  filteredData: RowData<T>[];

  // Validation state
  isValidating: boolean;
  validationErrors: Map<number, Record<string, Error[]>>;
  rowValidationStatus: Map<number, 'pending' | 'validating' | 'validated' | 'error'>;

  // Selection state
  rowSelection: RowSelectionState;

  // Template state
  templates: Template[];
  selectedTemplateId: string | null;

  // Filter state
  filters: FilterState;

  // Methods
  updateRow: (rowIndex: number, key: T, value: any) => void;
  validateRow: (rowIndex: number) => Promise<void>;
  validateUpc: (rowIndex: number, upcValue: string) => Promise<void>;
  applyTemplate: (templateId: string, rowIndexes: number[]) => void;
  saveTemplate: (name: string, type: string) => void;
  setFilters: (newFilters: Partial<FilterState>) => void;
  // Additional methods...
}
```

### useValidationState Hook

The main state management hook handles:

- Data manipulation (update, sort, filter)
- Selection management
- Validation coordination
- Integration with validation utilities
- Template management
- Filtering and sorting
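As a usage sketch, a consumer such as ValidationContainer might wire the hook up roughly as follows; the option names in the argument object are illustrative assumptions, not the hook's actual signature, while the destructured names come from the interface above:

```tsx
// Inside ValidationContainer (illustrative only)
const {
  filteredData,
  validationErrors,
  updateRow,
  validateRow,
  applyTemplate,
  setFilters,
} = useValidationState({
  initialData: data, // rows produced by the previous import step
  fields,            // field definitions, including fieldType per column
  rowHook,           // optional row-level validation hook
  tableHook,         // optional table-level validation hook
});
```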
## Key Features Implementation

### 1. Field Type Handling

Implemented a strategy pattern for different field types:

```tsx
// In ValidationCell
const renderCellContent = () => {
  const fieldType = field.fieldType.type

  switch (fieldType) {
    case 'input':
      return <InputCell<T> field={field} value={value} onChange={onChange} ... />
    case 'multi-input':
      return <MultiInputCell<T> field={field} value={value} onChange={onChange} ... />
    case 'select':
      return <SelectCell<T> field={field} value={value} onChange={onChange} ... />
    // etc.
  }
}
```

### 2. Validation Logic

Validation is broken down into clear steps:

1. **Field Validation**: Apply field-level validations (required, regex, etc.)
2. **Row Validation**: Apply row-level validations and rowHook
3. **Table Validation**: Apply table-level validations (unique) and tableHook

Validation now happens automatically without explicit buttons, with immediate feedback on field blur.
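A minimal sketch of how these three stages can compose; the error shapes mirror the `validationErrors` map from the state interface, but the function and hook signatures here are assumptions:

```typescript
type FieldErrors = Record<string, Error[]>;

// Illustrative three-stage pipeline; the helper signatures are assumptions.
async function validateAll<Row>(
  rows: Row[],
  validateFields: (row: Row) => FieldErrors,                      // stage 1
  rowHook?: (row: Row, index: number) => Promise<FieldErrors>,    // stage 2
  tableHook?: (rows: Row[]) => Promise<Map<number, FieldErrors>>  // stage 3
): Promise<Map<number, FieldErrors>> {
  const errors = new Map<number, FieldErrors>();

  for (let i = 0; i < rows.length; i++) {
    // 1. Field-level checks: required, regex, etc.
    const rowErrors: FieldErrors = { ...validateFields(rows[i]) };

    // 2. Row-level checks from the user-supplied rowHook.
    if (rowHook) Object.assign(rowErrors, await rowHook(rows[i], i));

    if (Object.keys(rowErrors).length > 0) errors.set(i, rowErrors);
  }

  // 3. Table-level checks (e.g. uniqueness) merge in last.
  if (tableHook) {
    for (const [i, tableErrors] of await tableHook(rows)) {
      errors.set(i, { ...errors.get(i), ...tableErrors });
    }
  }

  return errors;
}
```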
### 3. UI Components

UI components follow these principles:

1. **Consistent Styling**: All components use shadcn UI for consistent look and feel
2. **Visual Feedback**: Errors are clearly indicated with icons and border styling
3. **Intuitive Editing**: Fields show outlines even when not in focus, and edit on click
4. **Proper Command Pattern**: Select and multi-select fields use command/popover pattern for better UX
5. **Focus Management**: Fields close when clicking away and perform validation on blur

## Design Principles

Based on user preferences and best practices, the following design principles guide this refactoring:

1. **Automatic Validation**
   - Validation should happen automatically without explicit buttons
   - All validation should run on initial data load
   - Fields should validate on blur (when user clicks away)

2. **Modern UI Patterns**
   - Command/popover components for all selects and multi-selects
   - Consistent field outlines and borders even when not in focus
   - Badge patterns for multi-select items
   - Clear visual indicators for errors

3. **Reduced Complexity**
   - Remove unnecessary UI elements like "validate all" buttons
   - Eliminate redundant state and toast notifications
   - Simplify component hierarchy where possible
   - Find root causes rather than adding special cases

4. **Consistent Component Behavior**
   - Fields should close when clicking away
   - All inputs should follow the same editing pattern
   - Error handling should be consistent across all field types
   - Multi-select fields should allow selecting multiple items with clear visual feedback

## Integration Plan

### 1. Creating the New Component Structure

Folder structure has been created without modifying the existing code:

```bash
mkdir -p inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/{components,hooks,utils}
mkdir -p inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells
```

### 2. Implementing Basic Components

Core components have been implemented:

1. Created index.tsx as the main entry point
2. Implemented ValidationContainer with basic state management
3. Created ValidationTable for data display
4. Implemented basic cell rendering with specialized cell types

### 3. Implementing State Management

State management has been implemented:

1. Created useValidationState hook
2. Implemented data transformation utilities
3. Added validation logic

### 4. Integrating with Previous Step

The previous step component allows choosing between validation implementations, enabling gradual testing and adoption.

## Testing Strategy

1. **Unit Tests**
   - Test individual utility functions
   - Test hooks in isolation
   - Test individual UI components

2. **Integration Tests**
   - Test component interactions
   - Test state management flow
   - Test validation logic integration

3. **Comparison Tests**
   - Compare output of new component with original
   - Verify that all functionality works the same

4. **Performance Tests**
   - Measure render times
   - Evaluate memory usage
   - Compare against original component

## Project Timeline

1. **Phase 1: Initial Structure (Completed)**
   - Set up folder structure
   - Implement basic components
   - Create core state management

2. **Phase 2: Core Functionality (In Progress)**
   - Implement validation logic (completed)
   - Create cell renderers (completed)
   - Add template management (in progress)

3. **Phase 3: Special Features (Upcoming)**
   - Implement UPC validation
   - Add AI validation
   - Handle special cases

4. **Phase 4: UI Refinement (Ongoing)**
   - Improve error display (completed)
   - Enhance user interactions (completed)
   - Optimize performance (in progress)

5. **Phase 5: Testing and Integration (Upcoming)**
   - Write tests
   - Fix bugs
   - Integrate with previous step

## Appendix: Function Reference

This section documents the core functions from the original ValidationStep that need to be preserved in the new implementation.

### Validation Functions

1. **validateRegex** - Validates values against regex patterns
2. **getValidationError** - Determines field-level validation errors
3. **validateAndCommit** - Validates and commits new values
4. **validateData** - Validates all data rows
5. **validateUpcAndGenerateItemNumbers** - Validates UPC codes and generates item numbers

### Formatting Functions

1. **formatPrice** - Formats price values
2. **getDisplayValue** - Gets formatted display value based on field type
3. **isMultiInputType** - Checks if field is multi-input type
4. **getMultiInputSeparator** - Gets separator for multi-input fields
5. **isPriceField** - Checks if field should be formatted as price

### Template Functions

1. **loadTemplates** - Loads templates from storage
2. **saveTemplate** - Saves a new template
3. **applyTemplate** - Applies a template to selected rows
4. **getTemplateDisplayText** - Gets display text for a template

### AI Validation Functions

1. **handleAiValidation** - Triggers AI validation
2. **showCurrentPrompt** - Shows current AI prompt
3. **getFieldDisplayValue** - Gets display value for a field
4. **highlightDifferences** - Highlights differences between original and corrected values
5. **getFieldDisplayValueWithHighlight** - Gets display value with highlighted differences
6. **revertAiChange** - Reverts an AI-suggested change
7. **isChangeReverted** - Checks if an AI change has been reverted

### Event Handlers

1. **handleUpcValueUpdate** - Handles UPC value updates
2. **handleBlur** - Handles input blur events
3. **handleWheel** - Handles wheel events for navigation
4. **copyValueDown** - Copies a value to cells below
5. **handleSkuGeneration** - Generates SKUs

By following this refactoring plan, we continue to transform the monolithic ValidationStep component into a modular, maintainable set of components while preserving all existing functionality and aligning with user preferences for design and behavior.
137 docs/ValidationStepNew-Implementation-Status.md Normal file
@@ -0,0 +1,137 @@
# ValidationStepNew Implementation Status

## Overview

This document outlines the current status of the ValidationStepNew implementation, a refactored version of the original ValidationStep component. The goal is to create a more maintainable, modular component that preserves all functionality of the original while eliminating technical debt and implementing modern UI patterns.

## Design Principles

Based on the user's preferences, we're following these core design principles:

1. **Automatic Validation**
   - ✅ Validation runs automatically on data load
   - ✅ No explicit "validate all" button needed
   - ✅ Fields validate on blur when user clicks away
   - ✅ Immediate visual feedback for validation errors

2. **Modern UI Patterns**
   - ✅ Command/popover components for selects and multi-selects
   - ✅ Consistent field outlines and borders even when not in focus
   - ✅ Badge pattern for multi-select field items
   - ✅ Visual indicators for errors with appropriate styling

3. **Reduced Complexity**
   - ✅ Removed unnecessary UI elements like "validate all" button
   - ✅ Eliminated redundant toast notifications
   - ✅ Simplified component hierarchy
   - ✅ Fixed root causes rather than adding special cases

4. **Consistent Behavior**
   - ✅ Fields close when clicking away
   - ✅ All inputs follow the same editing pattern
   - ✅ Error handling is consistent across field types
   - ✅ Multi-select fields allow selecting multiple items

## Completed Components

### Core Structure
- ✅ Main component structure
- ✅ Directory organization
- ✅ TypeScript interfaces
- ✅ Props definition and passing

### State Management
- ✅ `useValidationState` hook for centralized state
- ✅ Data validation logic
- ✅ Integration with rowHook and tableHook
- ✅ Error tracking and management
- ✅ Row selection
- ✅ Automatic validation on data load

### UI Components
- ✅ ValidationContainer with appropriate layout
- ✅ ValidationTable with shadcn UI components
- ✅ ValidationCell factory component
- ✅ Row select/deselect functionality
- ✅ Error display and indicators
- ✅ Selection action bar

### Cell Components
- ✅ InputCell with price and multiline support
- ✅ MultiInputCell with separator configuration
- ✅ SelectCell using command/popover pattern
- ✅ CheckboxCell with boolean mapping
- ✅ Consistent styling across all field types
- ✅ Proper edit/view state management
- ✅ Outlined borders in both edit and view modes

### Utility Functions
- ✅ Value formatting for display
- ✅ Field type detection
- ✅ Error creation and management
- ✅ Price formatting

### UI Improvements
- ✅ Consistent borders and field outlines
- ✅ Fields that properly close when clicking away
- ✅ Multi-select with badge UI pattern
- ✅ Command pattern for searchable select menus
- ✅ Better visual error indication

## Pending Tasks

### Enhanced Validation
- ⏳ AI validation system
- ⏳ Custom validation hooks
- ⏳ Enhanced UPC validation with API integration
- ⏳ Validation visualizations

### Advanced UI Features
- ⏳ Table virtualization for performance
- ⏳ Drag-and-drop reordering
- ⏳ Bulk operations (copy down, fill all, etc.)
- ⏳ Keyboard navigation improvements
- ⏳ Template dialogs and management UI

### Special Features
- ⏳ Image preview integration
- ⏳ SKU generation system
- ⏳ Item number generation
- ⏳ Dependent dropdown values

### Testing
- ⏳ Unit tests for utility functions
- ⏳ Component tests
- ⏳ Integration tests
- ⏳ Performance benchmarks

## Known Issues

1. TypeScript error for `validationDisabled` property in ValidationCell.tsx
2. Some type casting is needed due to complex generic types
3. Need to address edge cases for multi-select fields validation
4. Proper error handling for API calls still needs to be implemented

## Next Steps

1. Fix TypeScript errors in ValidationCell and related components
2. Complete template management functionality
3. Implement UPC validation with API integration
4. Make multi-select field validation more robust
5. Add comprehensive tests

## Performance Improvements

We've already implemented several performance optimizations:

1. ✅ More efficient state updates by removing unnecessary re-renders
2. ✅ Better error handling to prevent cascading validations
3. ✅ Improved component isolation to prevent unnecessary re-renders
4. ✅ Automatic validation that doesn't block the UI

Additional planned improvements:

1. Virtualized table rendering for large datasets
2. Memoization of expensive calculations
3. Optimized state updates to minimize re-renders
4. Batched API calls for validation
72 docs/fix-multi-select.md Normal file
@@ -0,0 +1,72 @@
# Solution: Keeping Dropdowns Open During Multiple Selections

## The Problem

When implementing a multi-select dropdown in React, a common issue occurs:

1. You select an item in the dropdown
2. The `onChange` handler is called, updating the data
3. This triggers a re-render of the parent component (in this case, the entire table)
4. During the re-render, the dropdown is unmounted and remounted
5. This causes the dropdown to close before you can make multiple selections

## The Solution: Deferred State Updates

The key insight is to **separate local state management from parent state updates**:

```typescript
// Step 1: Add local state to track selections
const [internalValue, setInternalValue] = useState<string[]>(value)

// Step 2: Handle popover open state changes
const handleOpenChange = useCallback((newOpen: boolean) => {
  if (open && !newOpen) {
    // Only update parent state when dropdown closes
    if (JSON.stringify(internalValue) !== JSON.stringify(value)) {
      onChange(internalValue);
    }
  }

  setOpen(newOpen);

  if (newOpen) {
    // Sync internal state with external state when opening
    setInternalValue(value);
  }
}, [open, internalValue, value, onChange]);

// Step 3: Toggle selection only updates internal state
const toggleSelection = useCallback((selectedValue: string) => {
  setInternalValue(prev => {
    if (prev.includes(selectedValue)) {
      return prev.filter(v => v !== selectedValue);
    } else {
      return [...prev, selectedValue];
    }
  });
}, []);
```

## Why This Works

1. **No parent re-renders during selection**: Since we're only updating local state, the parent component doesn't re-render during selection.
2. **Consistent UI**: The dropdown shows accurate selected states using the internal value.
3. **Data integrity**: The final selections are properly synchronized back to the parent when done.
4. **Resilient to external changes**: Initial state is synchronized when opening the dropdown.

## Implementation Steps

1. Create a local state variable to track selections inside the component
2. Only make selections against this local state while the dropdown is open
3. Defer updating the parent until the dropdown is explicitly closed
4. When opening, synchronize the internal state with the external value
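Put together, a stripped-down cell using this pattern might look like the following; plain buttons and checkboxes stand in for the real command/popover UI, so this is a sketch of the pattern, not the production MultiSelectCell:

```tsx
import { useCallback, useState } from "react";

interface MultiSelectCellProps {
  options: string[];
  value: string[];
  onChange: (next: string[]) => void; // parent update, deferred until close
}

export function MultiSelectCell({ options, value, onChange }: MultiSelectCellProps) {
  const [open, setOpen] = useState(false);
  const [internalValue, setInternalValue] = useState<string[]>(value);

  const handleOpenChange = useCallback((newOpen: boolean) => {
    if (open && !newOpen && JSON.stringify(internalValue) !== JSON.stringify(value)) {
      onChange(internalValue); // commit once, on close
    }
    setOpen(newOpen);
    if (newOpen) setInternalValue(value); // re-sync with the parent on open
  }, [open, internalValue, value, onChange]);

  const toggleSelection = useCallback((selected: string) => {
    setInternalValue(prev =>
      prev.includes(selected) ? prev.filter(v => v !== selected) : [...prev, selected]
    );
  }, []);

  return (
    <div>
      <button onClick={() => handleOpenChange(!open)}>
        {internalValue.join(", ") || "Select…"}
      </button>
      {open && (
        <ul>
          {options.map(option => (
            <li key={option}>
              <label>
                <input
                  type="checkbox"
                  checked={internalValue.includes(option)}
                  onChange={() => toggleSelection(option)}
                />
                {option}
              </label>
            </li>
          ))}
        </ul>
      )}
    </div>
  );
}
```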
## Benefits

This pattern:
- Avoids re-render cycles that would unmount the dropdown
- Maintains UI consistency during multi-selection
- Simplifies the component's interaction with parent components
- Works with existing component lifecycles rather than fighting against them

This solution is much simpler than trying to prevent event propagation or manipulating DOM events, and addresses the root cause of the issue: premature re-rendering.
342 docs/import-from-prod-data-mapping.md Normal file
@@ -0,0 +1,342 @@
# MySQL to PostgreSQL Import Process Documentation

This document outlines the data import process from the production MySQL database to the local PostgreSQL database, focusing on column mappings, data transformations, and the overall import architecture.

## Table of Contents
1. [Overview](#overview)
2. [Import Architecture](#import-architecture)
3. [Column Mappings](#column-mappings)
   - [Categories](#categories)
   - [Products](#products)
   - [Product Categories (Relationship)](#product-categories-relationship)
   - [Orders](#orders)
   - [Purchase Orders](#purchase-orders)
   - [Metadata Tables](#metadata-tables)
4. [Special Calculations](#special-calculations)
5. [Implementation Notes](#implementation-notes)

## Overview

The import process extracts data from a MySQL 5.7 production database and imports it into a PostgreSQL database. It can operate in two modes:

- **Full Import**: Imports all data regardless of last sync time
- **Incremental Import**: Only imports data that has changed since the last import

The process handles four main data types:
- Categories (product categorization hierarchy)
- Products (inventory items)
- Orders (sales records)
- Purchase Orders (vendor orders)

## Import Architecture

The import process follows these steps:

1. **Establish Connection**: Creates an SSH tunnel to the production server and establishes database connections
2. **Setup Import History**: Creates a record of the current import operation
3. **Import Categories**: Processes product categories in hierarchical order
4. **Import Products**: Processes products with their attributes and category relationships
5. **Import Orders**: Processes customer orders with line items, taxes, and discounts
6. **Import Purchase Orders**: Processes vendor purchase orders with line items
7. **Record Results**: Updates the import history with results
8. **Close Connections**: Cleans up connections and resources

Each import step uses temporary tables for processing and wraps operations in transactions to ensure data consistency.
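A sketch of that per-step temp-table-plus-transaction pattern using `pg`; the staging table, conflict target, and updated columns here are illustrative assumptions, not the actual script:

```typescript
import { Client } from "pg";

// Illustrative shape of one import step: stage rows, then commit atomically.
async function runImportStep(pg: Client): Promise<void> {
  await pg.query("BEGIN");
  try {
    // Stage the extracted MySQL rows in a temp table scoped to this transaction.
    await pg.query("CREATE TEMP TABLE staging_products (LIKE products) ON COMMIT DROP");
    // ... bulk-insert the transformed rows into staging_products here ...

    // Upsert from staging into the real table in one statement.
    await pg.query(`
      INSERT INTO products
      SELECT * FROM staging_products
      ON CONFLICT (pid) DO UPDATE SET updated_at = EXCLUDED.updated_at
    `);
    await pg.query("COMMIT"); // the step lands all-or-nothing
  } catch (err) {
    await pg.query("ROLLBACK"); // leave the target untouched on failure
    throw err;
  }
}
```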
## Column Mappings

### Categories
| PostgreSQL Column | MySQL Source | Transformation |
|-------------------|---------------------------------|----------------------------------------------|
| cat_id | product_categories.cat_id | Direct mapping |
| name | product_categories.name | Direct mapping |
| type | product_categories.type | Direct mapping |
| parent_id | product_categories.master_cat_id| NULL for top-level categories (types 10, 20) |
| description | product_categories.combined_name| Direct mapping |
| status | N/A | Hard-coded 'active' |
| created_at | N/A | Current timestamp |
| updated_at | N/A | Current timestamp |

**Notes:**
- Categories are processed in hierarchical order by type: [10, 20, 11, 21, 12, 13]
- Type 10/20 are top-level categories with no parent
- Types 11/21/12/13 are child categories that reference parent categories
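The ordering matters because child categories reference parents that must already exist. Sketched (the helper is an illustrative stub, not the real function):

```typescript
// Illustrative stub; the real implementation maps product_categories rows.
declare function importCategoriesOfType(type: number): Promise<void>;

// Parents (types 10 and 20) are imported before the child types that reference them.
const CATEGORY_TYPE_ORDER = [10, 20, 11, 21, 12, 13];

async function importCategories(): Promise<void> {
  for (const type of CATEGORY_TYPE_ORDER) {
    await importCategoriesOfType(type); // one hierarchy level at a time
  }
}
```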
### Products
| PostgreSQL Column | MySQL Source | Transformation |
|----------------------|----------------------------------|---------------------------------------------------------------|
| pid | products.pid | Direct mapping |
| title | products.description | Direct mapping |
| description | products.notes | Direct mapping |
| sku | products.itemnumber | Fallback to 'NO-SKU' if empty |
| stock_quantity | shop_inventory.available_local | Capped at 5000, minimum 0 |
| preorder_count | current_inventory.onpreorder | Default 0 |
| notions_inv_count | product_notions_b2b.inventory | Default 0 |
| price | product_current_prices.price_each| Default 0, filtered on active=1 |
| regular_price | products.sellingprice | Default 0 |
| cost_price | product_inventory | Weighted average: SUM(costeach * count) / SUM(count) when count > 0, or latest costeach |
| vendor | suppliers.companyname | Via supplier_item_data.supplier_id |
| vendor_reference | supplier_item_data | supplier_itemnumber or notions_itemnumber based on vendor |
| notions_reference | supplier_item_data.notions_itemnumber | Direct mapping |
| brand | product_categories.name | Linked via products.company |
| line | product_categories.name | Linked via products.line |
| subline | product_categories.name | Linked via products.subline |
| artist | product_categories.name | Linked via products.artist |
| categories | product_category_index | Comma-separated list of category IDs |
| created_at | products.date_created | Validated date, NULL if invalid |
| first_received | products.datein | Validated date, NULL if invalid |
| landing_cost_price | NULL | Not set |
| barcode | products.upc | Direct mapping |
| harmonized_tariff_code| products.harmonized_tariff_code | Direct mapping |
| updated_at | products.stamp | Validated date, NULL if invalid |
| visible | shop_inventory | Calculated from show + buyable > 0 |
| managing_stock | N/A | Hard-coded true |
| replenishable | Multiple fields | Complex calculation based on reorder, dates, etc. |
| permalink | N/A | Constructed URL with product ID |
| moq | supplier_item_data | notions_qty_per_unit or supplier_qty_per_unit, minimum 1 |
| uom | N/A | Hard-coded 1 |
| rating | products.rating | Direct mapping |
| reviews | products.rating_votes | Direct mapping |
| weight | products.weight | Direct mapping |
| length | products.length | Direct mapping |
| width | products.width | Direct mapping |
| height | products.height | Direct mapping |
| country_of_origin | products.country_of_origin | Direct mapping |
| location | products.location | Direct mapping |
| total_sold | order_items | SUM(qty_ordered) for all order_items where prod_pid = pid |
| baskets | mybasket | COUNT of records where mb.item = pid and qty > 0 |
| notifies | product_notify | COUNT of records where pn.pid = pid |
| date_last_sold | product_last_sold.date_sold | Validated date, NULL if invalid |
| image | N/A | Constructed from pid and image URL pattern |
| image_175 | N/A | Constructed from pid and image URL pattern |
| image_full | N/A | Constructed from pid and image URL pattern |
| options | NULL | Not set |
| tags | NULL | Not set |

**Notes:**
- Replenishable calculation:
```sql
CASE
  WHEN p.reorder < 0 THEN 0
  WHEN (
    (COALESCE(pls.date_sold, '0000-00-00') = '0000-00-00' OR pls.date_sold <= DATE_SUB(CURRENT_DATE, INTERVAL 5 YEAR))
    AND (p.datein = '0000-00-00 00:00:00' OR p.datein <= DATE_SUB(CURRENT_TIMESTAMP, INTERVAL 5 YEAR))
    AND (p.date_refill = '0000-00-00 00:00:00' OR p.date_refill <= DATE_SUB(CURRENT_TIMESTAMP, INTERVAL 5 YEAR))
  ) THEN 0
  ELSE 1
END
```

In business terms, a product is considered NOT replenishable only if:
- It was manually flagged as not replenishable (negative reorder value)
- OR it shows no activity across ALL metrics (no sales AND no receipts AND no refills in the past 5 years)
- Image URLs are constructed using this pattern:
```javascript
// Builds the thumbnail, 175x175, and full-size URLs from the product id (pid),
// image id (iid), and the configured base URL.
function buildImageUrls(pid, iid, imageUrlBase) {
  const paddedPid = pid.toString().padStart(6, '0');
  const prefix = paddedPid.slice(0, 3);
  const basePath = `${imageUrlBase}${prefix}/${pid}`;
  return {
    image: `${basePath}-t-${iid}.jpg`,
    image_175: `${basePath}-175x175-${iid}.jpg`,
    image_full: `${basePath}-o-${iid}.jpg`
  };
}
```
### Product Categories (Relationship)

| PostgreSQL Column | MySQL Source | Transformation |
|-------------------|-----------------------------------|---------------------------------------------------------------|
| pid | products.pid | Direct mapping |
| cat_id | product_category_index.cat_id | Direct mapping, filtered by category types |

**Notes:**
- Only categories of types 10, 20, 11, 21, 12, 13 are imported
- Categories 16 and 17 are explicitly excluded
### Orders

| PostgreSQL Column | MySQL Source | Transformation |
|-------------------|-----------------------------------|---------------------------------------------------------------|
| order_number | order_items.order_id | Direct mapping |
| pid | order_items.prod_pid | Direct mapping |
| sku | order_items.prod_itemnumber | Fallback to 'NO-SKU' if empty |
| date | _order.date_placed_onlydate | Via join to _order table |
| price | order_items.prod_price | Direct mapping |
| quantity | order_items.qty_ordered | Direct mapping |
| discount | Multiple sources | Complex calculation (see notes) |
| tax | order_tax_info_products.item_taxes_to_collect | Via latest order_tax_info record |
| tax_included | N/A | Hard-coded false |
| shipping | N/A | Hard-coded 0 |
| customer | _order.order_cid | Direct mapping |
| customer_name | users | CONCAT(users.firstname, ' ', users.lastname) |
| status | _order.order_status | Direct mapping |
| canceled | _order.date_cancelled | Boolean: true if date_cancelled is not '0000-00-00 00:00:00' |
| costeach | order_costs | From latest record or fallback to price * 0.5 |

**Notes:**

- Only orders with order_status >= 15 and with a valid date_placed are processed
- For incremental imports, only orders modified since last sync are processed
- Discount calculation combines three sources (a worked example follows these notes):
  1. Base discount: order_items.prod_price_reg - order_items.prod_price
  2. Promo discount: SUM of order_discount_items.amount
  3. Proportional order discount: the line's proportional share of any order-level discount, based on the order subtotal

```sql
(oi.base_discount +
 COALESCE(ot.promo_discount, 0) +
 CASE
   WHEN om.summary_discount > 0 AND om.summary_subtotal > 0 THEN
     ROUND((om.summary_discount * (oi.price * oi.quantity)) / NULLIF(om.summary_subtotal, 0), 2)
   ELSE 0
 END)::DECIMAL(10,2)
```

- Taxes are taken from the latest tax record for an order
- Cost data is taken from the latest non-pending cost record

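As a worked example with hypothetical figures: a line with prod_price_reg 50.00 and prod_price 40.00 has a base discount of 10.00; if its order_discount_items sum to 2.00, and a 20.00 order-level discount applies to a 200.00 subtotal, the line (price * quantity = 40.00) receives ROUND(20 * 40 / 200, 2) = 4.00, for a total discount of 16.00.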
### Purchase Orders

| PostgreSQL Column | MySQL Source | Transformation |
|-------------------|-----------------------------------|---------------------------------------------------------------|
| po_id | po.po_id | Default 0 if NULL |
| pid | po_products.pid | Direct mapping |
| sku | products.itemnumber | Fallback to 'NO-SKU' if empty |
| name | products.description | Fallback to 'Unknown Product' |
| cost_price | po_products.cost_each | Direct mapping |
| po_cost_price | po_products.cost_each | Duplicate of cost_price |
| vendor | suppliers.companyname | Fallback to 'Unknown Vendor' if empty |
| date | po.date_ordered | Fallback to po.date_created if NULL |
| expected_date | po.date_estin | Direct mapping |
| status | po.status | Default 1 if NULL |
| notes | po.short_note | Fallback to po.notes if NULL |
| ordered | po_products.qty_each | Direct mapping |
| received | N/A | Hard-coded 0 |
| receiving_status | N/A | Hard-coded 1 |

**Notes:**

- Only POs created within the last year (incremental import) or the last 5 years (full import) are processed
- For incremental imports, only POs modified since last sync are processed

### Metadata Tables

#### import_history

| PostgreSQL Column | Source | Notes |
|-------------------|-----------------------------------|---------------------------------------------------------------|
| id | Auto-increment | Primary key |
| table_name | Code | 'all_tables' for overall import |
| start_time | NOW() | Import start time |
| end_time | NOW() | Import completion time |
| duration_seconds | Calculation | Elapsed seconds |
| is_incremental | INCREMENTAL_UPDATE | Flag from config |
| records_added | Calculation | Sum from all imports |
| records_updated | Calculation | Sum from all imports |
| status | Code | 'running', 'completed', 'failed', or 'cancelled' |
| error_message | Exception | Error message if failed |
| additional_info | JSON | Configuration and results |

#### sync_status

| PostgreSQL Column | Source | Notes |
|----------------------|--------------------------------|---------------------------------------------------------------|
| table_name | Code | Name of imported table |
| last_sync_timestamp | NOW() | Timestamp of successful sync |
| last_sync_id | NULL | Not used currently |

## Special Calculations

### Date Validation

MySQL dates are validated before insertion into PostgreSQL:

```javascript
function validateDate(mysqlDate) {
  if (!mysqlDate || mysqlDate === '0000-00-00' || mysqlDate === '0000-00-00 00:00:00') {
    return null;
  }
  // Check if the date is valid
  const date = new Date(mysqlDate);
  return isNaN(date.getTime()) ? null : mysqlDate;
}
```

### Retry Mechanism

Operations that might fail temporarily are retried with exponential backoff:

```javascript
async function withRetry(operation, errorMessage) {
  let lastError;
  for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
    try {
      return await operation();
    } catch (error) {
      lastError = error;
      console.error(`${errorMessage} (Attempt ${attempt}/${MAX_RETRIES}):`, error);
      if (attempt < MAX_RETRIES) {
        const backoffTime = RETRY_DELAY * Math.pow(2, attempt - 1);
        await new Promise(resolve => setTimeout(resolve, backoffTime));
      }
    }
  }
  throw lastError;
}
```

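For reference, a minimal usage sketch: the constant values below are assumptions (the real values come from the import configuration), and `mysqlConnection` is a hypothetical connection object.

```javascript
// Hypothetical usage of withRetry; constants and connection are assumed.
const MAX_RETRIES = 3;
const RETRY_DELAY = 1000; // base backoff in ms: 1s, then 2s, then 4s, ...

async function fetchProducts(mysqlConnection) {
  return withRetry(
    () => mysqlConnection.query('SELECT pid, itemnumber FROM products'),
    'Failed to fetch products'
  );
}
```
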
### Progress Tracking

Progress is tracked with estimated time remaining:

```javascript
function estimateRemaining(startTime, current, total) {
  if (current === 0) return "Calculating...";
  const elapsedSeconds = (Date.now() - startTime) / 1000;
  const itemsPerSecond = current / elapsedSeconds;
  const remainingItems = total - current;
  const remainingSeconds = remainingItems / itemsPerSecond;
  return formatElapsedTime(remainingSeconds);
}
```

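`formatElapsedTime` is defined elsewhere in the scripts; a minimal sketch of what such a helper could look like (the real implementation may differ):

```javascript
// Hypothetical sketch: format a duration in seconds as "1h 2m 3s".
function formatElapsedTime(totalSeconds) {
  const seconds = Math.max(0, Math.round(totalSeconds));
  const h = Math.floor(seconds / 3600);
  const m = Math.floor((seconds % 3600) / 60);
  const s = seconds % 60;
  return [h && `${h}h`, m && `${m}m`, `${s}s`].filter(Boolean).join(' ');
}
```
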
## Implementation Notes

### Transaction Management

All imports use transactions to ensure data consistency:

- **Categories**: Uses savepoints for each category type (see the sketch after this list)
- **Products**: Uses a single transaction for the entire import
- **Orders**: Uses a single transaction with temporary tables
- **Purchase Orders**: Uses a single transaction with temporary tables

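A minimal sketch of the savepoint pattern for the categories import, assuming node-postgres; `categoryTypes` and `importCategoryType` are hypothetical names, not the actual import code:

```javascript
// Each category type gets its own savepoint, so one failing type rolls back
// alone while the rest of the import proceeds inside the outer transaction.
async function importCategories(client, categoryTypes) {
  await client.query('BEGIN');
  try {
    for (const type of categoryTypes) {
      await client.query('SAVEPOINT category_type');
      try {
        await importCategoryType(client, type); // hypothetical per-type import
      } catch (err) {
        // Roll back only this category type; earlier types are kept.
        await client.query('ROLLBACK TO SAVEPOINT category_type');
        console.error(`Category type ${type} failed:`, err);
      }
    }
    await client.query('COMMIT');
  } catch (err) {
    await client.query('ROLLBACK');
    throw err;
  }
}
```
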
### Memory Usage Optimization

To minimize memory usage when processing large datasets:

1. Data is processed in batches (100-5000 records per batch); a minimal batching sketch follows this list
2. Temporary tables are used for intermediate data
3. Some queries use cursors to avoid loading all results at once

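A minimal sketch of the batching pattern (function names and the batch size are assumptions, not the actual script code):

```javascript
// Pull and process rows in fixed-size pages so the full result set never
// sits in memory at once.
const BATCH_SIZE = 1000; // assumed value; the scripts use 100-5000

async function processInBatches(fetchBatch, handleBatch) {
  let offset = 0;
  while (true) {
    const rows = await fetchBatch(offset, BATCH_SIZE); // e.g. LIMIT/OFFSET query
    if (rows.length === 0) break;
    await handleBatch(rows);
    offset += rows.length;
  }
}
```
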
### MySQL vs PostgreSQL Compatibility

The scripts handle differences between MySQL and PostgreSQL:

1. MySQL-specific syntax like `USE INDEX` is removed for PostgreSQL
2. MySQL's `GROUP_CONCAT` becomes string operations in PostgreSQL (e.g. `GROUP_CONCAT(name SEPARATOR ', ')` maps to `string_agg(name, ', ')`)
3. Transaction syntax differences are abstracted in the connection wrapper
4. PostgreSQL's `ON CONFLICT` replaces MySQL's `ON DUPLICATE KEY UPDATE`

### SSH Tunnel

Database connections go through an SSH tunnel for security:

```javascript
// resolve/reject come from the surrounding Promise (not shown here)
ssh.forwardOut(
  "127.0.0.1",                  // source address on the local side
  0,                            // source port (0 lets the OS pick one)
  sshConfig.prodDbConfig.host,  // destination host of the production DB
  sshConfig.prodDbConfig.port,  // destination port
  (err, stream) => {
    if (err) return reject(err); // bail out instead of resolving on error
    resolve({ ssh, stream });
  }
);
```
docs/inventory-calculation-reference.md (new file, 1380 lines; diff suppressed because it is too large)

docs/metrics-calculation-system.md (new file, 1065 lines; diff suppressed because it is too large)
docs/routes-cleanup.md (new file, 271 lines)

**Analysis of Potential Issues**

1. **Obsolete Functionality:**
    * **`config.js` Legacy Endpoints:** The endpoints `GET /config/`, `PUT /config/stock-thresholds/:id`, `PUT /config/lead-time-thresholds/:id`, `PUT /config/sales-velocity/:id`, `PUT /config/abc-classification/:id`, `PUT /config/safety-stock/:id`, and `PUT /config/turnover/:id` appear **highly likely to be obsolete**. They reference older, single-row config tables (`stock_thresholds`, etc.) while newer endpoints (`/config/global`, `/config/products`, `/config/vendors`) manage settings in more structured tables (`settings_global`, `settings_product`, `settings_vendor`). Unless specifically required for backward compatibility, these legacy endpoints should be removed to avoid confusion and potential data conflicts.
    * **`analytics.js` Forecast Endpoint (`GET /analytics/forecast`):** This endpoint uses **MySQL syntax** (`DATEDIFF`, `DATE_FORMAT`, `JSON_OBJECT`, `?` placeholders) but seems intended to run within the analytics module, which otherwise uses PostgreSQL (`req.app.locals.pool`, `date_trunc`, `::text`, `$1` placeholders). This endpoint is likely **obsolete or misplaced** and will not function correctly against the PostgreSQL database.
    * **`csv.js` Redundant Actions:**
        * `POST /csv/update` seems redundant with `POST /csv/full-update`. The latter uses the `runScript` helper and dedicated state (`activeFullUpdate`), appearing more robust. `/csv/update` might be older or incomplete.
        * `POST /csv/reset` seems redundant with `POST /csv/full-reset`. Similar reasoning applies; `/csv/full-reset` appears preferred.
    * **`products.js` Import Endpoint (`POST /products/import`):** This is **dangerous duplication**. The `/csv` module handles imports (`/csv/import`, `/csv/import-from-prod`) with locking (`activeImport`) to prevent concurrent operations. This endpoint lacks such locking and could corrupt data if run simultaneously with other CSV/reset operations. It should likely be removed.
    * **`products.js` Metrics Endpoint (`GET /products/:id/metrics`):** This is redundant. The `/metrics/:pid` endpoint provides the same, possibly more comprehensive, data directly from the `product_metrics` table. Clients should use `/metrics/:pid` instead.

2. **Overlap or Inappropriate Duplication of Effort:**
    * **AI Prompt Getters:** `GET /ai-prompts/type/general` and `GET /ai-prompts/type/system` could potentially be handled by adding a query parameter filter to `GET /ai-prompts/` (e.g., `GET /ai-prompts?prompt_type=general`). However, dedicated endpoints for single, specific items can sometimes be simpler. This is more of a design choice than a major issue.
    * **Vendor Performance/Metrics:** There are multiple ways to get vendor performance data:
        * `GET /analytics/vendors` (uses `vendor_metrics`)
        * `GET /dashboard/vendor/performance` (uses `purchase_orders`)
        * `GET /purchase-orders/vendor-metrics` (uses `purchase_orders`)
        * `GET /vendors-aggregate/` (uses `vendor_metrics`, augmented with `purchase_orders`)

      This suggests significant overlap. The `/vendors-aggregate` endpoint seems the most comprehensive, combining pre-aggregated data with some real-time info. The others, especially `/dashboard/vendor/performance` and `/purchase-orders/vendor-metrics`, which calculate directly from `purchase_orders`, might be redundant or less performant.
    * **Product Listing:**
        * `GET /products/` lists products joining `products`, `product_metrics`, and `categories`.
        * `GET /metrics/` lists products primarily from `product_metrics`.

      They offer similar filtering/sorting. If `product_metrics` contains all necessary display fields, `GET /products/` might be partly redundant for simple listing views, although it does provide aggregated category names. Evaluate whether both full list endpoints are necessary.
    * **Image Uploads/Management:** Image handling is split:
        * `products-import.js`: Uploads temporary images for product import to `/uploads/products/`, schedules deletion.
        * `reusable-images.js`: Uploads persistent images to `/uploads/reusable/`, stores metadata in the DB.
        * `products-import.js` has `/check-file` and `/list-uploads` that can see *both* directories, while `reusable-images.js` has a `/check-file` that only sees its own. This separation could be confusing. Clarify the purpose and lifecycle of images in each directory.
    * **Background Task Management (`csv.js`):** The use of `activeImport` for multiple unrelated tasks (import, reset, metrics calc) prevents concurrency, which might be too restrictive. The cancellation logic (`/cancel`) only targets `full-update`/`full-reset`, not tasks locked by `activeImport`. This needs unification.
    * **Analytics/Dashboard Base Table Queries:** Several endpoints in `analytics.js` (`/pricing`, `/categories`) and `dashboard.js` (`/best-sellers`, `/sales/metrics`, `/trending/products`, `/key-metrics`, `/inventory-health`, `/sales-overview`) query base tables (`orders`, `products`, `purchase_orders`) directly, while many others leverage pre-aggregated `_metrics` tables. This inconsistency can lead to performance differences and suggests potential for optimization by using aggregates where possible.

3. **Obvious Mistakes / Data Issues:**
    * **AI Prompt Fetching:** `GET /ai-prompts/company/:companyId`, `/type/general`, and `/type/system` return `result.rows[0]`. This assumes uniqueness. If the underlying DB constraints (`unique_company_prompt`, etc.) fail or aren't present, this could silently hide data if multiple rows match. The use of unique constraint handling in POST/PUT suggests this is likely intended and safe *if* the DB constraints are solid.
    * **Mixed Databases & SSH Tunnels:** The heavy reliance in `ai_validation.js` and `products-import.js` on connecting to a production MySQL DB via SSH tunnel while also using a local PostgreSQL DB adds significant architectural complexity.
        * **Inefficiency:** In `ai_validation.js` (`generateDebugResponse`), an SSH tunnel and MySQL connection (`promptTunnel`, `promptConnection`) are established but seem unused when fetching prompts (which correctly come from the PG pool `res.app.locals.pool`). This is wasted effort.
        * **Improvement:** The `getDbConnection` function in `products-import.js` implements caching/pooling for the SSH/MySQL connection. This is much better and should ideally be used consistently wherever the production DB is accessed (e.g., in `ai_validation.js`).
    * **`products.js` Brand Filtering:** `GET /products/brands` filters brands based on having associated purchase orders with a cost >= 500. This seems arbitrary for a general list of brands and might return incomplete results depending on the use case.
    * **Type Handling:** Ensure `parseValue` handles all required types and edge cases correctly, especially for filtering complex queries in the `*-aggregate` and `metrics` routes. Explicit type casting in SQL (`::numeric`, `::text`, etc.) is generally good practice in PostgreSQL.
    * **Dummy Data:** Several `dashboard.js` endpoints return hardcoded dummy data on errors or when no data is found. While this prevents UI crashes, it can mask real issues. Ensure logging is robust when fallbacks are used.

**Summary of Endpoints**

Here's a summary of the available endpoints, grouped by their likely file/module:

**1. AI Prompts (`ai_prompts.js`)**
* `GET /`: Get all AI prompts.
* `GET /:id`: Get a specific AI prompt by its ID.
* `GET /company/:companyId`: Get the AI prompt for a specific company (expects one). **(Deprecated)**
* `GET /type/general`: Get the general AI prompt (expects one). **(Deprecated)**
* `GET /type/system`: Get the system AI prompt (expects one). **(Deprecated)**
* `GET /by-type`: Get AI prompt by type (general, system, company_specific) with optional company parameter. **(New Consolidated Endpoint)**
* `POST /`: Create a new AI prompt.
* `PUT /:id`: Update an existing AI prompt.
* `DELETE /:id`: Delete an AI prompt.

**2. AI Validation (`ai_validation.js`)**
* `POST /debug`: Generate and view the structure of prompts and taxonomy data (for debugging; doesn't call OpenAI). Connects to Prod MySQL (taxonomy) and Local PG (prompts, performance).
* `POST /validate`: Validate product data using OpenAI. Connects to Prod MySQL (taxonomy) and Local PG (prompts, performance).
* `GET /test-taxonomy`: Test endpoint to query sample taxonomy data from Prod MySQL.

**3. Analytics (`analytics.js`)**
* `GET /stats`: Get overall business statistics from metrics tables.
* `GET /profit`: Get profit analysis data (by category, over time, top products) from metrics tables.
* `GET /vendors`: Get vendor performance analysis from `vendor_metrics`.
* `GET /stock`: Get stock analysis data (turnover, levels, critical items) from metrics tables.
* `GET /pricing`: Get pricing analysis (price points, elasticity, recommendations) - **uses `orders` table**.
* `GET /categories`: Get category performance analysis (revenue, profit, growth, distribution, trends) - **uses `orders` and `products` tables**.
* `GET /forecast`: (**Likely Obsolete/Broken**) Attempts to get forecast data using MySQL syntax.

**4. Brands Aggregate (`brands-aggregate.js`)**
* `GET /filter-options`: Get distinct brand names and statuses for UI filters (from `brand_metrics`).
* `GET /stats`: Get overall statistics related to brands (from `brand_metrics`).
* `GET /`: List brands with aggregated metrics, supporting filtering, sorting, pagination (from `brand_metrics`).

**5. Categories Aggregate (`categories-aggregate.js`)**
* `GET /filter-options`: Get distinct category types, statuses, and counts for UI filters (from `category_metrics` & `categories`).
* `GET /stats`: Get overall statistics related to categories (from `category_metrics` & `categories`).
* `GET /`: List categories with aggregated metrics, supporting filtering, sorting (incl. hierarchy), pagination (from `category_metrics` & `categories`).

**6. Configuration (`config.js`)**
* **(New)** `GET /global`: Get all global settings.
* **(New)** `PUT /global`: Update global settings.
* **(New)** `GET /products`: List product-specific settings with pagination/search.
* **(New)** `PUT /products/:pid`: Update/Create product-specific settings.
* **(New)** `POST /products/:pid/reset`: Reset product settings to defaults.
* **(New)** `GET /vendors`: List vendor-specific settings with pagination/search.
* **(New)** `PUT /vendors/:vendor`: Update/Create vendor-specific settings.
* **(New)** `POST /vendors/:vendor/reset`: Reset vendor settings to defaults.
* **(Legacy/Obsolete)** `GET /`: Get all config from old single-row tables.
* **(Legacy/Obsolete)** `PUT /stock-thresholds/:id`: Update old stock thresholds.
* **(Legacy/Obsolete)** `PUT /lead-time-thresholds/:id`: Update old lead time thresholds.
* **(Legacy/Obsolete)** `PUT /sales-velocity/:id`: Update old sales velocity config.
* **(Legacy/Obsolete)** `PUT /abc-classification/:id`: Update old ABC config.
* **(Legacy/Obsolete)** `PUT /safety-stock/:id`: Update old safety stock config.
* **(Legacy/Obsolete)** `PUT /turnover/:id`: Update old turnover config.

**7. CSV Operations & Background Tasks (`csv.js`)**
* `GET /:type/progress`: SSE endpoint for full update/reset progress.
* `GET /test`: Simple test endpoint.
* `GET /status`: Check status of the generic background task lock (`activeImport`).
* `GET /calculate-metrics/status`: Check status of metrics calculation.
* `GET /history/import`: Get recent import history.
* `GET /history/calculate`: Get recent metrics calculation history.
* `GET /status/modules`: Get last calculation time per module.
* `GET /status/tables`: Get last sync time per table.
* `GET /status/table-counts`: Get record counts for key tables.
* `POST /update`: (**Potentially Obsolete**) Trigger `update-csv.js` script.
* `POST /import`: Trigger `import-csv.js` script.
* `POST /cancel`: Cancel `/full-update` or `/full-reset` task.
* `POST /reset`: (**Potentially Obsolete**) Trigger `reset-db.js` script.
* `POST /reset-metrics`: Trigger `reset-metrics.js` script.
* `POST /calculate-metrics`: Trigger `calculate-metrics.js` script.
* `POST /import-from-prod`: Trigger `import-from-prod.js` script.
* `POST /full-update`: Trigger `full-update.js` script (preferred update).
* `POST /full-reset`: Trigger `full-reset.js` script (preferred reset).

**8. Dashboard (`dashboard.js`)**
* `GET /stock/metrics`: Get dashboard stock summary metrics & brand breakdown.
* `GET /purchase/metrics`: Get dashboard purchase order summary metrics & vendor breakdown.
* `GET /replenishment/metrics`: Get dashboard replenishment summary & top variants.
* `GET /forecast/metrics`: Get dashboard forecast summary, daily, and category breakdown.
* `GET /overstock/metrics`: Get dashboard overstock summary & category breakdown.
* `GET /overstock/products`: Get list of top overstocked products.
* `GET /best-sellers`: Get dashboard best-selling products, brands, categories - **uses `orders`, `products`**.
* `GET /sales/metrics`: Get dashboard sales summary for a period - **uses `orders`**.
* `GET /low-stock/products`: Get list of top low stock/critical products.
* `GET /trending/products`: Get list of trending products - **uses `orders`, `products`**.
* `GET /vendor/performance`: Get dashboard vendor performance details - **uses `purchase_orders`**.
* `GET /key-metrics`: Get dashboard summary KPIs - **uses multiple base tables**.
* `GET /inventory-health`: Get dashboard inventory health overview - **uses `products`, `product_metrics`**.
* `GET /replenish/products`: Get list of products needing replenishment (overlaps `/low-stock/products`).
* `GET /sales-overview`: Get daily sales totals for chart - **uses `orders`**.

**9. Product Import Utilities (`products-import.js`)**
* `POST /upload-image`: Upload temporary product image, schedule deletion.
* `DELETE /delete-image`: Delete temporary product image.
* `GET /field-options`: Get dropdown options for product fields from Prod MySQL (cached).
* `GET /product-lines/:companyId`: Get product lines for a company from Prod MySQL (cached).
* `GET /sublines/:lineId`: Get sublines for a line from Prod MySQL (cached).
* `GET /check-file/:filename`: Check existence/permissions of uploaded file (temp or reusable).
* `GET /list-uploads`: List files in upload directories.
* `GET /search-products`: Search products in Prod MySQL DB.
* `GET /check-upc-and-generate-sku`: Check UPC existence and generate SKU suggestion based on Prod MySQL data.
* `GET /product-categories/:pid`: Get assigned categories for a product from Prod MySQL.

**10. Product Metrics (`product-metrics.js`)**
* `GET /filter-options`: Get distinct filter values (vendor, brand, abcClass) from `product_metrics`.
* `GET /`: List detailed product metrics with filtering, sorting, pagination (primary data access).
* `GET /:pid`: Get full metrics record for a single product.

**11. Orders (`orders.js`)**
* `GET /`: List orders with summary info, filtering, sorting, pagination, and stats.
* `GET /:orderNumber`: Get details for a single order, including items.

**12. Products (`products.js`)**
* `GET /brands`: Get distinct brands (filtered by PO value).
* `GET /`: List products with core data + metrics, filtering, sorting, pagination.
* `GET /trending`: Get trending products based on `product_metrics`.
* `GET /:id`: Get details for a single product (core data + metrics).
* `POST /import`: (**Likely Obsolete/Dangerous**) Import products from CSV.
* `PUT /:id`: Update core product data.
* `GET /:id/metrics`: (**Redundant**) Get metrics for a single product.
* `GET /:id/time-series`: Get sales/PO history for a single product.

**13. Purchase Orders (`purchase-orders.js`)**
* `GET /`: List purchase orders with summary info, filtering, sorting, pagination, and summary stats.
* `GET /vendor-metrics`: Calculate vendor performance metrics from `purchase_orders`.
* `GET /cost-analysis`: Calculate cost analysis by category from `purchase_orders`.
* `GET /receiving-status`: Get summary counts based on PO receiving status.
* `GET /order-vs-received`: List product ordered vs. received quantities.

**14. Reusable Images (`reusable-images.js`)**
* `GET /`: List all reusable images.
* `GET /by-company/:companyId`: List global and company-specific images.
* `GET /global`: List only global images.
* `GET /:id`: Get a single reusable image record.
* `POST /upload`: Upload a new reusable image and create DB record.
* `PUT /:id`: Update reusable image metadata (name, global, company).
* `DELETE /:id`: Delete reusable image record and file.
* `GET /check-file/:filename`: Check existence/permissions of a reusable image file.

**15. Templates (`templates.js`)**
* `GET /`: List all product data templates.
* `GET /:company/:productType`: Get a specific template.
* `POST /`: Create a new template.
* `PUT /:id`: Update an existing template.
* `DELETE /:id`: Delete a template.

**16. Vendors Aggregate (`vendors-aggregate.js`)**
* `GET /filter-options`: Get distinct vendor names and statuses for UI filters (from `vendor_metrics`).
* `GET /stats`: Get overall statistics related to vendors (from `vendor_metrics` & `purchase_orders`).
* `GET /`: List vendors with aggregated metrics, supporting filtering, sorting, pagination (from `vendor_metrics` & `purchase_orders`).

**Recommendations:**

1. **Address Obsolete Endpoints:** Prioritize removing or confirming the necessity of the endpoints marked as obsolete/redundant (legacy config, `/analytics/forecast`, `/csv/update`, `/csv/reset`, `/products/import`, `/products/:id/metrics`).
2. **Consolidate Overlapping Functionality:** Review the multiple vendor performance and product listing endpoints. Decide on the primary method (e.g., using aggregate tables via `/vendors-aggregate` and `/metrics`) and refactor or remove the others. Clarify the image upload strategies.
3. **Standardize Data Access:** Decide whether `dashboard` and `analytics` endpoints should primarily use aggregate tables (like `/metrics`, `/brands-aggregate`, etc.) or if direct access to base tables is sometimes necessary. Aim for consistency and document the reasoning. Optimize queries hitting base tables if they must remain.
4. **Improve Background Task Management:** Refactor `csv.js` to use a unified locking mechanism (perhaps separate locks per task type) and a consistent cancellation strategy for all spawned/managed processes. Clarify the purpose of `update` vs `full-update` and `reset` vs `full-reset`. (A minimal per-task lock sketch follows this list.)
5. **Optimize DB Connections:** Ensure the `getDbConnection` pooling/caching helper from `products-import.js` is used *consistently* across all modules interacting with the production MySQL database (especially `ai_validation.js`). Remove unnecessary tunnel creations.
6. **Review Data Integrity:** Double-check the assumptions made (e.g., uniqueness of AI prompts) and ensure database constraints enforce them. Review the `GET /products/brands` filtering logic.

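A minimal sketch of what per-task locks could look like (assumed names; not the current `csv.js` implementation, which uses a single shared `activeImport` flag):

```javascript
// One lock per task type, so an import no longer blocks a metrics calculation.
const taskLocks = new Map(); // taskType -> { startedAt, ...info }

function acquireLock(taskType, info = {}) {
  if (taskLocks.has(taskType)) {
    return false; // a task of this type is already running
  }
  taskLocks.set(taskType, { startedAt: Date.now(), ...info });
  return true;
}

function releaseLock(taskType) {
  taskLocks.delete(taskType);
}
```
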
## Changes Made

1. **Removed Obsolete Legacy Endpoints in `config.js`**:
   - Removed `GET /config/` endpoint
   - Removed `PUT /config/stock-thresholds/:id` endpoint
   - Removed `PUT /config/lead-time-thresholds/:id` endpoint
   - Removed `PUT /config/sales-velocity/:id` endpoint
   - Removed `PUT /config/abc-classification/:id` endpoint
   - Removed `PUT /config/safety-stock/:id` endpoint
   - Removed `PUT /config/turnover/:id` endpoint

   These endpoints were obsolete as they referenced older, single-row config tables that have been replaced by newer endpoints using the structured tables `settings_global`, `settings_product`, and `settings_vendor`.

2. **Removed MySQL Syntax `/forecast` Endpoint in `analytics.js`**:
   - Removed the `GET /analytics/forecast` endpoint that was using MySQL-specific syntax incompatible with the PostgreSQL database used elsewhere in the application.

3. **Renamed and Removed Redundant Endpoints**:
   - Renamed `csv.js` to `data-management.js` while maintaining the same `/csv/*` endpoint paths for consistency
   - Removed deprecated `/csv/update` endpoint (now fully replaced by `/csv/full-update`)
   - Removed deprecated `/csv/reset` endpoint (now fully replaced by `/csv/full-reset`)
   - Removed deprecated `/products/import` endpoint (now handled by `/csv/import`)
   - Removed deprecated `/products/:id/metrics` endpoint (now handled by `/metrics/:pid`)

4. **Fixed Data Integrity Issues**:
   - Improved the `GET /products/brands` endpoint by removing the arbitrary filtering logic that only showed brands with purchase orders totaling at least $500
   - The updated endpoint now returns all distinct brands from visible products, providing more complete data

5. **Optimized Database Connections**:
   - Created a new `dbConnection.js` utility file that encapsulates the optimized database connection management logic (a connection-reuse sketch follows this list)
   - Improved the `ai-validation.js` file to use this shared connection management, eliminating unnecessary repeated tunnel creation
   - Added proper connection pooling with timeout-based connection reuse, reducing the overhead of repeatedly creating SSH tunnels
   - Added query result caching for frequently accessed data to improve performance

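Illustrative only; `dbConnection.js` itself is not reproduced in this document. The general shape of timeout-based connection reuse might look like:

```javascript
// Reuse one SSH tunnel + MySQL connection until it sits idle too long.
// createConnection, the idle timeout, and the connection API are assumptions.
let cached = null; // { connection, lastUsed }
const IDLE_TIMEOUT_MS = 60000; // assumed value

async function getDbConnection(createConnection) {
  const now = Date.now();
  if (cached && now - cached.lastUsed < IDLE_TIMEOUT_MS) {
    cached.lastUsed = now;
    return cached.connection; // reuse the live tunnel
  }
  if (cached) {
    await cached.connection.end().catch(() => {}); // drop the stale tunnel
  }
  cached = { connection: await createConnection(), lastUsed: now };
  return cached.connection;
}
```
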
These changes improve maintainability by removing duplicate code, enhance consistency by standardizing on the newer endpoint patterns, and optimize performance by reducing redundant database connections.

## Additional Improvements

1. **Further Database Connection Optimizations**:
   - Extended the use of the optimized database connection utility to additional endpoints in `ai-validation.js`
   - Updated the `/validate` and `/test-taxonomy` endpoints to use `getDbConnection`
   - Ensured consistent connection management across all routes that access the production database

2. **AI Prompts Data Integrity Verification**:
   - Confirmed proper uniqueness constraints are in place in the database schema for AI prompts
   - The schema includes:
     - `unique_company_prompt` constraint ensuring only one prompt per company
     - `idx_unique_general_prompt` index ensuring only one general prompt in the system
     - `idx_unique_system_prompt` index ensuring only one system prompt in the system
   - Endpoint handlers properly handle uniqueness constraint violations with appropriate 409 Conflict responses
   - Validation ensures company-specific prompts have company IDs, while general/system prompts do not

3. **AI Prompts Endpoint Consolidation**:
   - Added a new consolidated `/by-type` endpoint that handles all types of prompts (general, system, company_specific); hypothetical calls are sketched below
   - Marked the existing separate endpoints as deprecated with console warnings
   - Maintained backward compatibility while providing a cleaner API moving forward

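A sketch of what calls to the consolidated endpoint might look like; the query parameter names here are assumptions, so check `ai_prompts.js` for the actual names:

```javascript
// Hypothetical client calls to the consolidated endpoint.
const general = await fetch('/ai-prompts/by-type?type=general');
const system = await fetch('/ai-prompts/by-type?type=system');
const company = await fetch('/ai-prompts/by-type?type=company_specific&companyId=42');
```
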
## Completed Items

✅ Removed obsolete legacy endpoints in `config.js`
✅ Removed MySQL syntax `/forecast` endpoint in `analytics.js`
✅ Fixed `GET /products/brands` endpoint filtering logic
✅ Created reusable database connection utility (`dbConnection.js`)
✅ Optimized database connections in `ai-validation.js`
✅ Verified data integrity in AI prompts handling
✅ Consolidated AI prompts endpoints with a unified `/by-type` endpoint

## Remaining Items

- Consider adding additional error handling and logging for database connections
- Perform load testing on the optimized database connections to ensure they handle high traffic properly
docs/setup-chat.md (new file, 23 lines)

This portion of the application is going to be a read-only chat archive. It will pull data from a RocketChat export converted to PostgreSQL. This is a separate database from the one the rest of the inventory application uses, but it will still use users and permissions from the inventory database. Both databases are on the same Postgres instance.

For now, let's add a select to the top of the page that allows me to "view as" any of the users in the RocketChat database. We'll connect this to the authorization in the main application later.

The DB connection info is stored in the .env file in the inventory-server root. It contains these variables:

DB_HOST=localhost
DB_USER=rocketchat_user
DB_PASSWORD=password
DB_NAME=rocketchat_converted
DB_PORT=5432

Not all of the information in this database is relevant, as it's a direct export from another app with more features. You can use the query tool to examine the structure and data available.

Server-side files should use similar conventions and the same technologies as the inventory-server (inventory-server root) and auth-server (inventory-server/auth). I will provide my current pm2 ecosystem file upon request for you to add the configuration for the new "chat-server". I use Caddy on the server and can provide my Caddyfile to assist with configuring the API routes. All configuration and routes for the chat-server should go in the inventory-server/chat folder or subfolders you create.

The folder you see as inventory-server is actually a direct mount of the /var/www/html/inventory folder on the server. You can read and write files from there like usual, but any terminal commands for the server I will have to run myself.

The "Chat" page should be added to the main application sidebar, and a page similar to the others should be created in inventory/src/pages. All other chat frontend components should go in inventory/src/components/chat.

The application uses shadcn components, and those should be used for all UI elements where possible (located in inventory/src/components/ui). The UI should match existing pages and components.
docs/split-up-pos.md (new file, 112 lines)

Okay, I understand completely now. The core issue is that the previous approaches tried too hard to reconcile every receipt back to a specific PO line within the `purchase_orders` table structure, which doesn't reflect the reality where receipts can be independent events. Your downstream scripts, especially `daily_snapshots` and `product_metrics`, rely on having a complete picture of *all* receivings.

Let's pivot to a model that respects both distinct data streams: **Orders (Intent)** and **Receivings (Actuals)**.

**Proposed Solution: Separate `purchase_orders` and `receivings` Tables**

This is the cleanest way to model the reality you've described.

1. **`purchase_orders` Table:**
    * **Purpose:** Tracks the status and details of purchase *orders* placed. Represents the *intent* to receive goods.
    * **Key Columns:** `po_id`, `pid`, `ordered` (quantity ordered), `po_cost_price`, `date` (order/created date), `expected_date`, `status` (PO lifecycle: 'ordered', 'canceled', 'done'), `vendor`, `notes`, etc.
    * **Crucially:** This table *does not* need a `received` column or a `receiving_history` column derived from complex allocations. It focuses solely on the PO itself.

2. **`receivings` Table (New or Refined):**
    * **Purpose:** Tracks every single line item received, regardless of whether it was linked to a PO during the receiving process. Represents the *actual* goods that arrived.
    * **Key Columns:**
        * `receiving_id` (Identifier for the overall receiving document/batch)
        * `pid` (Product ID received)
        * `received_qty` (Quantity received for this specific line)
        * `cost_each` (Actual cost paid for this item on this receiving)
        * `received_date` (Actual date the item was received)
        * `received_by` (Employee ID/Name)
        * `source_po_id` (The `po_id` entered on the receiving screen, *nullable*. Stores the original link attempt, even if it was wrong or missing)
        * `source_receiving_status` (The status from the source `receivings` table: 'partial_received', 'full_received', 'paid', 'canceled')

**How the Import Script Changes:**

1. **Fetch POs:** Fetch data from `po` and `po_products`.
2. **Populate `purchase_orders`:**
    * Insert/Update rows into `purchase_orders` based directly on the fetched PO data.
    * Set `po_id`, `pid`, `ordered`, `po_cost_price`, `date` (`COALESCE(date_ordered, date_created)`), `expected_date`.
    * Set `status` by mapping the source `po.status` code directly ('ordered', 'canceled', 'done', etc.).
    * **No complex allocation needed here.**
3. **Fetch Receivings:** Fetch data from `receivings` and `receivings_products`.
4. **Populate `receivings`:**
    * For *every* line item fetched from `receivings_products`:
        * Perform necessary data validation (dates, numbers).
        * Insert a new row into `receivings` with all the relevant details (`receiving_id`, `pid`, `received_qty`, `cost_each`, `received_date`, `received_by`, `source_po_id`, `source_receiving_status`).
        * Use `ON CONFLICT (receiving_id, pid)` (or a similar unique key based on your source data) `DO UPDATE SET ...` for incremental updates if necessary, or simply delete/re-insert based on `receiving_id` for simplicity if performance allows. (A minimal upsert sketch follows this list.)

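A minimal sketch of the upsert described above, assuming node-postgres, a unique constraint on `(receiving_id, pid)`, and column names matching this proposal (not final import code):

```javascript
// Upsert one receiving line; EXCLUDED refers to the row that failed to insert.
async function upsertReceivingLine(client, line) {
  await client.query(
    `INSERT INTO receivings
       (receiving_id, pid, received_qty, cost_each, received_date,
        received_by, source_po_id, source_receiving_status)
     VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
     ON CONFLICT (receiving_id, pid) DO UPDATE SET
       received_qty = EXCLUDED.received_qty,
       cost_each = EXCLUDED.cost_each,
       received_date = EXCLUDED.received_date,
       received_by = EXCLUDED.received_by,
       source_po_id = EXCLUDED.source_po_id,
       source_receiving_status = EXCLUDED.source_receiving_status`,
    [line.receivingId, line.pid, line.receivedQty, line.costEach,
     line.receivedDate, line.receivedBy, line.sourcePoId, line.sourceStatus]
  );
}
```
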
**Impact on Downstream Scripts (and how to adapt):**

* **Initial Query (Active POs):**
    * `SELECT ... FROM purchase_orders po WHERE po.status NOT IN ('canceled', 'done', 'paid_equivalent_status?') AND po.date >= ...`
    * `active_pos`: `COUNT(DISTINCT po.po_id)` based on the filtered POs.
    * `overdue_pos`: Add `AND po.expected_date < CURRENT_DATE`.
    * `total_units`: `SUM(po.ordered)`. Represents total units *ordered* on active POs.
    * `total_cost`: `SUM(po.ordered * po.po_cost_price)`. Cost of units *ordered*.
    * `total_retail`: `SUM(po.ordered * pm.current_price)`. Retail value of units *ordered*.
    * **Result:** This query now cleanly reports on the status of *orders* placed, which seems closer to its original intent. The filter `po.receiving_status NOT IN ('partial_received', 'full_received', 'paid')` is replaced by `po.status NOT IN ('canceled', 'done', 'paid_equivalent?')`. The 90% received check is removed as `received` is not reliably tracked *on the PO* anymore.

* **`daily_product_snapshots`:**
    * **`SalesData` CTE:** No change needed.
    * **`ReceivingData` CTE:** **Must be changed.** Query the **`receivings`** table instead of `purchase_orders`.

    ```sql
    ReceivingData AS (
        SELECT
            rl.pid,
            COUNT(DISTINCT rl.receiving_id) as receiving_doc_count,
            SUM(rl.received_qty) AS units_received,
            SUM(rl.received_qty * rl.cost_each) AS cost_received
        FROM public.receivings rl
        WHERE rl.received_date::date = _date
        -- Optional: Filter out canceled receivings if needed
        -- AND rl.source_receiving_status <> 'canceled'
        GROUP BY rl.pid
    ),
    ```

    * **Result:** This now accurately reflects *all* units received on a given day from the definitive source.

* **`update_product_metrics`:**
    * **`CurrentInfo` CTE:** No change needed (pulls from `products`).
    * **`OnOrderInfo` CTE:** Needs re-evaluation. How do you want to define "On Order"?
        * **Option A (Strict PO View):** `SUM(po.ordered)` from `purchase_orders po WHERE po.status NOT IN ('canceled', 'done', 'paid_equivalent?')`. This is quantity on *open orders*, ignoring fulfillment state. Simple, but might overestimate if items arrived unlinked.
        * **Option B (Approximate Fulfillment):** `SUM(po.ordered)` from open POs MINUS `SUM(rl.received_qty)` from `receivings rl` where `rl.source_po_id = po.po_id` (summing only directly linked receivings). Better, but still misses fulfillment via unlinked receivings.
        * **Option C (Heuristic):** `SUM(po.ordered)` from open POs MINUS `SUM(rl.received_qty)` from `receivings rl` where `rl.pid = po.pid` and `rl.received_date >= po.date`. This *tries* to account for unlinked receivings but is imprecise.
        * **Recommendation:** Start with **Option A** for simplicity, clearly labeling it "Quantity on Open POs". You might need a separate process or metric for a more nuanced view of expected vs. actual pipeline.

    ```sql
    -- Example for Option A
    OnOrderInfo AS (
        SELECT
            pid,
            SUM(ordered) AS on_order_qty, -- Total qty on open POs
            SUM(ordered * po_cost_price) AS on_order_cost -- Cost of qty on open POs
        FROM public.purchase_orders
        WHERE status NOT IN ('canceled', 'done', 'paid_equivalent?') -- Define your open statuses
        GROUP BY pid
    ),
    ```

    * **`HistoricalDates` CTE:**
        * `date_first_sold`, `max_order_date`: No change (queries `orders`).
        * `date_first_received_calc`, `date_last_received_calc`: **Must be changed.** Query `MIN(rl.received_date)` and `MAX(rl.received_date)` from the **`receivings`** table grouped by `pid`.
    * **`SnapshotAggregates` CTE:**
        * `received_qty_30d`, `received_cost_30d`: These are calculated from `daily_product_snapshots`, which are now correctly sourced from `receivings`, so this part is fine.
    * **Forecasting Calculations:** Will use the chosen definition of `on_order_qty`. Be aware of the implications of Option A (potentially inflated if unlinked receivings fulfill orders).
    * **Result:** Metrics are calculated based on distinct order data and complete receiving data. The definition of "on order" needs careful consideration.

**Summary of this Approach:**

* **Pros:**
    * Accurately models distinct order and receiving events.
    * Provides a definitive source (`receivings`) for all received inventory.
    * Simplifies the `purchase_orders` table and its import logic.
    * Avoids complex/potentially inaccurate allocation logic for unlinked receivings within the main tables.
    * Avoids synthetic records.
    * Fixes downstream reporting (`daily_snapshots` receiving data).
* **Cons:**
    * Requires creating/managing the `receivings` table.
    * Requires modifying downstream queries (`ReceivingData`, `OnOrderInfo`, `HistoricalDates`).
    * Calculating a precise "net quantity still expected to arrive" (true on-order minus all relevant fulfillment) becomes more complex and may require specific business rules or heuristics outside the basic table structure if Option A for `OnOrderInfo` isn't sufficient.

This two-table approach (`purchase_orders` + `receivings`) seems the most robust and accurate way to handle your requirement for complete receiving records independent of potentially flawed PO linking. It directly addresses the shortcomings of the previous attempts.
docs/validate-table-changes-implementation-issue4.md (new file, 239 lines)

# Validation Display Issue Implementation

## Issue Being Addressed

**Validation Display Issue**: Validation isn't happening beyond checking whether a cell is required. All validation rules defined in import.tsx need to be respected.

* Required fields correctly show a red border when empty (✅ ALREADY WORKING)
* Non-empty fields with validation errors (regex, unique, etc.) should show a red border AND an alert circle icon with a tooltip explaining the error (❌ NOT WORKING)

## Implementation Attempts

**NOTE:** All previous attempts have been reverted and are no longer part of the code. Please take this into account when trying a new solution.

### Attempt 1: Fix Validation Display Logic

**Approach**: Modified the `processErrors` function to separate required errors from validation errors and show alert icons only for non-empty fields with validation errors.

**Changes Made**:

```typescript
function processErrors(value: any, errors: ErrorObject[]) {
  // ...existing code...

  // Separate required errors from other validation errors
  const requiredErrors = errors.filter(error =>
    error.message?.toLowerCase().includes('required')
  );
  const validationErrors = errors.filter(error =>
    !error.message?.toLowerCase().includes('required')
  );

  const isRequiredButEmpty = valueIsEmpty && requiredErrors.length > 0;
  const hasValidationErrors = validationErrors.length > 0;
  const shouldShowErrorIcon = hasValidationErrors && !valueIsEmpty;

  // ...more code...
}
```

**Result**: Non-empty fields with validation errors still aren't displaying the alert icon with tooltip.

### Attempt 2: Comprehensive Fix for Validation Display

**Approach**: Completely rewrote the `processErrors` function with consistent empty value detection, clear error separation, and improved error message extraction.

**Changes Made**:

```typescript
function processErrors(value: any, errors: ErrorObject[]) {
  if (!errors || errors.length === 0) {
    return { filteredErrors: [], hasError: false, isRequiredButEmpty: false,
             shouldShowErrorIcon: false, errorMessages: '' };
  }

  const valueIsEmpty = isEmpty(value);
  const requiredErrors = errors.filter(error =>
    error.message?.toLowerCase().includes('required')
  );
  const validationErrors = errors.filter(error =>
    !error.message?.toLowerCase().includes('required')
  );

  let filteredErrors = valueIsEmpty ? requiredErrors : validationErrors;

  const isRequiredButEmpty = valueIsEmpty && requiredErrors.length > 0;
  const hasValidationErrors = validationErrors.length > 0;
  const hasError = isRequiredButEmpty || hasValidationErrors;
  const shouldShowErrorIcon = hasValidationErrors && !valueIsEmpty;

  let errorMessages = '';
  if (shouldShowErrorIcon) {
    errorMessages = validationErrors.map(getErrorMessage).join('\n');
  }

  return { filteredErrors, hasError, isRequiredButEmpty, shouldShowErrorIcon, errorMessages };
}
```

**Result**: Non-empty fields with validation errors still aren't displaying the alert icon with tooltip.

### Attempt 3: Simplified Error Processing Logic

**Approach**: Refactored `processErrors` to use the shared `isEmpty` function, simplified the error icon logic, and made error message extraction more direct.

**Changes Made**:

```typescript
function processErrors(value: any, errors: ErrorObject[]) {
  if (!errors || errors.length === 0) {
    return { filteredErrors: [], hasError: false, isRequiredButEmpty: false,
             shouldShowErrorIcon: false, errorMessages: '' };
  }

  const valueIsEmpty = isEmpty(value);
  const requiredErrors = errors.filter(error =>
    error.message?.toLowerCase().includes('required')
  );
  const validationErrors = errors.filter(error =>
    !error.message?.toLowerCase().includes('required')
  );

  let filteredErrors = valueIsEmpty ? requiredErrors : validationErrors;

  const isRequiredButEmpty = valueIsEmpty && requiredErrors.length > 0;
  const hasValidationErrors = !valueIsEmpty && validationErrors.length > 0;
  const hasError = isRequiredButEmpty || hasValidationErrors;
  const shouldShowErrorIcon = hasValidationErrors;

  let errorMessages = '';
  if (shouldShowErrorIcon) {
    errorMessages = validationErrors.map(getErrorMessage).join('\n');
  }

  return { filteredErrors, hasError, isRequiredButEmpty, shouldShowErrorIcon, errorMessages };
}
```

**Result**: Non-empty fields with validation errors still aren't displaying the alert icon with tooltip.

### Attempt 4: Consistent Error Processing Across Components

**Approach**: Updated both the `processErrors` function and the `ValidationCell` component to ensure consistent error handling between components.

**Changes Made**:

```typescript
// In processErrors function
function processErrors(value: any, errors: ErrorObject[]) {
  // Similar to Attempt 3 with consistent error handling
}

// In ValidationCell component
const ValidationCell = ({ field, value, onChange, errors, /* other props */ }) => {
  // ...existing code...

  // Use the processErrors function to handle validation errors
  const { hasError, isRequiredButEmpty, shouldShowErrorIcon, errorMessages } =
    React.useMemo(() => processErrors(value, errors), [value, errors]);

  // ...rest of the component...
}
```

**Result**: Non-empty fields with validation errors still aren't displaying the alert icon with tooltip.

### Attempt 5: Unified Error Processing with ItemNumberCell

**Approach**: Replaced the custom error processing in `ValidationCell` with the same `processErrors` utility used by `ItemNumberCell`.

**Changes Made**:

```typescript
const ValidationCell = ({ field, value, onChange, errors, /* other props */ }) => {
  // State and context setup...

  // For item_number fields, use the specialized component
  if (fieldKey === 'item_number') {
    return <ItemNumberCell {...props} />;
  }

  // Use the same processErrors utility function that ItemNumberCell uses
  const { hasError, isRequiredButEmpty, shouldShowErrorIcon, errorMessages } =
    React.useMemo(() => processErrors(value, errors), [value, errors]);

  // Rest of component...
}
```

**Result**: Non-empty fields with validation errors still aren't displaying the alert icon with tooltip.

### Attempt 6: Standardize Error Processing Across Cell Types

**Approach**: Standardized error handling across all cell types using the shared `processErrors` utility function.

**Changes Made**: Similar to Attempt 5, with a focus on standardizing the approach for determining when to show validation error icons.

**Result**: Non-empty fields with validation errors still aren't displaying the alert icon with tooltip.

### Attempt 7: Replace Custom Error Processing with Shared Utility

**Approach**: Ensured consistent error handling between the `ItemNumberCell` and regular `ValidationCell` components.

**Changes Made**: Similar to Attempts 5 and 6, with a focus on using the shared utility function consistently.

**Result**: Non-empty fields with validation errors still aren't displaying the alert icon with tooltip.

### Attempt 8: Improved Error Normalization and Deep Comparison

**Approach**: Modified `MemoizedCell` in `ValidationTable.tsx` to use deep comparison for error objects and improved error normalization.

**Changes Made**:

```typescript
// Create a memoized cell component
const MemoizedCell = React.memo(({ field, value, onChange, errors, /* other props */ }) => {
  return <ValidationCell {...props} />;
}, (prev, next) => {
  // Basic prop comparison
  if (prev.value !== next.value) return false;
  if (prev.isValidating !== next.isValidating) return false;
  if (prev.itemNumber !== next.itemNumber) return false;

  // Deep compare errors - critical for validation display
  if (!prev.errors && next.errors) return false;
  if (prev.errors && !next.errors) return false;
  if (prev.errors && next.errors) {
    if (prev.errors.length !== next.errors.length) return false;

    // Compare each error object
    for (let i = 0; i < prev.errors.length; i++) {
      if (prev.errors[i].message !== next.errors[i].message) return false;
      if (prev.errors[i].level !== next.errors[i].level) return false;
      if (prev.errors[i].source !== next.errors[i].source) return false;
    }
  }

  // Compare options...

  return true;
});

// In the field columns definition:
cell: ({ row }) => {
  const rowErrors = validationErrors.get(row.index);
  const cellErrors = rowErrors?.[fieldKey] || [];

  // Ensure cellErrors is always an array
  const normalizedErrors = Array.isArray(cellErrors) ? cellErrors : [cellErrors];

  return <MemoizedCell {...props} errors={normalizedErrors} />;
}
```

**Result**: Non-empty fields with validation errors still aren't displaying the alert icon with tooltip.

## Root Causes (Revised Hypothesis)

After multiple attempts, the issue appears more complex than initially thought. Possible root causes:

1. **Error Object Structure**: Error objects might not have the expected structure or properties
2. **Error Propagation**: Errors might be getting filtered out before reaching cell components
3. **Validation Rules Configuration**: Validation rules in import.tsx might be incorrectly configured
4. **Error State Management**: Error state might not be properly updated or might be reset incorrectly
5. **Component Rendering Logic**: Components might not re-render when validation state changes
6. **CSS/Styling Issues**: Validation icons might be rendered but hidden due to styling issues
7. **Validation Timing**: Validation might be happening at the wrong time or getting overridden
docs/validate-table-changes-implementation-issue8.md (new file, 138 lines)

# Multiple Cell Edit Issue Implementation

## Issue Being Addressed

**Multiple Cell Edit Issue**: When you enter values in 2+ cells before validation finishes, the contents of all edited cells are erased when validation finishes.

## Implementation Attempts

### Attempt 1: Fix Multiple Cell Edit Issue (First Approach)

**Approach**:
- Added a tracking mechanism using a Set to keep track of cells that are currently being edited
- Modified the `flushPendingUpdates` function to preserve the values of cells being edited
- Added cleanup of editing state after validation completes

**Changes Made**:
```typescript
// Add ref to track cells currently being edited
const currentlyEditingCellsRef = useRef(new Set<string>());

// Update a row's field value
const updateRow = useCallback((rowIndex: number, key: T, value: any) => {
  // Add this cell to currently editing cells
  const cellKey = `${rowIndex}-${key}`;
  currentlyEditingCellsRef.current.add(cellKey);

  // ...existing code...

  // After validation completes, remove this cell from currently editing list
  setTimeout(() => {
    currentlyEditingCellsRef.current.delete(cellKey);
  }, 100);
}, []);

// Modify flushPendingUpdates to respect currently editing cells
const flushPendingUpdates = useCallback(() => {
  // ...existing code...

  if (dataUpdates.length > 0) {
    setData(prev => {
      // ...existing code...

      dataUpdates.forEach((row, index) => {
        if (index < newData.length) {
          const updatedRow = { ...row };

          // Check if any fields in this row are currently being edited
          // If so, preserve their current values in the previous data
          Object.keys(prev[index] || {}).forEach(key => {
            const cellKey = `${index}-${key}`;
            if (currentlyEditingCellsRef.current.has(cellKey)) {
              // Keep the value from the previous state for this field
              updatedRow[key] = prev[index][key];
            }
          });

          newData[index] = updatedRow;
        }
      });

      return newData;
    });
  }
}, []);
```

**Result**:
- Slight improvement - the first value entered was saved, but any subsequent values still got erased

### Attempt 2: Fix Multiple Cell Edit Issue (Second Approach)

**Approach**:
- Completely revised the cell editing tracking system
- Used a Map with timestamps to track editing cells more accurately
- Added proper Promise-based tracking for cell validation
- Increased timeout from 100ms to 1000ms
- Made cleanup more robust by checking if it's still the same editing session

**Changes Made**:
```typescript
// Add ref to track cells currently being edited with timestamps
const currentlyEditingCellsRef = useRef(new Map<string, number>());

// Add ref to track validation promises
const validationPromisesRef = useRef<Map<string, Promise<void>>>(new Map());

// Update a row's field value
const updateRow = useCallback((rowIndex: number, key: T, value: any) => {
  // Mark this cell as being edited with the current timestamp
  const cellKey = `${rowIndex}-${key}`;
  currentlyEditingCellsRef.current.set(cellKey, Date.now());

  // ...existing code...

  // Create a validation promise
  const validationPromise = new Promise<void>((resolve) => {
    setTimeout(() => {
      try {
        validateRow(rowIndex);
      } finally {
        resolve();
      }
    }, 0);
  });

  validationPromisesRef.current.set(cellKey, validationPromise);

  // When validation is complete, remove from validating cells
  validationPromise.then(() => {
    // ...existing code...

    // Keep this cell in the editing state for a longer time
    setTimeout(() => {
      if (currentlyEditingCellsRef.current.has(cellKey)) {
        currentlyEditingCellsRef.current.delete(cellKey);
      }
    }, 1000); // Keep as "editing" for 1 second
  });
}, []);
```

**Result**:
- Worse than the first approach - now all values get erased, including the first one

## Root Causes (Hypothesized)

- The validation process might be updating the entire data state, causing race conditions with cell edits
- The timing of validation completions might be problematic
- State updates might be happening in a way that overwrites user changes
- The cell state tracking system is not robust enough to prevent overwrites

## Next Steps

The issue requires a more fundamental approach than just tweaking the editing logic. We need to:

1. Implement a more robust state management system for cell edits that can survive validation cycles
2. Consider disabling validation during active editing
3. Implement a proper "dirty state" tracking system for cells (a rough sketch follows below)
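
One way to realize item 3, as a minimal sketch: keep the latest user value for each dirty cell in a ref and re-apply those values over whatever validation writes back. The `dirtyCellsRef`, `markDirty`, and `mergeDirtyCells` names are hypothetical; this assumes the same `useRef`/`setData` shapes used in the attempts above.

```typescript
// Hypothetical dirty-state tracking: a cell stays dirty until its edit is
// committed, rather than until an arbitrary timer expires.
const dirtyCellsRef = useRef(new Map<string, any>()); // "rowIndex-field" -> latest user value

const markDirty = (rowIndex: number, field: string, value: any) => {
  dirtyCellsRef.current.set(`${rowIndex}-${field}`, value);
};

// Re-apply dirty values on top of rows produced while validation was running,
// so a validation pass can never erase an in-flight user edit.
const mergeDirtyCells = (rows: Record<string, any>[]) =>
  rows.map((row, rowIndex) => {
    const merged = { ...row };
    for (const [cellKey, value] of dirtyCellsRef.current) {
      const sep = cellKey.indexOf('-');
      if (Number(cellKey.slice(0, sep)) === rowIndex) {
        merged[cellKey.slice(sep + 1)] = value;
      }
    }
    return merged;
  });
```

Validation-side code would then call `setData(prev => mergeDirtyCells(nextRows))` and clear a cell's entry from `dirtyCellsRef` only once its edit has actually been committed.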
docs/validate-table-changes.md (new file, 305 lines)
@@ -0,0 +1,305 @@
# Current Issues to Address

4. Validation isn't happening beyond checking if a cell is required or not - needs to respect rules in import.tsx
   * Red cell outline if cell is required and it's empty
   * Red outline + alert circle icon with tooltip if cell is NOT empty and isn't valid

8. When you enter a value in 2+ cells before validation finishes, contents from all edited cells get erased when validation finishes

## Do NOT change or edit

* Anything related to AI validation
* Anything about how templates or UPC validation work (only focus on specific issues described above)
* Anything outside of the ValidationStepNew folder

## Issues already fixed - do not work on these

✅FIXED 1. The red row background should go away when all cells in the row are valid and all required cells are populated
✅FIXED 2. Columns alignment with header is slightly off, gets worse the further right you go
✅FIXED 3. The copy down button is in the way of the validation error icon and the select open trigger - all three need to be in unique locations
✅FIXED 5. Description column needs to have an expanded view of some sort, maybe a popover to allow for easier editing
   * Don't distort table to make it happen
✅FIXED 6. Need to ensure all cells' contents don't overflow the input (truncate). COO does this currently, probably more
✅FIXED 7. The template select cell is expanding, needs to be fixed size and truncate
✅FIXED 9. Import dialog state not fully reset when closing? (validate data step appears scrolled to the middle of the table where I left it)
✅FIXED 10. UPC column doesn't need to show loading state when Item Number is being processed, only show on item number column
✅FIXED 11. Copy down needs to show a loading state on the cells that it will copy to
✅FIXED 12. Shipping restrictions/tax category should default to ID 0 if we didn't get it elsewhere
✅FIXED 13. Header row should be sticky (both up/down and left/right)
✅FIXED 14. Need a way to scroll around table if user doesn't have mouse wheel for left/right
✅FIXED 15. Enhance copy down feature by allowing user to choose the last cell to copy to, instead of going all the way to the bottom

---------

# Validation Step Components Overview

## Core Components

### ValidationContainer
`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationContainer.tsx`
- Main wrapper component for the validation step
- Manages global state and coordinates between subcomponents
- Handles navigation events (next, back)
- Manages template application and validation state
- Coordinates UPC validation and product line loading
- Manages row selection and filtering
- Contains cache management for UPC validation results
- Maintains item number references separate from main data

### ValidationTable
`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationTable.tsx`
- Handles data display and column configuration
- Uses TanStack Table for core functionality
- Features:
  - Sticky header (both vertical and horizontal) - currently doesn't work properly
  - Row selection with checkboxes
  - Template selection column
  - Dynamic column widths based on field types - specified in import.tsx component
  - Copy down functionality for cell values
  - Error highlighting for rows and cells
  - Loading states for cells being validated

### ValidationCell
`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationCell.tsx`
- Base cell component that renders different cell types based on field configuration
- Handles error display with tooltips
- Manages copy down button visibility
- Supports loading states during validation
- Cell Types:
  1. InputCell: For single-value text input
  2. SelectCell: For dropdown selection
  3. MultiInputCell: For multiple value inputs
  4. Template selection cells with SearchableTemplateSelect component

### SearchableTemplateSelect
`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/SearchableTemplateSelect.tsx`
- Advanced template selection component with search functionality
- Features:
  - Real-time search filtering of templates
  - Customizable display text for templates
  - Support for default brand selection
  - Accessible popover interface
  - Keyboard navigation support
  - Custom styling through className props
  - Scroll event handling for nested scrollable areas

### TemplateManager
`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/TemplateManager.tsx`
- Comprehensive template management interface
- Features:
  - Template selection with search functionality
  - Save template dialog with name and type inputs
  - Batch template application to selected rows
  - Template count tracking
  - Toast notifications for user feedback
  - Dialog-based interface for template operations

### AiValidationDialogs
`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/AiValidationDialogs.tsx`
- Manages AI-assisted validation dialogs and interactions

### SaveTemplateDialog
`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/SaveTemplateDialog.tsx`
- Dialog component for saving new templates

## Cell Components

### InputCell
`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/InputCell.tsx`
- Handles single value text input
- Features:
  - Inline/edit mode switching
  - Multiline support
  - Price formatting
  - Error state display
  - Loading state during validation
  - Width constraints
  - Automated cleanPriceFields processing for "$" formatting

### SelectCell
`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/SelectCell.tsx`
- Handles dropdown selection
- Features:
  - Searchable dropdown
  - Custom option rendering
  - Error state display
  - Loading state during validation
  - Width constraints
  - Disabled state support
  - Deferred search query handling for performance

### MultiInputCell
`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/cells/MultiInputCell.tsx`
- Handles multiple value inputs
- Features:
  - Comma-separated input support
  - Multi-select dropdown for predefined options
  - Custom separators
  - Badge display for selected count
  - Truncation for long values
  - Width constraints
  - Price formatting support
  - Internal state management to avoid excessive re-renders

## Validation System

### useValidation Hook
`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidation.tsx`
- Provides core validation logic
- Validates at multiple levels:
  1. Field-level validation (required, regex, unique)
  2. Row-level validation (supplier, company fields)
  3. Table-level validation
  4. Custom validation hooks support
- Error object structure includes message, level, and source properties
- Handles debounced validation updates to avoid UI freezing

### useAiValidation Hook
`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useAiValidation.tsx`
- Manages AI-assisted validation logic and state
- Features:
  - Tracks detailed changes per product
  - Manages validation progress with estimated completion time
  - Handles warnings and change suggestions
  - Supports diff generation for changes
  - Progress tracking with step indicators
  - Prompt management for AI interactions
  - Timer management for long-running operations

### useTemplates Hook
`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useTemplates.tsx`
- Comprehensive template management system
- Features:
  - Template CRUD operations
  - Template application logic
  - Default value handling
  - Template search and filtering
  - Batch template operations
  - Template validation

### useUpcValidation Hook
`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useUpcValidation.tsx`
- Dedicated UPC validation management
- Features:
  - UPC format validation
  - Supplier data validation
  - Cache management for validation results
  - Batch processing of UPC validations
  - Item number generation logic
  - Loading state management

### useFilters Hook
`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useFilters.tsx`
- Advanced filtering system for table data
- Features:
  - Multiple filter criteria support
  - Dynamic filter updates
  - Filter persistence
  - Filter combination logic
  - Performance optimized filtering

### useValidationState Hook
`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidationState.tsx`
- Manages global validation state
- Handles:
  - Data updates
  - Template management
  - Error tracking using Map objects
  - Row selection
  - Filtering
  - UPC validation with caching to prevent duplicate API calls
  - Product line loading
  - Batch processing of updates
  - Default value application for tax_cat and ship_restrictions (defaulting to "0")
  - Price field auto-formatting to remove "$" symbols

### Utility Files
`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validationUtils.ts`
- Core validation utility functions
- Includes:
  - Field validation logic
  - Error message formatting
  - Validation rule processing
  - Type checking utilities

`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/errorUtils.ts`
- Error handling and formatting utilities
- Includes:
  - Error object creation
  - Error message formatting
  - Error source tracking
  - Error level management

`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/dataMutations.ts`
- Data transformation and mutation utilities
- Includes:
  - Row data updates
  - Batch data processing
  - Data structure conversions
  - Change tracking

`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/validation-helper.js`
- Helper functions for validation
- Includes:
  - Common validation patterns
  - Validation state management
  - Validation result processing

`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/utils/upcValidation.ts`
- UPC-specific validation utilities
- Includes:
  - UPC format checking
  - Checksum validation
  - Supplier data matching
  - Cache management

### Types
`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/types.ts`
- Core type definitions for the validation step

### Validation Types
1. Required field validation
2. Regex pattern validation
3. Unique value validation
4. Custom field validation
5. Row-level validation
6. Table-level validation

## State Management

### useValidationState Hook
`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidationState.tsx`
- Manages global validation state; its responsibilities are listed in full under the Validation System section above

## UPC Validation System

### UPC Processing
- Validates UPCs against supplier data
- Cache system for UPC validation results
- Batch processing of UPC validation requests
- Auto-generation of item numbers based on UPC
- Special loading states for UPC/item number fields
- Separate state tracking to avoid unnecessary data structure updates

## Template System

### Template Management
- Supports saving and loading templates
- Template application to single/multiple rows
- Default template values
- Template search and filtering

## Performance Optimizations
1. Memoized components to prevent unnecessary renders
2. Virtualized table for large datasets (see the sketch below)
3. Deferred value updates for search inputs
4. Efficient error state management
5. Optimized cell update handling
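
As context for item 2, a minimal sketch of row virtualization with TanStack Virtual. This assumes `@tanstack/react-virtual` is the virtualization library in use; the component name, ref wiring, and sizing values are illustrative:

```typescript
import { useRef } from 'react';
import { useVirtualizer } from '@tanstack/react-virtual';

// Only rows inside the scroll viewport are mounted; offscreen rows are
// represented by empty space, so render cost stays flat as data grows.
function VirtualRows({ rowCount }: { rowCount: number }) {
  const scrollRef = useRef<HTMLDivElement>(null);
  const virtualizer = useVirtualizer({
    count: rowCount,
    getScrollElement: () => scrollRef.current,
    estimateSize: () => 36, // estimated row height in px
  });

  return (
    <div ref={scrollRef} style={{ height: 600, overflow: 'auto' }}>
      <div style={{ height: virtualizer.getTotalSize(), position: 'relative' }}>
        {virtualizer.getVirtualItems().map((item) => (
          <div
            key={item.key}
            style={{
              position: 'absolute',
              top: 0,
              left: 0,
              width: '100%',
              height: item.size,
              transform: `translateY(${item.start}px)`,
            }}
          >
            Row {item.index}
          </div>
        ))}
      </div>
    </div>
  );
}
```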
docs/validation-hook-refactor.md (new file, 131 lines)
@@ -0,0 +1,131 @@
# Refactoring Plan for Validation Code

## Current Structure Analysis
- **useValidationState.tsx**: ~1650 lines - Core validation state management
- **useValidation.tsx**: ~425 lines - Field/data validation utility
- **useUpcValidation.tsx**: ~410 lines - UPC-specific validation

## Proposed New Structure

### 1. Core Types & Utilities (150-200 lines)
**File: `validation/types.ts`**
- All interfaces and types (RowData, ValidationError, FilterState, Template, etc.)
- Shared utility functions (isEmpty, getCellKey, etc.)

**File: `validation/utils.ts`**
- Generic validation utility functions
- Caching mechanism and cache clearing helpers
- API URL helpers

### 2. Field Validation (300-350 lines)
**File: `validation/hooks/useFieldValidation.ts`**
- `validateField` function
- Field-level validation logic
- Required, regex, and other field validations

### 3. Uniqueness Validation (250-300 lines)
**File: `validation/hooks/useUniquenessValidation.ts`**
- `validateUniqueField` function
- `validateUniqueItemNumbers` function
- All uniqueness checking logic

### 4. UPC Validation (300-350 lines)
**File: `validation/hooks/useUpcValidation.ts`**
- `fetchProductByUpc` function
- `validateUpc` function
- `applyItemNumbersToData` function
- UPC validation state management

### 5. Validation Status Management (300-350 lines)
**File: `validation/hooks/useValidationStatus.ts`**
- Error state management
- Row validation status tracking
- Validation indicators and refs
- Batch validation processing

### 6. Data Management (300-350 lines)
**File: `validation/hooks/useValidationData.ts`**
- Data state management
- Row updates
- Data filtering
- Initial data processing

### 7. Template Management (250-300 lines)
**File: `validation/hooks/useTemplateManagement.ts`**
- Template saving
- Template application
- Template loading
- Template display helpers

### 8. Main Validation Hook (300-350 lines)
**File: `validation/hooks/useValidation.ts`**
- Main hook that composes all other hooks
- Public API export
- Initialization logic
- Core validation flow

## Function Distribution

### Core Types & Utilities
- All interfaces (InfoWithSource, ValidationState, etc.)
- `isEmpty` utility
- `getApiUrl` helper

### Field Validation
- `validateField`
- `validateRow`
- `validateData` (partial)
- All validation result caching

### Uniqueness Validation
- `validateUniqueField`
- `validateUniqueItemNumbers`
- Uniqueness caching mechanisms

### UPC Validation
- `fetchProductByUpc`
- `validateUpc`
- `validateAllUPCs`
- `applyItemNumbersToData`
- UPC validation state tracking (cells, rows)

### Validation Status Management
- `startValidatingCell`/`stopValidatingCell`
- `startValidatingRow`/`stopValidatingRow`
- `isValidatingCell`/`isRowValidatingUpc`
- Error state management
- `revalidateRows`

### Data Management
- Initial data cleaning/processing
- `updateRow`
- `copyDown`
- Search/filter functionality
- `filteredData` calculation

### Template Management
- `saveTemplate`
- `applyTemplate`
- `applyTemplateToSelected`
- `getTemplateDisplayText`
- `loadTemplates`/`refreshTemplates`

### Main Validation Hook
- Composition of all other hooks
- Initialization logic
- Button/navigation handling
- Field options management

## Implementation Approach

1. **Start with Types**: Create the types file first, as all other files will depend on it
2. **Create Utility Functions**: Move shared utilities next
3. **Build Core Validation**: Extract the field validation and uniqueness validation
4. **Separate UPC Logic**: Move all UPC-specific code to its own module
5. **Extract State Management**: Move data and status management to separate files
6. **Move Template Logic**: Extract template functionality
7. **Create Composition Hook**: Build the main hook that uses all other hooks (sketched below)

This approach will give you more maintainable code with clearer separation of concerns, making it easier to understand, test, and modify each component independently.
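
A minimal sketch of step 7, using the hook and file names proposed above; the parameters passed between hooks and the return shape are illustrative, not the final API:

```typescript
// validation/hooks/useValidation.ts (sketch)
import type { Field } from '../types';
import { useValidationData } from './useValidationData';
import { useValidationStatus } from './useValidationStatus';
import { useFieldValidation } from './useFieldValidation';
import { useUniquenessValidation } from './useUniquenessValidation';
import { useUpcValidation } from './useUpcValidation';
import { useTemplateManagement } from './useTemplateManagement';

// Composes the specialized hooks into one public API, so consumers keep
// importing a single useValidation hook after the split.
export const useValidation = <T extends string>(fields: Field<T>[]) => {
  const data = useValidationData<T>(fields);
  const status = useValidationStatus();
  const fieldValidation = useFieldValidation(fields, status);
  const uniqueness = useUniquenessValidation(data.rows, status);
  const upc = useUpcValidation(data.rows, status);
  const templates = useTemplateManagement(data.updateRow);

  return {
    ...data,            // rows, updateRow, copyDown, filteredData, ...
    ...status,          // validationErrors, isValidatingCell, ...
    ...fieldValidation, // validateField, validateRow, ...
    ...uniqueness,      // validateUniqueField, validateUniqueItemNumbers, ...
    ...upc,             // validateUpc, applyItemNumbersToData, ...
    ...templates,       // saveTemplate, applyTemplate, ...
  };
};
```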
docs/validation-process-issues.md (new file, 354 lines)
@@ -0,0 +1,354 @@
## 1. ✅ Error Filtering Logic Inconsistency (RESOLVED)

> **Note: This issue has been resolved by implementing a type-based error system.**

The filtering logic in `ValidationCell.tsx` previously relied on string matching, which was fragile:

```typescript
// Old implementation (string-based matching)
const filteredErrors = React.useMemo(() => {
  return !isEmpty(value)
    ? errors.filter(error => !error.message?.toLowerCase().includes('required'))
    : errors;
}, [value, errors]);

// New implementation (type-based filtering)
const filteredErrors = React.useMemo(() => {
  return !isEmpty(value)
    ? errors.filter(error => error.type !== ErrorType.Required)
    : errors;
}, [value, errors]);
```

The solution implemented:
- Added an `ErrorType` enum in `types.ts` to standardize error categorization
- Updated all error creation to include the appropriate error type
- Modified error filtering to use the type property instead of string matching
- Ensured consistent error handling across the application

**Guidelines for future development:**
- Always use the `ErrorType` enum when creating errors
- Never rely on string matching for error filtering
- Ensure all error objects include the `type` property
- Use the appropriate error type for each validation rule:
  - `ErrorType.Required` for required field validations
  - `ErrorType.Regex` for regex validations
  - `ErrorType.Unique` for uniqueness validations
  - `ErrorType.Custom` for custom validations
  - `ErrorType.Api` for API-based validations
## 2. ⚠️ Redundant Error Processing (PARTIALLY RESOLVED)

> **Note: This issue has been partially resolved by the re-rendering optimizations.**

The system still processes errors in multiple places:
- In `ValidationCell.tsx`, errors are filtered by the optimized `processErrors` function
- In `useValidation.tsx`, errors are generated at the field level
- In `ValidationContainer.tsx`, errors are manipulated at the container level

While the error processing has been optimized to be more efficient, there is still some redundancy in how errors are handled across components. However, the current implementation has mitigated the performance impact.

**Improvements made:**
- Created a central `processErrors` function in ValidationCell that efficiently handles error filtering
- Implemented a batched update system to reduce redundant error processing
- Added better memoization to avoid reprocessing errors when not needed

**Future improvement opportunities:**
- Further consolidate error processing logic into a single location
- Create a dedicated error handling service or hook
- Implement a more declarative approach to error handling

## 3. Race Conditions in Async Validation

Async validations could create race conditions:
- If a user types quickly, multiple validation requests might be in flight
- Later responses could overwrite more recent ones if they complete out of order
- The debouncing helps but doesn't fully solve this issue (a stale-response guard is sketched below)
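
One common guard, as a minimal sketch: tag each cell's validation run with a monotonically increasing token and discard any response that is no longer the latest. The `latestRunRef` name is hypothetical, and `runValidation`/`applyValidationResult` stand in for whatever async check and state write the real code performs:

```typescript
// Hypothetical stale-response guard: only the most recent validation run
// for a given cell is allowed to write its result into state.
const latestRunRef = useRef(new Map<string, number>()); // cellKey -> run id

const validateCellAsync = async (cellKey: string, value: string) => {
  const runId = (latestRunRef.current.get(cellKey) ?? 0) + 1;
  latestRunRef.current.set(cellKey, runId);

  const result = await runValidation(value); // e.g. an API-backed check

  // A newer run started while this one was in flight: drop this result.
  if (latestRunRef.current.get(cellKey) !== runId) return;

  applyValidationResult(cellKey, result);
};
```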

## 4. Memory Leaks in Timeout Management

The validation timeouts are stored in refs:
```typescript
const validationTimeoutsRef = useRef<Record<number, NodeJS.Timeout>>({});
```

While there is cleanup on unmount, if rows are added/removed dynamically, timeouts for deleted rows might not be properly cleared; a per-row cleanup sketch follows below.
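
A minimal sketch of per-row cleanup, assuming the `validationTimeoutsRef` shape shown above; `clearRowTimeout` is a hypothetical helper that row-removal code would call:

```typescript
// Hypothetical helper: clear and forget the pending timeout for one row,
// so deleting a row cannot leave its timer running.
const clearRowTimeout = (rowIndex: number) => {
  const timeout = validationTimeoutsRef.current[rowIndex];
  if (timeout) {
    clearTimeout(timeout);
    delete validationTimeoutsRef.current[rowIndex];
  }
};

// Unmount cleanup stays as before, but sweeps whatever is left.
useEffect(() => {
  return () => {
    Object.values(validationTimeoutsRef.current).forEach(clearTimeout);
    validationTimeoutsRef.current = {};
  };
}, []);
```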

## 5. ✅ Inefficient Error Storage (RESOLVED)

**Status: RESOLVED**

### Problem

Previously, validation errors were stored in multiple locations:
- In the `validationErrors` Map in `useValidationState`
- In the row data itself as `__errors`

This redundancy caused several issues:
- Inconsistent error states between the two storage locations
- Increased memory usage by storing the same information twice
- Complex state management to keep both sources in sync
- Difficulty reasoning about where errors should be accessed from

### Solution

We've implemented a unified error storage approach by:
- Making the `validationErrors` Map in `useValidationState` the single source of truth for all validation errors
- Removing the `__errors` property from row data
- Updating all validation functions to interact with the central error store instead of modifying row data
- Modifying UPC validation to use the central error store
- Updating all components to read errors from the `validationErrors` Map instead of row data

### Key Changes

1. Modified `dataMutations.ts` to stop storing errors in row data
2. Updated the `Meta` type to remove the `__errors` property
3. Modified the `RowData` type to remove the `__errors` property
4. Updated the `useValidation` hook to return errors separately from row data
5. Modified the `useAiValidation` hook to work with the central error store
6. Updated the `useFilters` hook to check for errors in the `validationErrors` Map
7. Modified the `ValidationTable` and `ValidationCell` components to read errors from the `validationErrors` Map

### Benefits

- **Single Source of Truth**: All validation errors are now stored in one place
- **Reduced Memory Usage**: No duplicate storage of error information
- **Simplified State Management**: Only one state to update when errors change
- **Cleaner Data Structure**: Row data no longer contains validation metadata
- **More Maintainable Code**: Clearer separation of concerns between data and validation

### Future Improvements

While this refactoring addresses the core issue of inefficient error storage, there are still opportunities for further optimization:

1. ✅ **Redundant Error Processing**: ~~The validation process still performs some redundant calculations that could be optimized.~~ This has been largely addressed by the re-rendering optimizations.
2. **Race Conditions**: Async validation can lead to race conditions when multiple validations are triggered in quick succession.
3. **Memory Leaks**: The timeout management for validation could be improved to prevent potential memory leaks.
4. **Tight Coupling**: Components are still tightly coupled to the validation state structure.
5. **Error Prioritization**: The system doesn't prioritize errors well, showing all errors at once rather than focusing on the most critical ones first.

### Validation Flow

The validation process now works as follows:

1. **Error Generation**:
   - Field-level validations generate errors based on validation rules
   - Row-level hooks add custom validation errors
   - Table-level validations (like uniqueness checks) add errors across rows

2. **Error Storage**:
   - All errors are stored in the `validationErrors` Map in `useValidationState`
   - The Map uses row indices as keys and objects of field errors as values

3. **Error Display**:
   - The `ValidationTable` component checks the `validationErrors` Map to highlight rows with errors
   - The `ValidationCell` component receives errors for specific fields from the `validationErrors` Map
   - Errors are filtered in the UI to avoid showing "required" errors for fields with values

This focused refactoring approach has successfully addressed a critical issue while keeping changes manageable and targeted.

## 6. ✅ Excessive Re-rendering (RESOLVED)

**Status: RESOLVED**

### Problem

The validation system was suffering from excessive re-renders due to several key issues:

- **Inefficient Error Filtering**: The ValidationCell component was filtering errors on every render
- **Redundant Error Processing**: The same validation work was repeated in multiple components
- **Poor Memoization**: Components were inadequately memoized, causing unnecessary re-renders
- **Inefficient Batch Updates**: The state update system wasn't optimally batching changes

These issues led to performance problems, especially with large datasets, and affected the user experience.

### Solution

We've implemented a comprehensive optimization approach:

- **Optimized Error Processing**: Created an efficient `processErrors` function in ValidationCell that calculates all derived state in one pass
- **Enhanced Memoization**: Improved memo comparison functions to avoid unnecessary re-renders
- **Improved Batch Updates**: Redesigned the batching system to aggregate multiple changes before state updates
- **Single Update Pattern**: Implemented a queue-based update mechanism that applies multiple state changes at once

### Key Changes

1. Added a more efficient error processing function in ValidationCell
2. Created an enhanced error comparison function to properly compare error arrays
3. Improved the memo comparison function in ValidationCell
4. Added a batch update system in useValidationState
5. Implemented a queue-based update mechanism for row modifications

### Benefits

- **Improved Performance**: Fewer render cycles mean faster UI response
- **Better User Experience**: Less lag when editing large datasets
- **Reduced Memory Usage**: Fewer component instantiations and temporary objects
- **Increased Scalability**: The application can now handle larger datasets without slowdown
- **Maintainable Code**: More predictable update flow that's easier to debug and extend

### Guidelines for future development

- Use the `processErrors` function for error filtering and processing
- Ensure React.memo components have proper comparison functions
- Use the batched update system for state changes
- Maintain stable references to objects and functions
- Use appropriate React hooks (useMemo, useCallback) with correct dependencies
- Avoid unnecessary recreations of arrays, objects, and functions
## 7. Complex Error Merging Logic

When merging errors from different sources, the logic is complex and potentially error-prone:

```typescript
// Merge field errors and row hook errors
const mergedErrors: Record<string, InfoWithSource> = {}

// Convert field errors to InfoWithSource
Object.entries(fieldErrors).forEach(([key, errors]) => {
  if (errors.length > 0) {
    mergedErrors[key] = {
      message: errors[0].message,
      level: errors[0].level,
      source: ErrorSources.Row,
      type: errors[0].type || ErrorType.Custom
    }
  }
})
```

This only takes the first error for each field, potentially hiding important validation issues; a merge that keeps every error is sketched below.
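
A minimal sketch of a lossless alternative, keeping the `InfoWithSource` shape from the snippet above; the plural value type is the only change and is illustrative:

```typescript
// Keep all errors per field instead of only the first one, so no
// validation issue is silently dropped during merging.
const mergedErrors: Record<string, InfoWithSource[]> = {};

Object.entries(fieldErrors).forEach(([key, errors]) => {
  if (errors.length > 0) {
    mergedErrors[key] = errors.map(error => ({
      message: error.message,
      level: error.level,
      source: ErrorSources.Row,
      type: error.type || ErrorType.Custom,
    }));
  }
});
```

Display code can then decide which of the collected errors to surface first (see issue 10 on prioritization).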

## 8. ✅ Inconsistent Error Handling for Empty Values (PARTIALLY RESOLVED)

> **Note: This issue has been partially resolved by standardizing the isEmpty function and error type system.**

The system previously had different approaches to handling empty values:
- Some validations skipped empty values unless they were required
- Others processed empty values differently
- The `isEmpty` function was defined multiple times with slight variations

The solution implemented:
- Standardized the `isEmpty` function implementation
- Ensured consistent error type usage for required field validations
- Made error filtering consistent across the application

**Guidelines for future development:**
- Always use the shared `isEmpty` function for checking empty values
- Ensure consistent handling of empty values across all validation rules
- Use the `ErrorType.Required` type for all required field validations

## 9. Tight Coupling Between Components

The validation system is tightly coupled across components:
- `ValidationCell` needs to understand the structure of errors
- `ValidationTable` needs to extract and pass the right errors
- `ValidationContainer` directly manipulates the error structure

This makes it harder to refactor or reuse components independently.

## 10. Limited Error Prioritization

There's no clear prioritization of errors:
- When multiple errors exist for a field, which one should be shown first?
- Are some errors more important than others?
- The current system mostly shows the first error it finds

A more robust approach would be to have a consistent error source identification system and a clear prioritization strategy for displaying errors; one possible ordering is sketched below.
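
One possible strategy, as a minimal sketch: rank errors by their `ErrorType` (using the `ErrorObject` shape defined later in this document) and sort before display. The ranking order here is an assumption, not an established convention in this codebase:

```typescript
// Hypothetical priority ranking: lower number = shown first.
const ERROR_PRIORITY: Record<ErrorType, number> = {
  [ErrorType.Api]: 0,      // external checks often block submission outright
  [ErrorType.Unique]: 1,
  [ErrorType.Required]: 2,
  [ErrorType.Regex]: 3,
  [ErrorType.Custom]: 4,
};

const sortByPriority = (errors: ErrorObject[]): ErrorObject[] =>
  [...errors].sort((a, b) => ERROR_PRIORITY[a.type] - ERROR_PRIORITY[b.type]);
```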

------------

Let me explain how these hooks fit together to create the validation errors that eventually get filtered in the `ValidationCell` component:

## The Validation Flow

1. **useValidationState Hook**:
   This is the main state management hook used by the `ValidationContainer` component. It:
   - Manages the core data state (`data`)
   - Tracks validation errors in a Map (`validationErrors`)
   - Provides functions to update and validate rows

2. **useValidation Hook**:
   This is a utility hook that provides the core validation logic:
   - `validateField`: Validates a single field against its validation rules
   - `validateRow`: Validates an entire row, field by field
   - `validateTable`: Runs table-level validations
   - `validateUnique`: Checks for uniqueness constraints
   - `validateData`: Orchestrates the complete validation process

## How Errors Are Generated

Validation errors come from multiple sources:

1. **Field-Level Validations**:
   In `useValidation.tsx`, the `validateField` function checks individual fields against rules like:
   - `required`: Field must have a value
   - `regex`: Value must match a pattern
   - `min`/`max`: Numeric constraints

2. **Row-Level Validations**:
   The `validateRow` function in `useValidation.tsx` runs:
   - Field validations for each field in the row
   - Special validations for required fields like supplier and company
   - Custom row hooks provided by the application

3. **Table-Level Validations**:
   - `validateUnique` checks for duplicate values in fields marked as unique
   - `validateTable` runs custom table hooks for cross-row validations

4. **API-Based Validations**:
   In `useValidationState.tsx` and `ValidationContainer.tsx`:
   - UPC validation via API calls
   - Item number uniqueness checks
## The Error Flow

1. Errors are collected in the `validationErrors` Map in `useValidationState`
2. This Map is passed to `ValidationTable` as a prop
3. `ValidationTable` extracts the relevant errors for each cell and passes them to `ValidationCell`
4. In `ValidationCell`, the errors are filtered based on whether the cell has a value:

```typescript
// Updated implementation using type-based filtering
const filteredErrors = React.useMemo(() => {
  return !isEmpty(value)
    ? errors.filter(error => error.type !== ErrorType.Required)
    : errors;
}, [value, errors]);
```

## Key Insights

1. **Error Structure**:
   Errors now have a consistent structure with type information:

```typescript
type ErrorObject = {
  message: string;
  level: string; // 'error', 'warning', etc.
  source?: ErrorSources; // Where the error came from
  type: ErrorType; // The type of error (Required, Regex, Unique, etc.)
}
```

2. **Error Sources**:
   Errors can come from:
   - Field validations (required, regex, etc.)
   - Row validations (custom business logic)
   - Table validations (uniqueness checks)
   - API validations (UPC checks)

3. **Error Types**:
   Errors are now categorized by type:
   - `ErrorType.Required`: Field is required but empty
   - `ErrorType.Regex`: Value doesn't match the regex pattern
   - `ErrorType.Unique`: Value must be unique across rows
   - `ErrorType.Custom`: Custom validation errors
   - `ErrorType.Api`: Errors from API calls

4. **Error Filtering**:
   The filtering in `ValidationCell` is now more robust:
   - When a field has a value, errors of type `ErrorType.Required` are filtered out
   - When a field is empty, all errors are shown

5. **Performance Optimizations**:
   - Batch processing of validations
   - Debounced updates to avoid excessive re-renders
   - Memoization of computed values
docs/validation-table-scroll-issue.md (new file, 538 lines)
@@ -0,0 +1,538 @@
# ValidationTable Scroll Position Issue

## Problem Description

The `ValidationTable` component in the inventory application suffers from a persistent scroll position issue. When the table content updates or re-renders, the scroll position resets to the top left corner. This creates a poor user experience, especially when users are working with large datasets and need to maintain their position while making edits or filtering data.

Specific behaviors:
- Scroll position resets to the top left corner during re-renders
- User loses their place in the table when data is updated
- The table does not preserve vertical or horizontal scroll position

## Relevant Files

- **`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationTable.tsx`**
  - Main component that renders the validation table
  - Handles scroll position management

- **`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationContainer.tsx`**
  - Parent component that wraps ValidationTable
  - Creates an EnhancedValidationTable wrapper component
  - Manages data and state for the validation table

- **`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/hooks/useValidationState.tsx`**
  - Provides state management and data manipulation functions
  - Contains scroll-related code in the `updateRow` function

- **`inventory/src/lib/react-spreadsheet-import/src/steps/ValidationStepNew/components/ValidationCell.tsx`**
  - Renders individual cells in the table
  - May influence re-renders that affect scroll position

## Failed Attempts

We've tried multiple approaches to fix the scroll position issue, none of which have been successful:

### 1. Using Refs for Scroll Position

```typescript
const scrollPosition = useRef({
  left: 0,
  top: 0
});

// Capture position on scroll
const handleScroll = useCallback(() => {
  if (tableContainerRef.current) {
    scrollPosition.current = {
      left: tableContainerRef.current.scrollLeft,
      top: tableContainerRef.current.scrollTop
    };
  }
}, []);

// Restore in useLayoutEffect
useLayoutEffect(() => {
  const container = tableContainerRef.current;
  if (container) {
    const { left, top } = scrollPosition.current;
    if (left || top) {
      container.scrollLeft = left;
      container.scrollTop = top;
    }
  }
});
```

Result: Scroll position was still lost during updates.

### 2. Multiple Restoration Attempts with Timeouts

```typescript
// Multiple timeouts at different intervals
setTimeout(() => {
  if (tableContainerRef.current) {
    tableContainerRef.current.scrollTop = savedPosition.top;
    tableContainerRef.current.scrollLeft = savedPosition.left;
  }
}, 0);

setTimeout(() => {
  if (tableContainerRef.current) {
    tableContainerRef.current.scrollTop = savedPosition.top;
    tableContainerRef.current.scrollLeft = savedPosition.left;
  }
}, 50);

// Additional timeouts at 100ms, 300ms
```

Result: Still not reliable, scroll position would reset between timeouts or after all timeouts completed.

### 3. Using MutationObserver and ResizeObserver

```typescript
// Create a mutation observer to detect DOM changes
const mutationObserver = new MutationObserver(() => {
  if (shouldPreserveScroll) {
    restoreScrollPosition();
  }
});

// Start observing the table for DOM changes
mutationObserver.observe(scrollableContainer, {
  childList: true,
  subtree: true,
  attributes: true,
  attributeFilter: ['style', 'class']
});

// Create a resize observer
const resizeObserver = new ResizeObserver(() => {
  if (shouldPreserveScroll) {
    restoreScrollPosition();
  }
});

// Observe the table container
resizeObserver.observe(scrollableContainer);
```

Result: Did not reliably maintain scroll position, and sometimes caused other rendering issues.

### 4. Recursive Restoration Approach

```typescript
let attempts = 0;
const maxAttempts = 5;

const restore = () => {
  if (tableContainerRef.current) {
    tableContainerRef.current.scrollTop = y;
    tableContainerRef.current.scrollLeft = x;

    attempts++;
    if (attempts < maxAttempts) {
      setTimeout(restore, 50 * attempts);
    }
  }
};

restore();
```

Result: No improvement, scroll position still reset.

### 5. Using React State for Scroll Position

```typescript
const [scrollPos, setScrollPos] = useState<{top: number; left: number}>({top: 0, left: 0});

// Track the scroll event
useEffect(() => {
  const handleScroll = () => {
    if (scrollContainerRef.current) {
      setScrollPos({
        top: scrollContainerRef.current.scrollTop,
        left: scrollContainerRef.current.scrollLeft
      });
    }
  };

  // Add scroll listener...
}, []);

// Restore scroll position
useLayoutEffect(() => {
  const container = scrollContainerRef.current;
  const { top, left } = scrollPos;

  if (top > 0 || left > 0) {
    requestAnimationFrame(() => {
      if (container) {
        container.scrollTop = top;
        container.scrollLeft = left;
      }
    });
  }
}, [scrollPos, data]);
```

Result: Caused the screen to shake violently when scrolling and did not preserve position.

### 6. Using Key Attribute for Stability

```typescript
return (
  <div
    key="validation-table-container"
    ref={scrollContainerRef}
    className="overflow-auto max-h-[calc(100vh-300px)]"
  >
    {/* Table content */}
  </div>
);
```

Result: Did not resolve the issue and may have contributed to rendering instability.

### 7. Removing Scroll Management from Other Components

We removed scroll position management code from:
- `useValidationState.tsx` (in the updateRow function)
- `ValidationContainer.tsx` (in the enhancedUpdateRow function)

Result: This did not fix the issue either.

### 8. Simple Scroll Position Management with Event Listeners

```typescript
// Create a ref to store scroll position
const scrollPosition = useRef({ left: 0, top: 0 });
const tableContainerRef = useRef<HTMLDivElement>(null);

// Save scroll position when scrolling
const handleScroll = useCallback(() => {
  if (tableContainerRef.current) {
    scrollPosition.current = {
      left: tableContainerRef.current.scrollLeft,
      top: tableContainerRef.current.scrollTop
    };
  }
}, []);

// Add scroll listener
useEffect(() => {
  const container = tableContainerRef.current;
  if (container) {
    container.addEventListener('scroll', handleScroll);
    return () => container.removeEventListener('scroll', handleScroll);
  }
}, [handleScroll]);

// Restore scroll position after data changes
useLayoutEffect(() => {
  const container = tableContainerRef.current;
  if (container) {
    const { left, top } = scrollPosition.current;
    if (left > 0 || top > 0) {
      container.scrollLeft = left;
      container.scrollTop = top;
    }
  }
}, [data]);
```

Result: Still did not maintain scroll position during updates.

### 9. Memoized Scroll Container Component

```typescript
// Create a stable scroll container that won't re-render with the table
const ScrollContainer = React.memo(({ children }: { children: React.ReactNode }) => {
  const containerRef = useRef<HTMLDivElement>(null);
  const scrollPosition = useRef({ left: 0, top: 0 });

  const handleScroll = useCallback(() => {
    if (containerRef.current) {
      scrollPosition.current = {
        left: containerRef.current.scrollLeft,
        top: containerRef.current.scrollTop
      };
    }
  }, []);

  useEffect(() => {
    const container = containerRef.current;
    if (container) {
      // Set initial scroll position if it exists
      if (scrollPosition.current.left > 0 || scrollPosition.current.top > 0) {
        container.scrollLeft = scrollPosition.current.left;
        container.scrollTop = scrollPosition.current.top;
      }
      container.addEventListener('scroll', handleScroll);
      return () => container.removeEventListener('scroll', handleScroll);
    }
  }, [handleScroll]);

  return (
    <div ref={containerRef} className="overflow-auto max-h-[calc(100vh-300px)]">
      {children}
    </div>
  );
});
```

Result: Still did not maintain scroll position during updates, even with a memoized container.

### 10. Using TanStack Table State Management

```typescript
// Track scroll state in the table instance
const [scrollState, setScrollState] = useState({ scrollLeft: 0, scrollTop: 0 });

const table = useReactTable({
  data,
  columns,
  getCoreRowModel: getCoreRowModel(),
  state: {
    rowSelection,
    // Include scroll position in table state
    scrollLeft: scrollState.scrollLeft,
    scrollTop: scrollState.scrollTop
  },
  onStateChange: (updater) => {
    if (typeof updater === 'function') {
      const newState = updater({
        rowSelection,
        scrollLeft: scrollState.scrollLeft,
        scrollTop: scrollState.scrollTop
      });
      if ('scrollLeft' in newState || 'scrollTop' in newState) {
        setScrollState({
          scrollLeft: newState.scrollLeft ?? scrollState.scrollLeft,
          scrollTop: newState.scrollTop ?? scrollState.scrollTop
        });
      }
    }
  }
});

// Handle scroll events
const handleScroll = useCallback((event: React.UIEvent<HTMLDivElement>) => {
  const target = event.target as HTMLDivElement;
  setScrollState({
    scrollLeft: target.scrollLeft,
    scrollTop: target.scrollTop
  });
}, []);

// Restore scroll position after updates
useLayoutEffect(() => {
  if (tableContainerRef.current) {
    tableContainerRef.current.scrollLeft = scrollState.scrollLeft;
    tableContainerRef.current.scrollTop = scrollState.scrollTop;
  }
}, [data, scrollState]);
```

Result: Still did not maintain scroll position during updates, even with table state management.

### 11. Using CSS Sticky Positioning

```typescript
return (
  <div className="relative max-h-[calc(100vh-300px)] overflow-auto">
    <Table>
      <TableHeader className="sticky top-0 z-10 bg-background">
        <TableRow>
          {table.getFlatHeaders().map((header) => (
            <TableHead
              key={header.id}
              style={{
                width: `${header.getSize()}px`,
                minWidth: `${header.getSize()}px`,
                position: 'sticky',
                top: 0,
                backgroundColor: 'inherit'
              }}
            >
              {/* Header content */}
            </TableHead>
          ))}
        </TableRow>
      </TableHeader>
      <TableBody>
        {/* Table body content */}
      </TableBody>
    </Table>
  </div>
);
```

Result: Still did not maintain scroll position during updates, even with native CSS scrolling.

### 12. Optimized Memoization with Object.is

```typescript
// Memoize data structures to prevent unnecessary re-renders
const memoizedData = useMemo(() => data, [data]);
const memoizedValidationErrors = useMemo(() => validationErrors, [validationErrors]);
const memoizedValidatingCells = useMemo(() => validatingCells, [validatingCells]);
const memoizedItemNumbers = useMemo(() => itemNumbers, [itemNumbers]);

// Use Object.is for more efficient comparisons
export default React.memo(ValidationTable, (prev, next) => {
  if (!Object.is(prev.data.length, next.data.length)) return false;

  if (prev.validationErrors.size !== next.validationErrors.size) return false;
  for (const [key, value] of prev.validationErrors) {
    if (!next.validationErrors.has(key)) return false;
    if (!Object.is(value, next.validationErrors.get(key))) return false;
  }

  // ... more optimized comparisons ...
});
```

Result: Caused the page to crash with "TypeError: undefined has no properties" in the MemoizedCell component.

### 13. Simplified Component Structure

```typescript
const ValidationTable = <T extends string>({
  data,
  fields,
  rowSelection,
  setRowSelection,
  updateRow,
  validationErrors,
  // ... other props
}) => {
  const tableContainerRef = useRef<HTMLDivElement>(null);
  const lastScrollPosition = useRef({ left: 0, top: 0 });

  // Simple scroll position management
  const handleScroll = useCallback(() => {
    if (tableContainerRef.current) {
      lastScrollPosition.current = {
        left: tableContainerRef.current.scrollLeft,
        top: tableContainerRef.current.scrollTop
      };
    }
  }, []);

  useEffect(() => {
    const container = tableContainerRef.current;
    if (container) {
      container.addEventListener('scroll', handleScroll);
      return () => container.removeEventListener('scroll', handleScroll);
    }
  }, [handleScroll]);

  useLayoutEffect(() => {
    const container = tableContainerRef.current;
    if (container) {
      const { left, top } = lastScrollPosition.current;
      if (left > 0 || top > 0) {
        requestAnimationFrame(() => {
          if (container) {
            container.scrollLeft = left;
            container.scrollTop = top;
          }
        });
      }
    }
  }, [data]);

  return (
    <div ref={tableContainerRef} className="overflow-auto max-h-[calc(100vh-300px)]">
      <Table>
        {/* ... table content ... */}
        <TableBody>
          {table.getRowModel().rows.map((row) => (
            <TableRow
              key={row.id}
              className={cn(
                "hover:bg-muted/50",
                row.getIsSelected() ? "bg-muted/50" : "",
                validationErrors.get(data.indexOf(row.original)) ? "bg-red-50/40" : ""
              )}
            >
              {/* ... row content ... */}
            </TableRow>
          ))}
        </TableBody>
      </Table>
    </div>
  );
};
```

Result: Still did not maintain scroll position during updates. However, this implementation restored the subtle red highlight on rows with validation errors, which is a useful visual indicator that should be preserved in future attempts.

### 14. Portal-Based Scroll Container

```typescript
// Create a stable container outside of React's control
const createStableContainer = () => {
  const containerId = 'validation-table-container';
  let container = document.getElementById(containerId);

  if (!container) {
    container = document.createElement('div');
    container.id = containerId;
    container.className = 'overflow-auto';
    container.style.maxHeight = 'calc(100vh - 300px)';
    document.body.appendChild(container);
  }

  return container;
};

const ValidationTable = <T extends string>({...props}) => {
  const [container] = useState(createStableContainer);
  const [mounted, setMounted] = useState(false);

  useEffect(() => {
    setMounted(true);
    return () => {
      if (container && container.parentNode) {
        container.parentNode.removeChild(container);
      }
    };
  }, [container]);

  // ... table configuration ...

  return createPortal(content, container);
};
```

Result: The table contents failed to render at all. The portal-based approach to maintain scroll position by moving the scroll container outside of React's control was unsuccessful.

## Current Understanding

The scroll position issue appears to be complex and likely stems from multiple factors:

1. React's virtual DOM reconciliation may be replacing the scroll container element during updates (a quick check for this is sketched below)
2. The table uses complex memo patterns with custom equality checks that may not be working as expected
3. The data structure may be changing in ways that cause complete re-renders
4. The component hierarchy (with the EnhancedValidationTable wrapper) may be affecting DOM stability
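
To test factor 1 directly, a small instrumentation hook (hypothetical, reusing the `tableContainerRef` from the attempts above) can report whether the container DOM node changes identity across commits:

```typescript
// Warns whenever the ref points at a different DOM node than on the previous
// commit. If this fires on data updates, the container element is being
// replaced, which resets its scroll position.
const prevNode = useRef<HTMLDivElement | null>(null);

useEffect(() => {
  if (prevNode.current && prevNode.current !== tableContainerRef.current) {
    console.warn('Scroll container DOM node was replaced during an update');
  }
  prevNode.current = tableContainerRef.current;
});
```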

## Next Steps to Consider

At this point, we have tried multiple approaches without success:
1. Various scroll position management techniques
2. Memoization and optimization strategies
3. Different component structures
4. Portal-based rendering

Given that none of these approaches has fully resolved the issue, it may be worth:
1. Investigating whether any parent component updates are forcing re-renders
2. Profiling the application to identify the exact timing of scroll position resets (see the sketch after this list)
3. Considering whether the current table implementation could be simplified
4. Exploring whether the data update patterns could be optimized to reduce re-renders
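
For step 2, a `MutationObserver` can timestamp the moment the table's container leaves the DOM, which makes it possible to correlate resets with specific data updates (a sketch; the `querySelector('table')` heuristic is an assumption about the markup):

```typescript
// Logs a high-resolution timestamp whenever a node containing the table is
// removed from the document, so resets can be lined up against update logs.
const observer = new MutationObserver((mutations) => {
  for (const mutation of mutations) {
    for (const node of Array.from(mutation.removedNodes)) {
      if (node instanceof HTMLElement && node.querySelector('table')) {
        console.warn('Table container removed at', performance.now());
      }
    }
  }
});
observer.observe(document.body, { childList: true, subtree: true });
```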

## Conclusion

The scroll position issue has proven resistant to multiple solution attempts. Each approach has either failed to maintain scroll position, introduced new issues, or, in some cases (like the portal-based approach), prevented the table from rendering entirely. A deeper investigation into the component lifecycle and data flow may be necessary to identify the root cause.

@@ -1,5 +1,209 @@
// ecosystem.config.js
const path = require('path');
const dotenv = require('dotenv');

// Load environment variables safely with error handling
const loadEnvFile = (envPath) => {
  try {
    console.log('Loading env from:', envPath);
    const result = dotenv.config({ path: envPath });
    if (result.error) {
      console.warn(`Warning: .env file not found or invalid at ${envPath}:`, result.error.message);
      return {};
    }
    console.log('Env variables loaded from', envPath, ':', Object.keys(result.parsed || {}));
    return result.parsed || {};
  } catch (error) {
    console.warn(`Warning: Error loading .env file at ${envPath}:`, error.message);
    return {};
  }
}

// Load environment variables for each server
const authEnv = loadEnvFile(path.resolve(__dirname, 'dashboard/auth-server/.env'));
const aircallEnv = loadEnvFile(path.resolve(__dirname, 'dashboard/aircall-server/.env'));
const klaviyoEnv = loadEnvFile(path.resolve(__dirname, 'dashboard/klaviyo-server/.env'));
const metaEnv = loadEnvFile(path.resolve(__dirname, 'dashboard/meta-server/.env'));
const googleAnalyticsEnv = require('dotenv').config({
  path: path.resolve(__dirname, 'dashboard/google-server/.env')
}).parsed || {};
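// Note: unlike the loaders above, this one calls dotenv directly, so a missing
// or invalid google-server/.env is silently swallowed rather than warned about.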
const typeformEnv = loadEnvFile(path.resolve(__dirname, 'dashboard/typeform-server/.env'));
const inventoryEnv = loadEnvFile(path.resolve(__dirname, 'inventory/.env'));

// Common log settings for all apps
const logSettings = {
  log_rotate: true,
  max_size: '10M',
  retain: '10',
  log_date_format: 'YYYY-MM-DD HH:mm:ss'
};

// Common app settings
const commonSettings = {
  instances: 1,
  exec_mode: 'fork',
  autorestart: true,
  watch: false,
  max_memory_restart: '1G',
  time: true,
  ...logSettings,
  ignore_watch: [
    'node_modules',
    'logs',
    '.git',
    '*.log'
  ],
  min_uptime: 5000,
  max_restarts: 5,
  restart_delay: 4000,
  listen_timeout: 50000,
  kill_timeout: 5000,
  node_args: '--max-old-space-size=1536'
};

module.exports = {
  apps: [
    {
      ...commonSettings,
      name: 'auth-server',
      script: './dashboard/auth-server/index.js',
      env: {
        NODE_ENV: 'production',
        PORT: 3003,
        ...authEnv
      },
      error_file: 'dashboard/auth-server/logs/pm2/err.log',
      out_file: 'dashboard/auth-server/logs/pm2/out.log',
      log_file: 'dashboard/auth-server/logs/pm2/combined.log',
      env_production: {
        NODE_ENV: 'production',
        PORT: 3003
      },
      env_development: {
        NODE_ENV: 'development',
        PORT: 3003
      }
    },
    {
      ...commonSettings,
      name: 'aircall-server',
      script: './dashboard/aircall-server/server.js',
      env: {
        NODE_ENV: 'production',
        AIRCALL_PORT: 3002,
        ...aircallEnv
      },
      error_file: 'dashboard/aircall-server/logs/pm2/err.log',
      out_file: 'dashboard/aircall-server/logs/pm2/out.log',
      log_file: 'dashboard/aircall-server/logs/pm2/combined.log',
      env_production: {
        NODE_ENV: 'production',
        AIRCALL_PORT: 3002
      }
    },
    {
      ...commonSettings,
      name: 'klaviyo-server',
      script: './dashboard/klaviyo-server/server.js',
      env: {
        NODE_ENV: 'production',
        KLAVIYO_PORT: 3004,
        ...klaviyoEnv
      },
      error_file: 'dashboard/klaviyo-server/logs/pm2/err.log',
      out_file: 'dashboard/klaviyo-server/logs/pm2/out.log',
      log_file: 'dashboard/klaviyo-server/logs/pm2/combined.log',
      env_production: {
        NODE_ENV: 'production',
        KLAVIYO_PORT: 3004
      }
    },
    {
      ...commonSettings,
      name: 'meta-server',
      script: './dashboard/meta-server/server.js',
      env: {
        NODE_ENV: 'production',
        PORT: 3005,
        ...metaEnv
      },
      error_file: 'dashboard/meta-server/logs/pm2/err.log',
      out_file: 'dashboard/meta-server/logs/pm2/out.log',
      log_file: 'dashboard/meta-server/logs/pm2/combined.log',
      env_production: {
        NODE_ENV: 'production',
        PORT: 3005
      }
    },
    {
      name: "gorgias-server",
      script: "./dashboard/gorgias-server/server.js",
      env: {
        NODE_ENV: "development",
        PORT: 3006
      },
      env_production: {
        NODE_ENV: "production",
        PORT: 3006
      },
      error_file: "dashboard/logs/gorgias-server-error.log",
      out_file: "dashboard/logs/gorgias-server-out.log",
      log_file: "dashboard/logs/gorgias-server-combined.log",
      time: true
    },
    {
      ...commonSettings,
      name: 'google-server',
      script: path.resolve(__dirname, 'dashboard/google-server/server.js'),
      watch: false,
      env: {
        NODE_ENV: 'production',
        GOOGLE_ANALYTICS_PORT: 3007,
        ...googleAnalyticsEnv
      },
      error_file: path.resolve(__dirname, 'dashboard/google-server/logs/pm2/err.log'),
      out_file: path.resolve(__dirname, 'dashboard/google-server/logs/pm2/out.log'),
      log_file: path.resolve(__dirname, 'dashboard/google-server/logs/pm2/combined.log'),
      env_production: {
        NODE_ENV: 'production',
        GOOGLE_ANALYTICS_PORT: 3007
      }
    },
    {
      ...commonSettings,
      name: 'typeform-server',
      script: './dashboard/typeform-server/server.js',
      env: {
        NODE_ENV: 'production',
        TYPEFORM_PORT: 3008,
        ...typeformEnv
      },
      error_file: 'dashboard/typeform-server/logs/pm2/err.log',
      out_file: 'dashboard/typeform-server/logs/pm2/out.log',
      log_file: 'dashboard/typeform-server/logs/pm2/combined.log',
      env_production: {
        NODE_ENV: 'production',
        TYPEFORM_PORT: 3008
      }
    },
    {
      ...commonSettings,
      name: 'inventory-server',
      script: './inventory/src/server.js',
      env: {
        NODE_ENV: 'production',
        PORT: 3010,
        ...inventoryEnv
      },
      error_file: 'inventory/logs/pm2/err.log',
      out_file: 'inventory/logs/pm2/out.log',
      log_file: 'inventory/logs/pm2/combined.log',
      env_production: {
        NODE_ENV: 'production',
        PORT: 3010,
        ...inventoryEnv
      }
    },
    {
      ...commonSettings,
      name: 'new-auth-server',
@@ -7,16 +211,12 @@ module.exports = {
      env: {
        NODE_ENV: 'production',
        AUTH_PORT: 3011,
        ...inventoryEnv,
        JWT_SECRET: process.env.JWT_SECRET
      },
      error_file: 'inventory-server/auth/logs/pm2/err.log',
      out_file: 'inventory-server/auth/logs/pm2/out.log',
      log_file: 'inventory-server/auth/logs/pm2/combined.log',
      env_production: {
        NODE_ENV: 'production',
        AUTH_PORT: 3011,
        JWT_SECRET: process.env.JWT_SECRET
      }
      log_file: 'inventory-server/auth/logs/pm2/combined.log'
    }
  ]
};
};
inventory-server/auth/add-user.js (new file, 103 lines)
@@ -0,0 +1,103 @@
require('dotenv').config({ path: '../.env' });
const bcrypt = require('bcrypt');
const { Pool } = require('pg');
const inquirer = require('inquirer');

// Log connection details for debugging (remove in production)
console.log('Attempting to connect with:', {
  host: process.env.DB_HOST,
  user: process.env.DB_USER,
  database: process.env.DB_NAME,
  port: process.env.DB_PORT
});

const pool = new Pool({
  host: process.env.DB_HOST,
  user: process.env.DB_USER,
  password: process.env.DB_PASSWORD,
  database: process.env.DB_NAME,
  port: process.env.DB_PORT,
});

async function promptUser() {
  const questions = [
    {
      type: 'input',
      name: 'username',
      message: 'Enter username:',
      validate: (input) => {
        if (input.length < 3) {
          return 'Username must be at least 3 characters long';
        }
        return true;
      }
    },
    {
      type: 'password',
      name: 'password',
      message: 'Enter password:',
      mask: '*',
      validate: (input) => {
        if (input.length < 8) {
          return 'Password must be at least 8 characters long';
        }
        return true;
      }
    },
    {
      type: 'password',
      name: 'confirmPassword',
      message: 'Confirm password:',
      mask: '*',
      validate: (input, answers) => {
        if (input !== answers.password) {
          return 'Passwords do not match';
        }
        return true;
      }
    }
  ];

  return inquirer.prompt(questions);
}

async function addUser() {
  try {
    // Get user input
    const answers = await promptUser();
    const { username, password } = answers;

    // Hash password
    const saltRounds = 10;
    const hashedPassword = await bcrypt.hash(password, saltRounds);

    // Check if user already exists
    const checkResult = await pool.query(
      'SELECT id FROM users WHERE username = $1',
      [username]
    );

    if (checkResult.rows.length > 0) {
      console.error('Error: Username already exists');
      process.exit(1);
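      // Note: process.exit() terminates the process immediately, so the
      // finally block below (and pool.end()) does not run on this path.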
    }

    // Insert new user
    const result = await pool.query(
      'INSERT INTO users (username, password) VALUES ($1, $2) RETURNING id',
      [username, hashedPassword]
    );

    console.log(`User ${username} created successfully with id ${result.rows[0].id}`);
  } catch (error) {
    console.error('Error creating user:', error);
    console.error('Error details:', error.message);
    if (error.code) {
      console.error('Error code:', error.code);
    }
  } finally {
    await pool.end();
  }
}

addUser();
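
// Usage note: dotenv resolves the relative path '../.env' against process.cwd(),
// not this file's directory, so which .env gets loaded depends on where the
// script is launched from.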
@@ -1,41 +0,0 @@
const bcrypt = require('bcrypt');
const mysql = require('mysql2/promise');
const readline = require('readline').createInterface({
  input: process.stdin,
  output: process.stdout,
});
require('dotenv').config({ path: '../.env' });

const dbConfig = {
  host: process.env.DB_HOST,
  user: process.env.DB_USER,
  password: process.env.DB_PASSWORD,
  database: process.env.DB_NAME,
};

async function addUser() {
  const username = await askQuestion('Enter username: ');
  const password = await askQuestion('Enter password: ');

  const hashedPassword = await bcrypt.hash(password, 10);

  const connection = await mysql.createConnection(dbConfig);

  try {
    await connection.query('INSERT INTO users (username, password) VALUES (?, ?)', [username, hashedPassword]);
    console.log(`User ${username} added successfully.`);
  } catch (error) {
    console.error('Error adding user:', error);
  } finally {
    connection.end();
    readline.close();
  }
}

function askQuestion(query) {
  return new Promise(resolve => readline.question(query, ans => {
    resolve(ans);
  }));
}

addUser();
inventory-server/auth/package-lock.json (generated, 880 lines): file diff suppressed because it is too large
@@ -1,21 +1,19 @@
{
  "name": "auth-server",
  "name": "inventory-auth-server",
  "version": "1.0.0",
  "description": "Authentication server for inventory management",
  "description": "Authentication server for inventory management system",
  "main": "server.js",
  "scripts": {
    "start": "node server.js",
    "dev": "nodemon server.js",
    "add_user": "node add_user.js"
    "start": "node server.js"
  },
  "dependencies": {
    "bcrypt": "^5.1.1",
    "cors": "^2.8.5",
    "dotenv": "^16.4.5",
    "dotenv": "^16.4.7",
    "express": "^4.18.2",
    "jsonwebtoken": "^9.0.2"
  },
  "devDependencies": {
    "nodemon": "^3.1.0"
    "inquirer": "^8.2.6",
    "jsonwebtoken": "^9.0.2",
    "morgan": "^1.10.0",
    "pg": "^8.11.3"
  }
}
}
inventory-server/auth/permissions.js (new file, 128 lines)
@@ -0,0 +1,128 @@
// Get pool from global or create a new one if not available
let pool;
if (typeof global.pool !== 'undefined') {
  pool = global.pool;
} else {
  // If global pool is not available, create a new connection
  const { Pool } = require('pg');
  pool = new Pool({
    host: process.env.DB_HOST,
    user: process.env.DB_USER,
    password: process.env.DB_PASSWORD,
    database: process.env.DB_NAME,
    port: process.env.DB_PORT,
  });
  console.log('Created new database pool in permissions.js');
}

/**
 * Check if a user has a specific permission
 * @param {number} userId - The user ID to check
 * @param {string} permissionCode - The permission code to check
 * @returns {Promise<boolean>} - Whether the user has the permission
 */
async function checkPermission(userId, permissionCode) {
  try {
    // First check if the user is an admin
    const adminResult = await pool.query(
      'SELECT is_admin FROM users WHERE id = $1',
      [userId]
    );

    // If user is admin, automatically grant permission
    if (adminResult.rows.length > 0 && adminResult.rows[0].is_admin) {
      return true;
    }

    // Otherwise check for specific permission
    const result = await pool.query(
      `SELECT COUNT(*) AS has_permission
       FROM user_permissions up
       JOIN permissions p ON up.permission_id = p.id
       WHERE up.user_id = $1 AND p.code = $2`,
      [userId, permissionCode]
    );

    return result.rows[0].has_permission > 0;
  } catch (error) {
    console.error('Error checking permission:', error);
    return false;
  }
}

/**
 * Middleware to require a specific permission
 * @param {string} permissionCode - The permission code required
 * @returns {Function} - Express middleware function
 */
function requirePermission(permissionCode) {
  return async (req, res, next) => {
    try {
      // Check if user is authenticated
      if (!req.user || !req.user.id) {
        return res.status(401).json({ error: 'Authentication required' });
      }

      const hasPermission = await checkPermission(req.user.id, permissionCode);

      if (!hasPermission) {
        return res.status(403).json({
          error: 'Insufficient permissions',
          requiredPermission: permissionCode
        });
      }

      next();
    } catch (error) {
      console.error('Permission middleware error:', error);
      res.status(500).json({ error: 'Server error checking permissions' });
    }
  };
}

/**
 * Get all permissions for a user
 * @param {number} userId - The user ID
 * @returns {Promise<string[]>} - Array of permission codes
 */
async function getUserPermissions(userId) {
  try {
    // Check if user is admin
    const adminResult = await pool.query(
      'SELECT is_admin FROM users WHERE id = $1',
      [userId]
    );

    if (adminResult.rows.length === 0) {
      return [];
    }

    const isAdmin = adminResult.rows[0].is_admin;

    if (isAdmin) {
      // Admin gets all permissions
      const allPermissions = await pool.query('SELECT code FROM permissions');
      return allPermissions.rows.map(p => p.code);
    } else {
      // Get assigned permissions
      const permissions = await pool.query(
        `SELECT p.code
         FROM permissions p
         JOIN user_permissions up ON p.id = up.permission_id
         WHERE up.user_id = $1`,
        [userId]
      );

      return permissions.rows.map(p => p.code);
    }
  } catch (error) {
    console.error('Error getting user permissions:', error);
    return [];
  }
}

module.exports = {
  checkPermission,
  requirePermission,
  getUserPermissions
};
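
// Example usage (a sketch; routes.js wires these up the same way):
//
//   const { requirePermission } = require('./permissions');
//   router.get('/users', authenticate, requirePermission('view:users'), handler);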
inventory-server/auth/routes.js (new file, 513 lines)
@@ -0,0 +1,513 @@
const express = require('express');
const router = express.Router();
const bcrypt = require('bcrypt');
const jwt = require('jsonwebtoken');
const { requirePermission, getUserPermissions } = require('./permissions');

// Get pool from global or create a new one if not available
let pool;
if (typeof global.pool !== 'undefined') {
  pool = global.pool;
} else {
  // If global pool is not available, create a new connection
  const { Pool } = require('pg');
  pool = new Pool({
    host: process.env.DB_HOST,
    user: process.env.DB_USER,
    password: process.env.DB_PASSWORD,
    database: process.env.DB_NAME,
    port: process.env.DB_PORT,
  });
  console.log('Created new database pool in routes.js');
}

// Authentication middleware
const authenticate = async (req, res, next) => {
  try {
    const authHeader = req.headers.authorization;
    if (!authHeader || !authHeader.startsWith('Bearer ')) {
      return res.status(401).json({ error: 'Authentication required' });
    }

    const token = authHeader.split(' ')[1];
    const decoded = jwt.verify(token, process.env.JWT_SECRET);

    // Get user from database
    const result = await pool.query(
      'SELECT id, username, is_admin FROM users WHERE id = $1',
      [decoded.userId]
    );

    if (result.rows.length === 0) {
      return res.status(401).json({ error: 'User not found' });
    }

    // Attach user to request
    req.user = result.rows[0];
    next();
  } catch (error) {
    console.error('Authentication error:', error);
    res.status(401).json({ error: 'Invalid token' });
  }
};

// Login route
router.post('/login', async (req, res) => {
  try {
    const { username, password } = req.body;

    // Get user from database
    const result = await pool.query(
      'SELECT id, username, password, is_admin, is_active FROM users WHERE username = $1',
      [username]
    );

    if (result.rows.length === 0) {
      return res.status(401).json({ error: 'Invalid username or password' });
    }

    const user = result.rows[0];

    // Check if user is active
    if (!user.is_active) {
      return res.status(403).json({ error: 'Account is inactive' });
    }

    // Verify password
    const validPassword = await bcrypt.compare(password, user.password);
    if (!validPassword) {
      return res.status(401).json({ error: 'Invalid username or password' });
    }

    // Update last login
    await pool.query(
      'UPDATE users SET last_login = CURRENT_TIMESTAMP WHERE id = $1',
      [user.id]
    );

    // Generate JWT
    const token = jwt.sign(
      { userId: user.id, username: user.username },
      process.env.JWT_SECRET,
      { expiresIn: '8h' }
    );

    // Get user permissions
    const permissions = await getUserPermissions(user.id);

    res.json({
      token,
      user: {
        id: user.id,
        username: user.username,
        is_admin: user.is_admin,
        permissions
      }
    });
  } catch (error) {
    console.error('Login error:', error);
    res.status(500).json({ error: 'Server error' });
  }
});

// Get current user
router.get('/me', authenticate, async (req, res) => {
  try {
    // Get user permissions
    const permissions = await getUserPermissions(req.user.id);

    res.json({
      id: req.user.id,
      username: req.user.username,
      is_admin: req.user.is_admin,
      permissions
    });
  } catch (error) {
    console.error('Error getting current user:', error);
    res.status(500).json({ error: 'Server error' });
  }
});

// Get all users
router.get('/users', authenticate, requirePermission('view:users'), async (req, res) => {
  try {
    const result = await pool.query(`
      SELECT id, username, email, is_admin, is_active, created_at, last_login
      FROM users
      ORDER BY username
    `);

    res.json(result.rows);
  } catch (error) {
    console.error('Error getting users:', error);
    res.status(500).json({ error: 'Server error' });
  }
});

// Get user with permissions
router.get('/users/:id', authenticate, requirePermission('view:users'), async (req, res) => {
  try {
    const userId = req.params.id;

    // Get user details
    const userResult = await pool.query(`
      SELECT id, username, email, is_admin, is_active, created_at, last_login
      FROM users
      WHERE id = $1
    `, [userId]);

    if (userResult.rows.length === 0) {
      return res.status(404).json({ error: 'User not found' });
    }

    // Get user permissions
    const permissionsResult = await pool.query(`
      SELECT p.id, p.name, p.code, p.category, p.description
      FROM permissions p
      JOIN user_permissions up ON p.id = up.permission_id
      WHERE up.user_id = $1
      ORDER BY p.category, p.name
    `, [userId]);

    // Combine user and permissions
    const user = {
      ...userResult.rows[0],
      permissions: permissionsResult.rows
    };

    res.json(user);
  } catch (error) {
    console.error('Error getting user:', error);
    res.status(500).json({ error: 'Server error' });
  }
});

// Create new user
router.post('/users', authenticate, requirePermission('create:users'), async (req, res) => {
  const client = await pool.connect();

  try {
    const { username, email, password, is_admin, is_active, permissions } = req.body;

    console.log("Create user request:", {
      username,
      email,
      is_admin,
      is_active,
      permissions: permissions || []
    });

    // Validate required fields
    if (!username || !password) {
      return res.status(400).json({ error: 'Username and password are required' });
    }

    // Check if username is taken
    const existingUser = await client.query(
      'SELECT id FROM users WHERE username = $1',
      [username]
    );

    if (existingUser.rows.length > 0) {
      return res.status(400).json({ error: 'Username already exists' });
    }

    // Start transaction
    await client.query('BEGIN');

    // Hash password
    const saltRounds = 10;
    const hashedPassword = await bcrypt.hash(password, saltRounds);

    // Insert new user
    const userResult = await client.query(`
      INSERT INTO users (username, email, password, is_admin, is_active, created_at)
      VALUES ($1, $2, $3, $4, $5, CURRENT_TIMESTAMP)
      RETURNING id
    `, [username, email || null, hashedPassword, !!is_admin, is_active !== false]);

    const userId = userResult.rows[0].id;

    // Assign permissions if provided and not admin
    if (!is_admin && Array.isArray(permissions) && permissions.length > 0) {
      console.log("Adding permissions for new user:", userId);
      console.log("Permissions received:", permissions);

      // Check permission format
      const permissionIds = permissions.map(p => {
        if (typeof p === 'object' && p.id) {
          console.log("Permission is an object with ID:", p.id);
          return parseInt(p.id, 10);
        } else if (typeof p === 'number') {
          console.log("Permission is a number:", p);
          return p;
        } else if (typeof p === 'string' && !isNaN(parseInt(p, 10))) {
          console.log("Permission is a string that can be parsed as a number:", p);
          return parseInt(p, 10);
        } else {
          console.log("Unknown permission format:", typeof p, p);
          // If it's a permission code, we need to look up the ID
          return null;
        }
      }).filter(id => id !== null);

      console.log("Filtered permission IDs:", permissionIds);

      if (permissionIds.length > 0) {
        const permissionValues = permissionIds
          .map(permId => `(${userId}, ${permId})`)
          .join(',');
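        // Interpolating these values into the VALUES list is only safe because
        // every entry in permissionIds is an integer produced by the
        // parseInt/filter step above; non-numeric input was mapped to null
        // and dropped.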

        console.log("Inserting permission values:", permissionValues);

        try {
          await client.query(`
            INSERT INTO user_permissions (user_id, permission_id)
            VALUES ${permissionValues}
            ON CONFLICT DO NOTHING
          `);
          console.log("Successfully inserted permissions for new user:", userId);
        } catch (err) {
          console.error("Error inserting permissions for new user:", err);
          throw err;
        }
      } else {
        console.log("No valid permission IDs found to insert for new user");
      }
    } else {
      console.log("Not adding permissions: is_admin =", is_admin, "permissions array:", Array.isArray(permissions), "length:", permissions ? permissions.length : 0);
    }

    await client.query('COMMIT');

    res.status(201).json({
      id: userId,
      message: 'User created successfully'
    });
  } catch (error) {
    await client.query('ROLLBACK');
    console.error('Error creating user:', error);
    res.status(500).json({ error: 'Server error' });
  } finally {
    client.release();
  }
});

// Update user
router.put('/users/:id', authenticate, requirePermission('edit:users'), async (req, res) => {
  const client = await pool.connect();

  try {
    const userId = req.params.id;
    const { username, email, password, is_admin, is_active, permissions } = req.body;

    console.log("Update user request:", {
      userId,
      username,
      email,
      is_admin,
      is_active,
      permissions: permissions || []
    });

    // Check if user exists
    const userExists = await client.query(
      'SELECT id FROM users WHERE id = $1',
      [userId]
    );

    if (userExists.rows.length === 0) {
      return res.status(404).json({ error: 'User not found' });
    }

    // Start transaction
    await client.query('BEGIN');

    // Build update fields
    const updateFields = [];
    const updateValues = [userId]; // First parameter is the user ID
    let paramIndex = 2;

    if (username !== undefined) {
      updateFields.push(`username = $${paramIndex++}`);
      updateValues.push(username);
    }

    if (email !== undefined) {
      updateFields.push(`email = $${paramIndex++}`);
      updateValues.push(email || null);
    }

    if (is_admin !== undefined) {
      updateFields.push(`is_admin = $${paramIndex++}`);
      updateValues.push(!!is_admin);
    }

    if (is_active !== undefined) {
      updateFields.push(`is_active = $${paramIndex++}`);
      updateValues.push(!!is_active);
    }

    // Update password if provided
    if (password) {
      const saltRounds = 10;
      const hashedPassword = await bcrypt.hash(password, saltRounds);
      updateFields.push(`password = $${paramIndex++}`);
      updateValues.push(hashedPassword);
    }

    // Update user if there are fields to update
    if (updateFields.length > 0) {
      updateFields.push(`updated_at = CURRENT_TIMESTAMP`);

      await client.query(`
        UPDATE users
        SET ${updateFields.join(', ')}
        WHERE id = $1
      `, updateValues);
    }

    // Update permissions if provided
    if (Array.isArray(permissions)) {
      console.log("Updating permissions for user:", userId);
      console.log("Permissions received:", permissions);

      // First remove existing permissions
      await client.query(
        'DELETE FROM user_permissions WHERE user_id = $1',
        [userId]
      );
      console.log("Deleted existing permissions for user:", userId);

      // Add new permissions if any and not admin
      const newIsAdmin = is_admin !== undefined ? is_admin : (await client.query('SELECT is_admin FROM users WHERE id = $1', [userId])).rows[0].is_admin;

      console.log("User is admin:", newIsAdmin);

      if (!newIsAdmin && permissions.length > 0) {
        console.log("Adding permissions:", permissions);

        // Check permission format
        const permissionIds = permissions.map(p => {
          if (typeof p === 'object' && p.id) {
            console.log("Permission is an object with ID:", p.id);
            return parseInt(p.id, 10);
          } else if (typeof p === 'number') {
            console.log("Permission is a number:", p);
            return p;
          } else if (typeof p === 'string' && !isNaN(parseInt(p, 10))) {
            console.log("Permission is a string that can be parsed as a number:", p);
            return parseInt(p, 10);
          } else {
            console.log("Unknown permission format:", typeof p, p);
            // If it's a permission code, we need to look up the ID
            return null;
          }
        }).filter(id => id !== null);

        console.log("Filtered permission IDs:", permissionIds);

        if (permissionIds.length > 0) {
          const permissionValues = permissionIds
            .map(permId => `(${userId}, ${permId})`)
            .join(',');

          console.log("Inserting permission values:", permissionValues);

          try {
            await client.query(`
              INSERT INTO user_permissions (user_id, permission_id)
              VALUES ${permissionValues}
              ON CONFLICT DO NOTHING
            `);
            console.log("Successfully inserted permissions for user:", userId);
          } catch (err) {
            console.error("Error inserting permissions:", err);
            throw err;
          }
        } else {
          console.log("No valid permission IDs found to insert");
        }
      }
    }

    await client.query('COMMIT');

    res.json({ message: 'User updated successfully' });
  } catch (error) {
    await client.query('ROLLBACK');
    console.error('Error updating user:', error);
    res.status(500).json({ error: 'Server error' });
  } finally {
    client.release();
  }
});

// Delete user
router.delete('/users/:id', authenticate, requirePermission('delete:users'), async (req, res) => {
  try {
    const userId = req.params.id;

    // Check that user is not deleting themselves
    if (req.user.id === parseInt(userId, 10)) {
      return res.status(400).json({ error: 'Cannot delete your own account' });
    }

    // Delete user (this will cascade to user_permissions due to FK constraints)
    const result = await pool.query(
      'DELETE FROM users WHERE id = $1 RETURNING id',
      [userId]
    );

    if (result.rows.length === 0) {
      return res.status(404).json({ error: 'User not found' });
    }

    res.json({ message: 'User deleted successfully' });
  } catch (error) {
    console.error('Error deleting user:', error);
    res.status(500).json({ error: 'Server error' });
  }
});

// Get all permissions grouped by category
router.get('/permissions/categories', authenticate, requirePermission('view:users'), async (req, res) => {
  try {
    const result = await pool.query(`
      SELECT category, json_agg(
        json_build_object(
          'id', id,
          'name', name,
          'code', code,
          'description', description
        ) ORDER BY name
      ) as permissions
      FROM permissions
      GROUP BY category
      ORDER BY category
    `);

    res.json(result.rows);
  } catch (error) {
    console.error('Error getting permissions:', error);
    res.status(500).json({ error: 'Server error' });
  }
});

// Get all permissions
router.get('/permissions', authenticate, requirePermission('view:users'), async (req, res) => {
  try {
    const result = await pool.query(`
      SELECT *
      FROM permissions
      ORDER BY category, name
    `);

    res.json(result.rows);
  } catch (error) {
    console.error('Error getting permissions:', error);
    res.status(500).json({ error: 'Server error' });
  }
});

module.exports = router;
@@ -1,6 +1,89 @@
CREATE TABLE `users` (
  `id` INT AUTO_INCREMENT PRIMARY KEY,
  `username` VARCHAR(255) NOT NULL UNIQUE,
  `password` VARCHAR(255) NOT NULL,
  `created_at` TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE users (
  id SERIAL PRIMARY KEY,
  username VARCHAR(255) NOT NULL UNIQUE,
  password VARCHAR(255) NOT NULL,
  email VARCHAR UNIQUE,
  is_admin BOOLEAN DEFAULT FALSE,
  is_active BOOLEAN DEFAULT TRUE,
  last_login TIMESTAMP WITH TIME ZONE,
  updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
  created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- Function to update the updated_at timestamp
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
  NEW.updated_at = CURRENT_TIMESTAMP;
  RETURN NEW;
END;
$$ language 'plpgsql';

-- Sequence and defined type for users table if not exists
CREATE SEQUENCE IF NOT EXISTS users_id_seq;

-- Create permissions table
CREATE TABLE IF NOT EXISTS "public"."permissions" (
  "id" SERIAL PRIMARY KEY,
  "name" varchar NOT NULL UNIQUE,
  "code" varchar NOT NULL UNIQUE,
  "description" text,
  "category" varchar NOT NULL,
  "created_at" timestamp with time zone DEFAULT CURRENT_TIMESTAMP,
  "updated_at" timestamp with time zone DEFAULT CURRENT_TIMESTAMP
);

-- Create user_permissions junction table
CREATE TABLE IF NOT EXISTS "public"."user_permissions" (
  "user_id" int4 NOT NULL REFERENCES "public"."users"("id") ON DELETE CASCADE,
  "permission_id" int4 NOT NULL REFERENCES "public"."permissions"("id") ON DELETE CASCADE,
  "created_at" timestamp with time zone DEFAULT CURRENT_TIMESTAMP,
  PRIMARY KEY ("user_id", "permission_id")
);

-- Add triggers for updated_at on users and permissions
DROP TRIGGER IF EXISTS update_users_updated_at ON users;
CREATE TRIGGER update_users_updated_at
  BEFORE UPDATE ON users
  FOR EACH ROW
  EXECUTE FUNCTION update_updated_at_column();

DROP TRIGGER IF EXISTS update_permissions_updated_at ON permissions;
CREATE TRIGGER update_permissions_updated_at
  BEFORE UPDATE ON permissions
  FOR EACH ROW
  EXECUTE FUNCTION update_updated_at_column();

-- Insert default permissions by page - only the ones used in the application
INSERT INTO permissions (name, code, description, category) VALUES
  ('Dashboard Access', 'access:dashboard', 'Can access the Dashboard page', 'Pages'),
  ('Products Access', 'access:products', 'Can access the Products page', 'Pages'),
  ('Categories Access', 'access:categories', 'Can access the Categories page', 'Pages'),
  ('Vendors Access', 'access:vendors', 'Can access the Vendors page', 'Pages'),
  ('Analytics Access', 'access:analytics', 'Can access the Analytics page', 'Pages'),
  ('Forecasting Access', 'access:forecasting', 'Can access the Forecasting page', 'Pages'),
  ('Purchase Orders Access', 'access:purchase_orders', 'Can access the Purchase Orders page', 'Pages'),
  ('Import Access', 'access:import', 'Can access the Import page', 'Pages'),
  ('Settings Access', 'access:settings', 'Can access the Settings page', 'Pages'),
  ('AI Validation Debug Access', 'access:ai_validation_debug', 'Can access the AI Validation Debug page', 'Pages')
ON CONFLICT (code) DO NOTHING;

-- Settings section permissions
INSERT INTO permissions (name, code, description, category) VALUES
  ('Data Management', 'settings:data_management', 'Access to the Data Management settings section', 'Settings'),
  ('Stock Management', 'settings:stock_management', 'Access to the Stock Management settings section', 'Settings'),
  ('Performance Metrics', 'settings:performance_metrics', 'Access to the Performance Metrics settings section', 'Settings'),
  ('Calculation Settings', 'settings:calculation_settings', 'Access to the Calculation Settings section', 'Settings'),
  ('Template Management', 'settings:templates', 'Access to the Template Management settings section', 'Settings'),
  ('User Management', 'settings:user_management', 'Access to the User Management settings section', 'Settings')
ON CONFLICT (code) DO NOTHING;

-- Set any existing users as admin
UPDATE users SET is_admin = TRUE WHERE is_admin IS NULL;

-- Grant all permissions to admin users
INSERT INTO user_permissions (user_id, permission_id)
SELECT u.id, p.id
FROM users u, permissions p
WHERE u.is_admin = TRUE
ON CONFLICT DO NOTHING;
@@ -1,135 +1,164 @@
require('dotenv').config({ path: '../.env' });
const express = require('express');
const cors = require('cors');
const bcrypt = require('bcrypt');
const jwt = require('jsonwebtoken');
const cors = require('cors');
const mysql = require('mysql2/promise');
require('dotenv').config({ path: '../.env' });
const { Pool } = require('pg');
const morgan = require('morgan');
const authRoutes = require('./routes');

// Log startup configuration
console.log('Starting auth server with config:', {
  host: process.env.DB_HOST,
  user: process.env.DB_USER,
  database: process.env.DB_NAME,
  port: process.env.DB_PORT,
  auth_port: process.env.AUTH_PORT
});

const app = express();
const PORT = process.env.AUTH_PORT || 3011;
const port = process.env.AUTH_PORT || 3011;

// Database configuration
const dbConfig = {
const pool = new Pool({
  host: process.env.DB_HOST,
  user: process.env.DB_USER,
  password: process.env.DB_PASSWORD,
  database: process.env.DB_NAME,
};
  port: process.env.DB_PORT,
});

// Create a connection pool
const pool = mysql.createPool(dbConfig);
// Make pool available globally
global.pool = pool;

app.use(cors({
  origin: [
    'https://inventory.kent.pw',
    'http://localhost:5173',
    'http://127.0.0.1:5173',
    /^http:\/\/192\.168\.\d+\.\d+(:\d+)?$/,
    /^http:\/\/10\.\d+\.\d+\.\d+(:\d+)?$/
  ],
  methods: ['GET', 'POST', 'OPTIONS'],
  allowedHeaders: ['Content-Type', 'Authorization', 'X-Requested-With'],
  credentials: true,
  exposedHeaders: ['set-cookie']
}));
// Middleware
app.use(express.json());

// Debug middleware to log request details
app.use((req, res, next) => {
  console.log('Request details:', {
    method: req.method,
    url: req.url,
    origin: req.get('Origin'),
    headers: req.headers,
    body: req.body,
  });
  next();
});

// Registration endpoint
app.post('/register', async (req, res) => {
  try {
    const { username, password } = req.body;
    const hashedPassword = await bcrypt.hash(password, 10);

    const connection = await pool.getConnection();
    await connection.query('INSERT INTO users (username, password) VALUES (?, ?)', [username, hashedPassword]);
    connection.release();

    res.status(201).json({ message: 'User registered successfully' });
  } catch (error) {
    console.error('Registration error:', error);
    res.status(500).json({ error: 'Registration failed' });
  }
});
app.use(morgan('combined'));
app.use(cors({
  origin: ['http://localhost:5175', 'http://localhost:5174', 'https://inventory.kent.pw'],
  credentials: true
}));

// Login endpoint
app.post('/login', async (req, res) => {
  const { username, password } = req.body;

  try {
    const { username, password } = req.body;
    console.log(`Login attempt for user: ${username}`);

    const connection = await pool.getConnection();
    const [rows] = await connection.query(
      'SELECT * FROM users WHERE username = ?',
      [username],
    // Get user from database
    const result = await pool.query(
      'SELECT id, username, password, is_admin, is_active FROM users WHERE username = $1',
      [username]
    );
    connection.release();

    if (rows.length === 1) {
      const user = rows[0];
      const passwordMatch = await bcrypt.compare(password, user.password);
    const user = result.rows[0];

      if (passwordMatch) {
        console.log(`User ${username} authenticated successfully`);
        const token = jwt.sign(
          { username: user.username },
          process.env.JWT_SECRET,
          { expiresIn: '1h' },
        );
        res.json({ token });
      } else {
        console.error(`Invalid password for user: ${username}`);
        res.status(401).json({ error: 'Invalid credentials' });
      }
    } else {
      console.error(`User not found: ${username}`);
      res.status(401).json({ error: 'Invalid credentials' });
    // Check if user exists and password is correct
    if (!user || !(await bcrypt.compare(password, user.password))) {
      return res.status(401).json({ error: 'Invalid username or password' });
    }

    // Check if user is active
    if (!user.is_active) {
      return res.status(403).json({ error: 'Account is inactive' });
    }

    // Generate JWT token
    const token = jwt.sign(
      { userId: user.id, username: user.username },
      process.env.JWT_SECRET,
      { expiresIn: '24h' }
    );

    // Get user permissions for the response
    const permissionsResult = await pool.query(`
      SELECT code
      FROM permissions p
      JOIN user_permissions up ON p.id = up.permission_id
      WHERE up.user_id = $1
    `, [user.id]);

    const permissions = permissionsResult.rows.map(row => row.code);

    res.json({
      token,
      user: {
        id: user.id,
        username: user.username,
        is_admin: user.is_admin,
        permissions: user.is_admin ? [] : permissions
      }
    });
  } catch (error) {
    console.error('Login error:', error);
    res.status(500).json({ error: 'Login failed' });
    res.status(500).json({ error: 'Internal server error' });
  }
});

// Protected endpoint example
app.get('/protected', async (req, res) => {
// User info endpoint
app.get('/me', async (req, res) => {
  const authHeader = req.headers.authorization;
  if (!authHeader) {
    return res.status(401).json({ error: 'Unauthorized' });

  if (!authHeader || !authHeader.startsWith('Bearer ')) {
    return res.status(401).json({ error: 'No token provided' });
  }

  const token = authHeader.split(' ')[1];
  try {
    const token = authHeader.split(' ')[1];
    const decoded = jwt.verify(token, process.env.JWT_SECRET);

    // Optionally, you can fetch the user from the database here
    // to verify that the user still exists or to get more user information
    const connection = await pool.getConnection();
    const [rows] = await connection.query('SELECT * FROM users WHERE username = ?', [decoded.username]);
    connection.release();

    if (rows.length === 0) {
      return res.status(401).json({ error: 'User not found' });

    // Get user details from database
    const userResult = await pool.query(
      'SELECT id, username, email, is_admin, is_active FROM users WHERE id = $1',
      [decoded.userId]
    );

    if (userResult.rows.length === 0) {
      return res.status(404).json({ error: 'User not found' });
    }

    res.json({ message: 'Protected resource accessed', user: decoded });

    const user = userResult.rows[0];

    // Get user permissions
    let permissions = [];
    if (!user.is_admin) {
      const permissionsResult = await pool.query(`
        SELECT code
        FROM permissions p
        JOIN user_permissions up ON p.id = up.permission_id
        WHERE up.user_id = $1
      `, [user.id]);

      permissions = permissionsResult.rows.map(row => row.code);
    }

    res.json({
      id: user.id,
      username: user.username,
      email: user.email,
      is_admin: user.is_admin,
      permissions: permissions
    });
  } catch (error) {
    console.error('Protected endpoint error:', error);
    res.status(403).json({ error: 'Invalid token' });
    console.error('Token verification error:', error);
    res.status(401).json({ error: 'Invalid token' });
  }
});

app.listen(PORT, "0.0.0.0", () => {
  console.log(`Auth server running on port ${PORT}`);
});
// Mount all routes from routes.js
app.use('/', authRoutes);

// Health check endpoint
app.get('/health', (req, res) => {
  res.json({ status: 'healthy' });
});

// Error handling middleware
app.use((err, req, res, next) => {
  console.error(err.stack);
  res.status(500).json({ error: 'Something broke!' });
});

// Start server
app.listen(port, () => {
  console.log(`Auth server running on port ${port}`);
});
inventory-server/chat/db-convert/mongo_to_postgres_converter.py (new file, 881 lines)
@@ -0,0 +1,881 @@
#!/usr/bin/env python3
"""
MongoDB to PostgreSQL Converter for Rocket.Chat
Converts MongoDB BSON export files to PostgreSQL database

Usage:
    python3 mongo_to_postgres_converter.py \
        --mongo-path db/database/62df06d44234d20001289144 \
        --pg-database rocketchat_converted \
        --pg-user rocketchat_user \
        --pg-password your_password \
        --debug
"""

import json
import os
import re
import subprocess
import sys
import struct
from datetime import datetime
from pathlib import Path
from typing import Dict, Any, List, Optional
import argparse
import traceback

# Auto-install dependencies if needed
try:
    import bson
    import psycopg2
except ImportError:
    print("Installing required packages...")
    subprocess.check_call([sys.executable, "-m", "pip", "install", "pymongo", "psycopg2-binary"])
    import bson
    import psycopg2
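# Note: the `bson` module imported above ships with the pymongo package,
# which is why pymongo (rather than a standalone bson) is in the install list.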
|
||||
|
||||
class MongoToPostgresConverter:
|
||||
def __init__(self, mongo_db_path: str, postgres_config: Dict[str, str], debug_mode: bool = False, debug_collections: List[str] = None):
|
||||
self.mongo_db_path = Path(mongo_db_path)
|
||||
self.postgres_config = postgres_config
|
||||
self.debug_mode = debug_mode
|
||||
self.debug_collections = debug_collections or []
|
||||
self.collections = {}
|
||||
self.schema_info = {}
|
||||
self.error_log = {}
|
||||
|
||||
def log_debug(self, message: str, collection: str = None):
|
||||
"""Log debug messages if debug mode is enabled and collection is in debug list"""
|
||||
if self.debug_mode and (not self.debug_collections or collection in self.debug_collections):
|
||||
print(f"DEBUG: {message}")
|
||||
|
||||
def log_error(self, collection: str, error_type: str, details: str):
|
||||
"""Log detailed error information"""
|
||||
if collection not in self.error_log:
|
||||
self.error_log[collection] = []
|
||||
self.error_log[collection].append({
|
||||
'type': error_type,
|
||||
'details': details,
|
||||
'timestamp': datetime.now().isoformat()
|
||||
})
|
||||
|
||||
def sample_documents(self, collection_name: str, max_samples: int = 3) -> List[Dict]:
|
||||
"""Sample documents from a collection for debugging"""
|
||||
if not self.debug_mode or (self.debug_collections and collection_name not in self.debug_collections):
|
||||
return []
|
||||
|
||||
print(f"\n🔍 Sampling documents from {collection_name}:")
|
||||
|
||||
bson_file = self.collections[collection_name]['bson_file']
|
||||
if bson_file.stat().st_size == 0:
|
||||
print(" Collection is empty")
|
||||
return []
|
||||
|
||||
samples = []
|
||||
|
||||
try:
|
||||
with open(bson_file, 'rb') as f:
|
||||
sample_count = 0
|
||||
while sample_count < max_samples:
|
||||
try:
|
||||
doc_size = int.from_bytes(f.read(4), byteorder='little')
|
||||
if doc_size <= 0:
|
||||
break
|
||||
f.seek(-4, 1)
|
||||
doc_bytes = f.read(doc_size)
|
||||
if len(doc_bytes) != doc_size:
|
||||
break
|
||||
|
||||
doc = bson.decode(doc_bytes)
|
||||
samples.append(doc)
|
||||
sample_count += 1
|
||||
|
||||
print(f" Sample {sample_count} - Keys: {list(doc.keys())}")
|
||||
# Show a few key fields with their types and truncated values
|
||||
for key, value in list(doc.items())[:3]:
|
||||
value_preview = str(value)[:50] + "..." if len(str(value)) > 50 else str(value)
|
||||
print(f" {key}: {type(value).__name__} = {value_preview}")
|
||||
if len(doc) > 3:
|
||||
print(f" ... and {len(doc) - 3} more fields")
|
||||
print()
|
||||
|
||||
except (bson.InvalidBSON, struct.error, OSError) as e:
|
||||
self.log_error(collection_name, 'document_parsing', str(e))
|
||||
break
|
||||
|
||||
except Exception as e:
|
||||
self.log_error(collection_name, 'file_reading', str(e))
|
||||
print(f" Error reading collection: {e}")
|
||||
|
||||
return samples
|
||||
|
||||
def discover_collections(self):
|
||||
"""Discover all BSON files and their metadata"""
|
||||
print("Discovering MongoDB collections...")
|
||||
|
||||
for bson_file in self.mongo_db_path.glob("*.bson"):
|
||||
collection_name = bson_file.stem
|
||||
metadata_file = bson_file.with_suffix(".metadata.json")
|
||||
|
||||
# Read metadata if available
|
||||
metadata = {}
|
||||
if metadata_file.exists():
|
||||
try:
|
||||
with open(metadata_file, 'r', encoding='utf-8') as f:
|
||||
metadata = json.load(f)
|
||||
except (UnicodeDecodeError, json.JSONDecodeError) as e:
|
||||
print(f"Warning: Could not read metadata for {collection_name}: {e}")
|
||||
metadata = {}
|
||||
|
||||
# Get file size and document count estimate
|
||||
file_size = bson_file.stat().st_size
|
||||
doc_count = self._estimate_document_count(bson_file)
|
||||
|
||||
self.collections[collection_name] = {
|
||||
'bson_file': bson_file,
|
||||
'metadata': metadata,
|
||||
'file_size': file_size,
|
||||
'estimated_docs': doc_count
|
||||
}
|
||||
|
||||
print(f"Found {len(self.collections)} collections")
|
||||
for name, info in self.collections.items():
|
||||
print(f" - {name}: {info['file_size']/1024/1024:.1f}MB (~{info['estimated_docs']} docs)")
|
||||
|
||||
def _estimate_document_count(self, bson_file: Path) -> int:
|
||||
"""Estimate document count by reading first few documents"""
|
||||
if bson_file.stat().st_size == 0:
|
||||
return 0
|
||||
|
||||
try:
|
||||
with open(bson_file, 'rb') as f:
|
||||
docs_sampled = 0
|
||||
bytes_sampled = 0
|
||||
max_sample_size = min(1024 * 1024, bson_file.stat().st_size) # 1MB or file size
|
||||
|
||||
while bytes_sampled < max_sample_size:
|
||||
try:
|
||||
doc_size = int.from_bytes(f.read(4), byteorder='little')
|
||||
if doc_size <= 0 or doc_size > 16 * 1024 * 1024: # MongoDB doc size limit
|
||||
break
|
||||
f.seek(-4, 1) # Go back
|
||||
doc_bytes = f.read(doc_size)
|
||||
if len(doc_bytes) != doc_size:
|
||||
break
|
||||
bson.decode(doc_bytes) # Validate it's a valid BSON document
|
||||
docs_sampled += 1
|
||||
bytes_sampled += doc_size
|
||||
except (bson.InvalidBSON, struct.error, OSError):
|
||||
break
|
||||
|
||||
if docs_sampled > 0 and bytes_sampled > 0:
|
||||
avg_doc_size = bytes_sampled / docs_sampled
|
||||
return int(bson_file.stat().st_size / avg_doc_size)
|
||||
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return 0
|
||||
|
||||
    def analyze_schema(self, collection_name: str, sample_size: int = 100) -> Dict[str, Any]:
        """Analyze collection schema by sampling documents"""
        print(f"Analyzing schema for {collection_name}...")

        bson_file = self.collections[collection_name]['bson_file']
        if bson_file.stat().st_size == 0:
            return {}

        schema = {}
        docs_analyzed = 0

        try:
            with open(bson_file, 'rb') as f:
                while docs_analyzed < sample_size:
                    try:
                        doc_size = int.from_bytes(f.read(4), byteorder='little')
                        if doc_size <= 0:
                            break
                        f.seek(-4, 1)
                        doc_bytes = f.read(doc_size)
                        if len(doc_bytes) != doc_size:
                            break

                        doc = bson.decode(doc_bytes)
                        self._analyze_document_schema(doc, schema)
                        docs_analyzed += 1

                    except (bson.InvalidBSON, struct.error, OSError):
                        break

        except Exception as e:
            print(f"Error analyzing {collection_name}: {e}")

        self.schema_info[collection_name] = schema
        return schema

    def _analyze_document_schema(self, doc: Dict[str, Any], schema: Dict[str, Any], prefix: str = ""):
        """Recursively analyze document structure"""
        for key, value in doc.items():
            full_key = f"{prefix}.{key}" if prefix else key

            if full_key not in schema:
                schema[full_key] = {
                    'types': set(),
                    'null_count': 0,
                    'total_count': 0,
                    'is_array': False,
                    'nested_schema': {}
                }

            schema[full_key]['total_count'] += 1

            if value is None:
                schema[full_key]['null_count'] += 1
                schema[full_key]['types'].add('null')
            elif isinstance(value, dict):
                schema[full_key]['types'].add('object')
                if 'nested_schema' not in schema[full_key]:
                    schema[full_key]['nested_schema'] = {}
                self._analyze_document_schema(value, schema[full_key]['nested_schema'])
            elif isinstance(value, list):
                schema[full_key]['types'].add('array')
                schema[full_key]['is_array'] = True
                if value and isinstance(value[0], dict):
                    if 'array_item_schema' not in schema[full_key]:
                        schema[full_key]['array_item_schema'] = {}
                    for item in value[:5]:  # Sample first 5 items
                        if isinstance(item, dict):
                            self._analyze_document_schema(item, schema[full_key]['array_item_schema'])
            else:
                schema[full_key]['types'].add(type(value).__name__)

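    # For a single document like {'name': 'general', 'sysMes': True}, the
    # accumulated entries would be shaped roughly like this (sets shown literally):
    #
    #     schema == {
    #         'name':   {'types': {'str'},  'null_count': 0, 'total_count': 1,
    #                    'is_array': False, 'nested_schema': {}},
    #         'sysMes': {'types': {'bool'}, 'null_count': 0, 'total_count': 1,
    #                    'is_array': False, 'nested_schema': {}},
    #     }
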
    def generate_postgres_schema(self) -> Dict[str, str]:
        """Generate PostgreSQL CREATE TABLE statements"""
        print("Generating PostgreSQL schema...")

        table_definitions = {}

        for collection_name, schema in self.schema_info.items():
            if not schema:  # Empty collection
                continue

            table_name = self._sanitize_table_name(collection_name)
            columns = []

            # Always add an id column (PostgreSQL doesn't use _id like MongoDB)
            columns.append("id SERIAL PRIMARY KEY")

            for field_name, field_info in schema.items():
                if field_name == '_id':
                    columns.append("mongo_id TEXT")  # Always allow NULL for mongo_id
                    continue

                col_name = self._sanitize_column_name(field_name)

                # Handle conflicts with PostgreSQL auto-generated columns
                if col_name in ['id', 'mongo_id', 'created_at', 'updated_at']:
                    col_name = f"field_{col_name}"

                col_type = self._determine_postgres_type(field_info)

                # Make all fields nullable by default to avoid constraint violations
                columns.append(f"{col_name} {col_type}")

            # Add metadata columns
            columns.extend([
                "created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP",
                "updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP"
            ])

            column_definitions = ',\n    '.join(columns)
            table_sql = f"""
CREATE TABLE IF NOT EXISTS {table_name} (
    {column_definitions}
);

-- Create indexes based on MongoDB indexes
"""

            # Get list of actual columns that will exist in the table
            existing_columns = set(['id', 'mongo_id', 'created_at', 'updated_at'])
            for field_name in schema.keys():
                if field_name != '_id':
                    col_name = self._sanitize_column_name(field_name)
                    # Handle conflicts with PostgreSQL auto-generated columns
                    if col_name in ['id', 'mongo_id', 'created_at', 'updated_at']:
                        col_name = f"field_{col_name}"
                    existing_columns.add(col_name)

            # Add indexes from MongoDB metadata
            metadata = self.collections[collection_name].get('metadata', {})
            indexes = metadata.get('indexes', [])

            for index in indexes:
                if index['name'] != '_id_':  # Skip the default _id index
                    # Sanitize index name - remove special characters
                    sanitized_index_name = re.sub(r'[^a-zA-Z0-9_]', '_', index['name'])
                    index_name = f"idx_{table_name}_{sanitized_index_name}"
                    index_keys = list(index['key'].keys())
                    if index_keys:
                        sanitized_keys = []
                        for key in index_keys:
                            if key != '_id':
                                sanitized_key = self._sanitize_column_name(key)
                                # Handle conflicts with PostgreSQL auto-generated columns
                                if sanitized_key in ['id', 'mongo_id', 'created_at', 'updated_at']:
                                    sanitized_key = f"field_{sanitized_key}"
                                # Only add if the column actually exists in our table
                                if sanitized_key in existing_columns:
                                    sanitized_keys.append(sanitized_key)

                        if sanitized_keys:
                            table_sql += f"CREATE INDEX IF NOT EXISTS {index_name} ON {table_name} ({', '.join(sanitized_keys)});\n"

            table_definitions[collection_name] = table_sql

        return table_definitions

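    # Roughly the DDL this produces for a hypothetical 'rocketchat_room'
    # sample whose fields are _id, name (str) and sysMes (mixed bool/str):
    #
    #     CREATE TABLE IF NOT EXISTS room (
    #         id SERIAL PRIMARY KEY,
    #         mongo_id TEXT,
    #         name TEXT,
    #         sysmes TEXT,
    #         created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    #         updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    #     );
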
    def _sanitize_table_name(self, name: str) -> str:
        """Convert MongoDB collection name to PostgreSQL table name"""
        # Remove rocketchat_ prefix if present
        if name.startswith('rocketchat_'):
            name = name[11:]

        # Replace special characters with underscores
        name = re.sub(r'[^a-zA-Z0-9_]', '_', name)

        # Ensure it starts with a letter
        if name and name[0].isdigit():
            name = 'table_' + name

        return name.lower()

    def _sanitize_column_name(self, name: str) -> str:
        """Convert MongoDB field name to PostgreSQL column name"""
        # Handle nested field names (convert dots to underscores)
        name = name.replace('.', '_')

        # Replace special characters with underscores
        name = re.sub(r'[^a-zA-Z0-9_]', '_', name)

        # Ensure it starts with a letter or underscore
        if name and name[0].isdigit():
            name = 'col_' + name

        # Handle PostgreSQL reserved words
        reserved = {
            'user', 'order', 'group', 'table', 'index', 'key', 'value', 'date', 'time', 'timestamp',
            'default', 'select', 'from', 'where', 'insert', 'update', 'delete', 'create', 'drop',
            'alter', 'grant', 'revoke', 'commit', 'rollback', 'begin', 'end', 'case', 'when',
            'then', 'else', 'if', 'null', 'not', 'and', 'or', 'in', 'exists', 'between',
            'like', 'limit', 'offset', 'union', 'join', 'inner', 'outer', 'left', 'right',
            'full', 'cross', 'natural', 'on', 'using', 'distinct', 'all', 'any', 'some',
            'desc', 'asc', 'primary', 'foreign', 'references', 'constraint', 'unique',
            'check', 'cascade', 'restrict', 'action', 'match', 'partial'
        }
        if name.lower() in reserved:
            name = name + '_col'

        return name.lower()

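    # Behavior in practice:
    #     _sanitize_column_name('u._id')      -> 'u__id'
    #     _sanitize_column_name('user')       -> 'user_col'  (reserved word)
    #     _sanitize_column_name('2fa.secret') -> 'col_2fa_secret'
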
    def _determine_postgres_type(self, field_info: Dict[str, Any]) -> str:
        """Determine PostgreSQL column type from MongoDB field analysis with improved logic"""
        types = field_info['types']

        # Convert set to list for easier checking
        type_list = list(types)

        # If there's only one type (excluding null), use specific typing
        non_null_types = [t for t in type_list if t != 'null']

        if len(non_null_types) == 1:
            single_type = non_null_types[0]
            if single_type == 'bool':
                return 'BOOLEAN'
            elif single_type == 'int':
                return 'INTEGER'
            elif single_type == 'float':
                return 'NUMERIC'
            elif single_type == 'str':
                return 'TEXT'
            elif single_type == 'datetime':
                return 'TIMESTAMP'
            elif single_type == 'ObjectId':
                return 'TEXT'

        # Handle mixed types more conservatively
        if 'array' in types or field_info.get('is_array', False):
            return 'JSONB'  # Arrays always go to JSONB
        elif 'object' in types:
            return 'JSONB'  # Objects always go to JSONB
        elif len(non_null_types) > 1:
            # Multiple non-null types - check for common combinations
            if set(non_null_types) <= {'int', 'float'}:
                return 'NUMERIC'  # Can handle both int and float
            elif set(non_null_types) <= {'bool', 'str'}:
                return 'TEXT'  # Convert everything to text
            elif set(non_null_types) <= {'str', 'ObjectId'}:
                return 'TEXT'  # Both are string-like
            else:
                return 'JSONB'  # Complex mixed types go to JSONB
        elif 'ObjectId' in types:
            return 'TEXT'
        elif 'datetime' in types:
            return 'TIMESTAMP'
        elif 'bool' in types:
            return 'BOOLEAN'
        elif 'int' in types:
            return 'INTEGER'
        elif 'float' in types:
            return 'NUMERIC'
        elif 'str' in types:
            return 'TEXT'
        else:
            return 'TEXT'  # Default fallback

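    # Resulting mappings for a few representative field analyses:
    #     {'types': {'bool'}}                 -> BOOLEAN
    #     {'types': {'int', 'float', 'null'}} -> NUMERIC
    #     {'types': {'bool', 'str'}}          -> TEXT   (the sysMes case)
    #     {'types': {'str', 'object'}}        -> JSONB  (the packageValue case)
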
    def create_postgres_database(self, table_definitions: Dict[str, str]):
        """Create PostgreSQL database and tables"""
        print("Creating PostgreSQL database schema...")

        try:
            # Connect to PostgreSQL
            conn = psycopg2.connect(**self.postgres_config)
            conn.autocommit = True
            cursor = conn.cursor()

            # Create tables
            for collection_name, table_sql in table_definitions.items():
                print(f"Creating table for {collection_name}...")
                cursor.execute(table_sql)

            cursor.close()
            conn.close()
            print("Database schema created successfully!")

        except Exception as e:
            print(f"Error creating database schema: {e}")
            raise

    def convert_and_insert_data(self, batch_size: int = 1000):
        """Convert BSON data and insert into PostgreSQL"""
        print("Converting and inserting data...")

        try:
            conn = psycopg2.connect(**self.postgres_config)
            conn.autocommit = False

            for collection_name in self.collections:
                print(f"Processing {collection_name}...")
                self._convert_collection(conn, collection_name, batch_size)

            conn.close()
            print("Data conversion completed successfully!")

        except Exception as e:
            print(f"Error converting data: {e}")
            raise

    def _convert_collection(self, conn, collection_name: str, batch_size: int):
        """Convert a single collection"""
        bson_file = self.collections[collection_name]['bson_file']

        if bson_file.stat().st_size == 0:
            print(f" Skipping empty collection {collection_name}")
            return

        table_name = self._sanitize_table_name(collection_name)
        cursor = conn.cursor()

        batch = []
        total_inserted = 0
        errors = 0

        try:
            with open(bson_file, 'rb') as f:
                while True:
                    try:
                        doc_size = int.from_bytes(f.read(4), byteorder='little')
                        if doc_size <= 0:
                            break
                        f.seek(-4, 1)
                        doc_bytes = f.read(doc_size)
                        if len(doc_bytes) != doc_size:
                            break

                        doc = bson.decode(doc_bytes)
                        batch.append(doc)

                        if len(batch) >= batch_size:
                            inserted, batch_errors = self._insert_batch(cursor, table_name, batch, collection_name)
                            total_inserted += inserted
                            errors += batch_errors
                            batch = []
                            conn.commit()
                            if total_inserted % 5000 == 0:  # Less frequent progress updates
                                print(f" Inserted {total_inserted} documents...")

                    except (bson.InvalidBSON, struct.error, OSError):
                        break

            # Insert remaining documents
            if batch:
                inserted, batch_errors = self._insert_batch(cursor, table_name, batch, collection_name)
                total_inserted += inserted
                errors += batch_errors
                conn.commit()

            if errors > 0:
                print(f" Completed {collection_name}: {total_inserted} documents inserted ({errors} errors)")
            else:
                print(f" Completed {collection_name}: {total_inserted} documents inserted")

        except Exception as e:
            print(f" Error processing {collection_name}: {e}")
            conn.rollback()
        finally:
            cursor.close()

    def _insert_batch(self, cursor, table_name: str, documents: List[Dict], collection_name: str):
        """Insert a batch of documents with proper transaction handling"""
        if not documents:
            return 0, 0

        # Get schema info for this collection
        schema = self.schema_info.get(collection_name, {})

        # Build column list
        columns = ['mongo_id']
        for field_name in schema.keys():
            if field_name != '_id':
                col_name = self._sanitize_column_name(field_name)
                # Handle conflicts with PostgreSQL auto-generated columns
                if col_name in ['id', 'mongo_id', 'created_at', 'updated_at']:
                    col_name = f"field_{col_name}"
                columns.append(col_name)

        # Build INSERT statement
        placeholders = ', '.join(['%s'] * len(columns))
        sql = f"INSERT INTO {table_name} ({', '.join(columns)}) VALUES ({placeholders})"

        self.log_debug(f"SQL: {sql}", collection_name)

        # Convert documents to tuples
        rows = []
        errors = 0

        for doc_idx, doc in enumerate(documents):
            try:
                row = []

                # Add mongo_id
                row.append(str(doc.get('_id', '')))

                # Add other fields
                for field_name in schema.keys():
                    if field_name != '_id':
                        try:
                            value = self._get_nested_value(doc, field_name)
                            converted_value = self._convert_value_for_postgres(value, field_name, schema)
                            row.append(converted_value)
                        except Exception as e:
                            self.log_error(collection_name, 'field_conversion',
                                           f"Field '{field_name}' in doc {doc_idx}: {str(e)}")
                            # Only show debug for collections we're focusing on
                            if collection_name in self.debug_collections:
                                print(f" ⚠️ Error converting field '{field_name}': {e}")
                            row.append(None)  # Use NULL for problematic fields

                rows.append(tuple(row))

            except Exception as e:
                self.log_error(collection_name, 'document_conversion', f"Document {doc_idx}: {str(e)}")
                errors += 1
                continue

        # Execute batch insert
        if rows:
            try:
                cursor.executemany(sql, rows)
                return len(rows), errors
            except Exception as batch_error:
                self.log_error(collection_name, 'batch_insert', str(batch_error))

                # Only show detailed debugging for targeted collections
                if collection_name in self.debug_collections:
                    print(f" 🔴 Batch insert failed for {collection_name}: {batch_error}")
                    print(" Trying individual inserts with rollback handling...")

                # Rollback the failed transaction
                cursor.connection.rollback()

                # Try inserting one by one in individual transactions
                success_count = 0
                for row_idx, row in enumerate(rows):
                    try:
                        cursor.execute(sql, row)
                        cursor.connection.commit()  # Commit each successful insert
                        success_count += 1
                    except Exception as row_error:
                        cursor.connection.rollback()  # Rollback failed insert
                        self.log_error(collection_name, 'row_insert', f"Row {row_idx}: {str(row_error)}")

                        # Show detailed error only for the first few failures and only for targeted collections
                        if collection_name in self.debug_collections and errors < 3:
                            print(f" Row {row_idx} failed: {row_error}")
                            print(f" Row data: {len(row)} values, expected {len(columns)} columns")

                        errors += 1
                        continue
                return success_count, errors

        return 0, errors

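    # For a 'room' table whose sampled schema has fields name and sysMes, the
    # statement built above would read:
    #     INSERT INTO room (mongo_id, name, sysmes) VALUES (%s, %s, %s)
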
    def _get_nested_value(self, doc: Dict, field_path: str):
        """Get value from nested document using dot notation"""
        keys = field_path.split('.')
        value = doc

        for key in keys:
            if isinstance(value, dict) and key in value:
                value = value[key]
            else:
                return None

        return value

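    # e.g. _get_nested_value({'u': {'_id': 'abc'}}, 'u._id')  -> 'abc'
    #      _get_nested_value({'u': {'_id': 'abc'}}, 'u.name') -> None
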
    def _convert_value_for_postgres(self, value, field_name: str = None, schema: Dict = None):
        """Convert MongoDB value to PostgreSQL compatible value with schema-aware conversion"""
        if value is None:
            return None

        # Get the expected PostgreSQL type for this field if available
        expected_type = None
        if schema and field_name and field_name in schema:
            field_info = schema[field_name]
            expected_type = self._determine_postgres_type(field_info)

        # Handle conversion based on expected type
        if expected_type == 'BOOLEAN':
            if isinstance(value, bool):
                return value
            elif isinstance(value, str):
                return value.lower() in ('true', '1', 'yes', 'on')
            elif isinstance(value, (int, float)):
                return bool(value)
            else:
                return None
        elif expected_type == 'INTEGER':
            if isinstance(value, int):
                return value
            elif isinstance(value, float):
                return int(value)
            elif isinstance(value, str) and value.isdigit():
                return int(value)
            elif isinstance(value, bool):
                return int(value)
            else:
                return None
        elif expected_type == 'NUMERIC':
            if isinstance(value, (int, float)):
                return value
            elif isinstance(value, str):
                try:
                    return float(value)
                except ValueError:
                    return None
            elif isinstance(value, bool):
                return float(value)
            else:
                return None
        elif expected_type == 'TEXT':
            if isinstance(value, str):
                return value
            elif value is not None:
                str_value = str(value)
                # Handle very long strings
                if len(str_value) > 65535:
                    return str_value[:65535]
                return str_value
            else:
                return None
        elif expected_type == 'TIMESTAMP':
            if hasattr(value, 'isoformat'):
                return value.isoformat()
            elif isinstance(value, str):
                return value
            else:
                return str(value) if value is not None else None
        elif expected_type == 'JSONB':
            if isinstance(value, (dict, list)):
                return json.dumps(value, default=self._json_serializer)
            elif isinstance(value, str):
                # Check if it's already valid JSON
                try:
                    json.loads(value)
                    return value
                except (json.JSONDecodeError, TypeError):
                    # Not valid JSON, wrap it
                    return json.dumps(value)
            else:
                return json.dumps(value, default=self._json_serializer)

        # Fallback to original logic if no expected type or type not recognized
        if isinstance(value, bool):
            return value
        elif isinstance(value, (int, float)):
            return value
        elif isinstance(value, str):
            return value
        elif isinstance(value, (dict, list)):
            return json.dumps(value, default=self._json_serializer)
        elif hasattr(value, 'isoformat'):  # datetime
            return value.isoformat()
        elif hasattr(value, '__str__'):
            str_value = str(value)
            if len(str_value) > 65535:
                return str_value[:65535]
            return str_value
        else:
            return str(value)

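    # A few concrete conversions under this scheme:
    #     expected TEXT,    value True       -> 'True'
    #     expected BOOLEAN, value 'yes'      -> True
    #     expected JSONB,   value {'a': 1}   -> '{"a": 1}'
    #     expected JSONB,   value 'not json' -> '"not json"'
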
    def _json_serializer(self, obj):
        """Custom JSON serializer for complex objects with better error handling"""
        try:
            if hasattr(obj, 'isoformat'):  # datetime
                return obj.isoformat()
            elif hasattr(obj, '__str__'):
                return str(obj)
            else:
                return None
        except Exception as e:
            self.log_debug(f"JSON serialization error: {e}")
            return str(obj)

    def run_conversion(self, sample_size: int = 100, batch_size: int = 1000):
        """Run the full conversion process with focused debugging"""
        print("Starting MongoDB to PostgreSQL conversion...")
        print("This will convert your Rocket.Chat database from MongoDB to PostgreSQL")
        if self.debug_mode:
            if self.debug_collections:
                print(f"🐛 DEBUG MODE: Focusing on collections: {', '.join(self.debug_collections)}")
            else:
                print("🐛 DEBUG MODE: All collections")
        print("=" * 70)

        # Step 1: Discover collections
        self.discover_collections()

        # Step 2: Analyze schemas
        print("\nAnalyzing collection schemas...")
        for collection_name in self.collections:
            self.analyze_schema(collection_name, sample_size)

        # Sample problematic collections if debugging
        if self.debug_mode and self.debug_collections:
            for coll in self.debug_collections:
                if coll in self.collections:
                    self.sample_documents(coll, 2)

        # Step 3: Generate PostgreSQL schema
        table_definitions = self.generate_postgres_schema()

        # Step 4: Create database schema
        self.create_postgres_database(table_definitions)

        # Step 5: Convert and insert data
        self.convert_and_insert_data(batch_size)

        # Step 6: Show error summary
        self._print_error_summary()

        print("=" * 70)
        print("✅ Conversion completed!")
        print(f" Database: {self.postgres_config['database']}")
        print(f" Tables created: {len(table_definitions)}")

    def _print_error_summary(self):
        """Print a focused summary of errors"""
        if not self.error_log:
            print("\n✅ No errors encountered during conversion!")
            return

        print("\n⚠️ ERROR SUMMARY:")
        print("=" * 50)

        # Sort by error count descending
        sorted_collections = sorted(self.error_log.items(),
                                    key=lambda x: len(x[1]), reverse=True)

        for collection, errors in sorted_collections:
            error_types = {}
            for error in errors:
                error_type = error['type']
                if error_type not in error_types:
                    error_types[error_type] = []
                error_types[error_type].append(error['details'])

            print(f"\n🔴 {collection} ({len(errors)} total errors):")
            for error_type, details_list in error_types.items():
                print(f" {error_type}: {len(details_list)} errors")

                # Show sample errors for critical collections
                if collection in ['rocketchat_settings', 'rocketchat_room'] and len(details_list) > 0:
                    print(f" Sample: {details_list[0][:100]}...")

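    # The summary above assumes self.error_log maps collection names to lists
    # of {'type', 'details'} dicts, e.g. (hypothetical entry):
    #     {'rocketchat_settings': [{'type': 'field_conversion',
    #                               'details': "Field 'value' in doc 3: ..."}]}
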
def main():
    parser = argparse.ArgumentParser(
        description='Convert MongoDB BSON export to PostgreSQL',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Basic usage
  python3 mongo_to_postgres_converter.py \\
      --mongo-path db/database/62df06d44234d20001289144 \\
      --pg-database rocketchat_converted \\
      --pg-user rocketchat_user \\
      --pg-password mypassword

  # Debug specific failing collections
  python3 mongo_to_postgres_converter.py \\
      --mongo-path db/database/62df06d44234d20001289144 \\
      --pg-database rocketchat_converted \\
      --pg-user rocketchat_user \\
      --pg-password mypassword \\
      --debug-collections rocketchat_settings rocketchat_room

Before running this script:
  1. Run: sudo -u postgres psql -f reset_database.sql
  2. Update the password in reset_database.sql
"""
    )

    parser.add_argument('--mongo-path', required=True, help='Path to MongoDB export directory')
    parser.add_argument('--pg-host', default='localhost', help='PostgreSQL host (default: localhost)')
    parser.add_argument('--pg-port', default='5432', help='PostgreSQL port (default: 5432)')
    parser.add_argument('--pg-database', required=True, help='PostgreSQL database name')
    parser.add_argument('--pg-user', required=True, help='PostgreSQL username')
    parser.add_argument('--pg-password', required=True, help='PostgreSQL password')
    parser.add_argument('--sample-size', type=int, default=100, help='Number of documents to sample for schema analysis (default: 100)')
    parser.add_argument('--batch-size', type=int, default=1000, help='Batch size for data insertion (default: 1000)')
    parser.add_argument('--debug', action='store_true', help='Enable debug mode with detailed error logging')
    parser.add_argument('--debug-collections', nargs='*', help='Specific collections to debug (e.g., rocketchat_settings rocketchat_room)')

    args = parser.parse_args()

    postgres_config = {
        'host': args.pg_host,
        'port': args.pg_port,
        'database': args.pg_database,
        'user': args.pg_user,
        'password': args.pg_password
    }

    # Enable debug mode if debug collections are specified
    debug_mode = args.debug or (args.debug_collections is not None)

    converter = MongoToPostgresConverter(args.mongo_path, postgres_config, debug_mode, args.debug_collections)
    converter.run_conversion(args.sample_size, args.batch_size)


if __name__ == '__main__':
    main()

41  inventory-server/chat/db-convert/reset_database.sql  Normal file
@@ -0,0 +1,41 @@
-- PostgreSQL Database Reset Script for Rocket.Chat Import
-- Run as: sudo -u postgres psql -f reset_database.sql

-- Terminate all connections to the database (force disconnect users)
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE datname = 'rocketchat_converted' AND pid <> pg_backend_pid();

-- Drop the database if it exists
DROP DATABASE IF EXISTS rocketchat_converted;

-- Create fresh database
CREATE DATABASE rocketchat_converted;

-- Create user (if not exists)
DO $$
BEGIN
    IF NOT EXISTS (SELECT FROM pg_user WHERE usename = 'rocketchat_user') THEN
        CREATE USER rocketchat_user WITH PASSWORD 'HKjLgt23gWuPXzEAn3rW';
    END IF;
END $$;

-- Grant database privileges
GRANT CONNECT ON DATABASE rocketchat_converted TO rocketchat_user;
GRANT CREATE ON DATABASE rocketchat_converted TO rocketchat_user;

-- Connect to the new database (psql meta-command; no trailing semicolon)
\c rocketchat_converted

-- Grant schema privileges
GRANT CREATE ON SCHEMA public TO rocketchat_user;
GRANT USAGE ON SCHEMA public TO rocketchat_user;

-- Grant privileges on all future tables and sequences
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO rocketchat_user;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT USAGE, SELECT ON SEQUENCES TO rocketchat_user;

-- Display success message
\echo 'Database reset completed successfully!'
\echo 'You can now run the converter with:'
\echo 'python3 mongo_to_postgres_converter.py --mongo-path db/database/62df06d44234d20001289144 --pg-database rocketchat_converted --pg-user rocketchat_user --pg-password your_password'
54  inventory-server/chat/db-convert/test_converter.py  Normal file
@@ -0,0 +1,54 @@
#!/usr/bin/env python3
"""
Quick test script to verify the converter fixes work for problematic collections
"""

from mongo_to_postgres_converter import MongoToPostgresConverter

def test_problematic_collections():
    print("🧪 Testing converter fixes for problematic collections...")

    postgres_config = {
        'host': 'localhost',
        'port': '5432',
        'database': 'rocketchat_test',
        'user': 'rocketchat_user',
        'password': 'password123'
    }

    converter = MongoToPostgresConverter(
        'db/database/62df06d44234d20001289144',
        postgres_config,
        debug_mode=True,
        debug_collections=['rocketchat_settings', 'rocketchat_room']
    )

    # Test just discovery and schema analysis
    print("\n1. Testing collection discovery...")
    converter.discover_collections()

    print("\n2. Testing schema analysis...")
    if 'rocketchat_settings' in converter.collections:
        settings_schema = converter.analyze_schema('rocketchat_settings', 10)
        print(f"Settings schema fields: {len(settings_schema)}")

        # Check specific problematic fields
        if 'packageValue' in settings_schema:
            packagevalue_info = settings_schema['packageValue']
            pg_type = converter._determine_postgres_type(packagevalue_info)
            print(f"packageValue types: {packagevalue_info['types']} -> PostgreSQL: {pg_type}")

    if 'rocketchat_room' in converter.collections:
        room_schema = converter.analyze_schema('rocketchat_room', 10)
        print(f"Room schema fields: {len(room_schema)}")

        # Check specific problematic fields
        if 'sysMes' in room_schema:
            sysmes_info = room_schema['sysMes']
            pg_type = converter._determine_postgres_type(sysmes_info)
            print(f"sysMes types: {sysmes_info['types']} -> PostgreSQL: {pg_type}")

    print("\n✅ Test completed - check the type mappings above!")

if __name__ == '__main__':
    test_problematic_collections()
1447  inventory-server/chat/package-lock.json  generated  Normal file
File diff suppressed because it is too large
20  inventory-server/chat/package.json  Normal file
@@ -0,0 +1,20 @@
{
  "name": "chat-server",
  "version": "1.0.0",
  "description": "Chat archive server for Rocket.Chat data",
  "main": "server.js",
  "scripts": {
    "start": "node server.js",
    "dev": "nodemon server.js"
  },
  "dependencies": {
    "express": "^4.18.2",
    "cors": "^2.8.5",
    "pg": "^8.11.0",
    "dotenv": "^16.0.3",
    "morgan": "^1.10.0"
  },
  "devDependencies": {
    "nodemon": "^2.0.22"
  }
}
649  inventory-server/chat/routes.js  Normal file
@@ -0,0 +1,649 @@
const express = require('express');
const path = require('path');
const router = express.Router();

// Serve uploaded files with proper mapping from database paths to actual file locations
router.get('/files/uploads/*', async (req, res) => {
  try {
    // Extract the path from the URL (everything after /files/uploads/)
    const requestPath = req.params[0];

    // The URL path will be like: ufs/AmazonS3:Uploads/274Mf9CyHNG72oF86/filename.jpg
    // We need to extract the mongo_id (274Mf9CyHNG72oF86) from this path
    const pathParts = requestPath.split('/');
    let mongoId = null;

    // Find the mongo_id in the path structure: it is the segment that
    // follows the 'AmazonS3:Uploads' component
    for (let i = 0; i < pathParts.length; i++) {
      if (pathParts[i].includes('AmazonS3:Uploads') && i + 1 < pathParts.length) {
        mongoId = pathParts[i + 1];
        break;
      }
    }

    if (!mongoId) {
      // Try to get mongo_id from database by matching the full path
      const result = await global.pool.query(`
        SELECT mongo_id, name, type
        FROM uploads
        WHERE path = $1 OR url = $1
        LIMIT 1
      `, [`/ufs/AmazonS3:Uploads/${requestPath}`]);

      if (result.rows.length > 0) {
        mongoId = result.rows[0].mongo_id;
      }
    }

    if (!mongoId) {
      return res.status(404).json({ error: 'File not found' });
    }

    // The actual file is stored with just the mongo_id as filename
    const filePath = path.join(__dirname, 'db-convert/db/files/uploads', mongoId);

    // Get file info from database for proper content-type
    const fileInfo = await global.pool.query(`
      SELECT name, type
      FROM uploads
      WHERE mongo_id = $1
      LIMIT 1
    `, [mongoId]);

    if (fileInfo.rows.length === 0) {
      return res.status(404).json({ error: 'File metadata not found' });
    }

    const { name, type } = fileInfo.rows[0];

    // Set proper content type
    if (type) {
      res.set('Content-Type', type);
    }

    // Set content disposition with original filename
    if (name) {
      res.set('Content-Disposition', `inline; filename="${name}"`);
    }

    // Send the file
    res.sendFile(filePath, (err) => {
      if (err) {
        console.error('Error serving file:', err);
        if (!res.headersSent) {
          res.status(404).json({ error: 'File not found on disk' });
        }
      }
    });

  } catch (error) {
    console.error('Error serving upload:', error);
    res.status(500).json({ error: 'Server error' });
  }
});

// Also serve files directly by mongo_id for simpler access
router.get('/files/by-id/:mongoId', async (req, res) => {
  try {
    const { mongoId } = req.params;

    // Get file info from database
    const fileInfo = await global.pool.query(`
      SELECT name, type
      FROM uploads
      WHERE mongo_id = $1
      LIMIT 1
    `, [mongoId]);

    if (fileInfo.rows.length === 0) {
      return res.status(404).json({ error: 'File not found' });
    }

    const { name, type } = fileInfo.rows[0];
    const filePath = path.join(__dirname, 'db-convert/db/files/uploads', mongoId);

    // Set proper content type and filename
    if (type) {
      res.set('Content-Type', type);
    }

    if (name) {
      res.set('Content-Disposition', `inline; filename="${name}"`);
    }

    // Send the file
    res.sendFile(filePath, (err) => {
      if (err) {
        console.error('Error serving file:', err);
        if (!res.headersSent) {
          res.status(404).json({ error: 'File not found on disk' });
        }
      }
    });

  } catch (error) {
    console.error('Error serving upload by ID:', error);
    res.status(500).json({ error: 'Server error' });
  }
});

// Serve user avatars by mongo_id
router.get('/avatar/:mongoId', async (req, res) => {
  try {
    const { mongoId } = req.params;

    console.log(`[Avatar Debug] Looking up avatar for user mongo_id: ${mongoId}`);

    // First try to find avatar by user's avataretag
    const userResult = await global.pool.query(`
      SELECT avataretag, username FROM users WHERE mongo_id = $1
    `, [mongoId]);

    let avatarPath = null;

    if (userResult.rows.length > 0) {
      const username = userResult.rows[0].username;
      const avataretag = userResult.rows[0].avataretag;

      // Try method 1: Look up by avataretag -> etag (for users with avataretag set)
      if (avataretag) {
        console.log(`[Avatar Debug] Found user ${username} with avataretag: ${avataretag}`);

        const avatarResult = await global.pool.query(`
          SELECT url, path FROM avatars WHERE etag = $1
        `, [avataretag]);

        if (avatarResult.rows.length > 0) {
          const dbPath = avatarResult.rows[0].path || avatarResult.rows[0].url;
          console.log(`[Avatar Debug] Found avatar record with path: ${dbPath}`);

          if (dbPath) {
            const pathParts = dbPath.split('/');
            for (let i = 0; i < pathParts.length; i++) {
              if (pathParts[i].includes('AmazonS3:Avatars') && i + 1 < pathParts.length) {
                const avatarMongoId = pathParts[i + 1];
                avatarPath = path.join(__dirname, 'db-convert/db/files/avatars', avatarMongoId);
                console.log(`[Avatar Debug] Extracted avatar mongo_id: ${avatarMongoId}, full path: ${avatarPath}`);
                break;
              }
            }
          }
        } else {
          console.log(`[Avatar Debug] No avatar record found for etag: ${avataretag}`);
        }
      }

      // Try method 2: Look up by userid directly (for users without avataretag)
      if (!avatarPath) {
        console.log(`[Avatar Debug] Trying direct userid lookup for user ${username} (${mongoId})`);

        const avatarResult = await global.pool.query(`
          SELECT url, path FROM avatars WHERE userid = $1
        `, [mongoId]);

        if (avatarResult.rows.length > 0) {
          const dbPath = avatarResult.rows[0].path || avatarResult.rows[0].url;
          console.log(`[Avatar Debug] Found avatar record by userid with path: ${dbPath}`);

          if (dbPath) {
            const pathParts = dbPath.split('/');
            for (let i = 0; i < pathParts.length; i++) {
              if (pathParts[i].includes('AmazonS3:Avatars') && i + 1 < pathParts.length) {
                const avatarMongoId = pathParts[i + 1];
                avatarPath = path.join(__dirname, 'db-convert/db/files/avatars', avatarMongoId);
                console.log(`[Avatar Debug] Extracted avatar mongo_id: ${avatarMongoId}, full path: ${avatarPath}`);
                break;
              }
            }
          }
        } else {
          console.log(`[Avatar Debug] No avatar record found for userid: ${mongoId}`);
        }
      }
    } else {
      console.log(`[Avatar Debug] No user found for mongo_id: ${mongoId}`);
    }

    // Fallback: try direct lookup by user mongo_id
    if (!avatarPath) {
      avatarPath = path.join(__dirname, 'db-convert/db/files/avatars', mongoId);
      console.log(`[Avatar Debug] Using fallback path: ${avatarPath}`);
    }

    // Set proper content type for images
    res.set('Content-Type', 'image/jpeg'); // Most avatars are likely JPEG

    // Send the file
    res.sendFile(avatarPath, (err) => {
      if (err) {
        // If avatar doesn't exist, send a default 404 or generate initials
        console.log(`[Avatar Debug] Avatar file not found at path: ${avatarPath}, error:`, err.message);
        if (!res.headersSent) {
          res.status(404).json({ error: 'Avatar not found' });
        }
      } else {
        console.log(`[Avatar Debug] Successfully served avatar from: ${avatarPath}`);
      }
    });

  } catch (error) {
    console.error('Error serving avatar:', error);
    res.status(500).json({ error: 'Server error' });
  }
});

// Serve avatars statically as fallback
router.use('/files/avatars', express.static(path.join(__dirname, 'db-convert/db/files/avatars')));

// Get all users for the "view as" dropdown (active and inactive)
router.get('/users', async (req, res) => {
  try {
    const result = await global.pool.query(`
      SELECT id, username, name, type, active, status, lastlogin,
             statustext, utcoffset, statusconnection, mongo_id, avataretag
      FROM users
      WHERE type = 'user'
      ORDER BY
        active DESC, -- Active users first
        CASE
          WHEN status = 'online' THEN 1
          WHEN status = 'away' THEN 2
          WHEN status = 'busy' THEN 3
          ELSE 4
        END,
        name ASC
    `);

    res.json({
      status: 'success',
      users: result.rows
    });
  } catch (error) {
    console.error('Error fetching users:', error);
    res.status(500).json({
      status: 'error',
      error: 'Failed to fetch users',
      details: error.message
    });
  }
});

// Get rooms for a specific user with enhanced room names for direct messages
router.get('/users/:userId/rooms', async (req, res) => {
  const { userId } = req.params;

  try {
    // Get the current user's mongo_id for filtering
    const userResult = await global.pool.query(`
      SELECT mongo_id, username FROM users WHERE id = $1
    `, [userId]);

    if (userResult.rows.length === 0) {
      return res.status(404).json({
        status: 'error',
        error: 'User not found'
      });
    }

    const currentUserMongoId = userResult.rows[0].mongo_id;
    const currentUsername = userResult.rows[0].username;

    // Get rooms where the user is a member with proper naming from subscription table
    // Include archived and closed rooms but sort them at the bottom
    const result = await global.pool.query(`
      SELECT DISTINCT
        r.id,
        r.mongo_id as room_mongo_id,
        r.name,
        r.fname,
        r.t as type,
        r.msgs,
        r.lm as last_message_date,
        r.usernames,
        r.uids,
        r.userscount,
        r.description,
        r.teamid,
        r.archived,
        s.open,
        -- Use the subscription's name for direct messages (excludes current user)
        -- For channels/groups, use room's fname or name
        CASE
          WHEN r.t = 'd' THEN COALESCE(s.fname, s.name, 'Unknown User')
          ELSE COALESCE(r.fname, r.name, 'Unnamed Room')
        END as display_name
      FROM room r
      JOIN subscription s ON s.rid = r.mongo_id
      WHERE s.u->>'_id' = $1
      ORDER BY
        s.open DESC NULLS LAST, -- Open rooms first
        r.archived NULLS FIRST, -- Non-archived first (nulls treated as false)
        r.lm DESC NULLS LAST
      LIMIT 50
    `, [currentUserMongoId]);

    // Enhance rooms with participant information for direct messages
    const enhancedRooms = await Promise.all(result.rows.map(async (room) => {
      if (room.type === 'd' && room.uids) {
        // Get participant info (excluding current user) for direct messages
        const participantResult = await global.pool.query(`
          SELECT u.username, u.name, u.mongo_id, u.avataretag
          FROM users u
          WHERE u.mongo_id = ANY($1::text[])
            AND u.mongo_id != $2
        `, [room.uids, currentUserMongoId]);

        room.participants = participantResult.rows;
      }
      return room;
    }));

    res.json({
      status: 'success',
      rooms: enhancedRooms
    });
  } catch (error) {
    console.error('Error fetching user rooms:', error);
    res.status(500).json({
      status: 'error',
      error: 'Failed to fetch user rooms',
      details: error.message
    });
  }
});

// Get room details including participants
router.get('/rooms/:roomId', async (req, res) => {
  const { roomId } = req.params;
  const { userId } = req.query; // Accept current user ID as query parameter

  try {
    const result = await global.pool.query(`
      SELECT r.id, r.name, r.fname, r.t as type, r.msgs, r.description,
             r.lm as last_message_date, r.usernames, r.uids, r.userscount, r.teamid
      FROM room r
      WHERE r.id = $1
    `, [roomId]);

    if (result.rows.length === 0) {
      return res.status(404).json({
        status: 'error',
        error: 'Room not found'
      });
    }

    const room = result.rows[0];

    // For direct messages, get the proper display name based on current user
    if (room.type === 'd' && room.uids && userId) {
      // Get current user's mongo_id
      const userResult = await global.pool.query(`
        SELECT mongo_id FROM users WHERE id = $1
      `, [userId]);

      if (userResult.rows.length > 0) {
        const currentUserMongoId = userResult.rows[0].mongo_id;

        // Get display name from subscription table for this user
        // Use room mongo_id to match with subscription.rid
        const roomMongoResult = await global.pool.query(`
          SELECT mongo_id FROM room WHERE id = $1
        `, [roomId]);

        if (roomMongoResult.rows.length > 0) {
          const roomMongoId = roomMongoResult.rows[0].mongo_id;

          const subscriptionResult = await global.pool.query(`
            SELECT fname, name FROM subscription
            WHERE rid = $1 AND u->>'_id' = $2
          `, [roomMongoId, currentUserMongoId]);

          if (subscriptionResult.rows.length > 0) {
            const sub = subscriptionResult.rows[0];
            room.display_name = sub.fname || sub.name || 'Unknown User';
          }
        }
      }

      // Get all participants for additional info
      const participantResult = await global.pool.query(`
        SELECT username, name
        FROM users
        WHERE mongo_id = ANY($1::text[])
      `, [room.uids]);

      room.participants = participantResult.rows;
    } else {
      // For channels/groups, use room's fname or name
      room.display_name = room.fname || room.name || 'Unnamed Room';
    }

    res.json({
      status: 'success',
      room: room
    });
  } catch (error) {
    console.error('Error fetching room details:', error);
    res.status(500).json({
      status: 'error',
      error: 'Failed to fetch room details',
      details: error.message
    });
  }
});

// Get messages for a specific room (fast, without attachments)
router.get('/rooms/:roomId/messages', async (req, res) => {
  const { roomId } = req.params;
  const { limit = 50, offset = 0, before } = req.query;

  try {
    // Fast query - just get messages without expensive attachment joins
    let query = `
      SELECT m.id, m.msg, m.ts, m.u, m._updatedat, m.urls, m.mentions, m.md
      FROM message m
      JOIN room r ON m.rid = r.mongo_id
      WHERE r.id = $1
    `;

    const params = [roomId];

    if (before) {
      query += ` AND m.ts < $${params.length + 1}`;
      params.push(before);
    }

    query += ` ORDER BY m.ts DESC LIMIT $${params.length + 1} OFFSET $${params.length + 2}`;
    params.push(limit, offset);

    const result = await global.pool.query(query, params);

    // Add empty attachments array for now - attachments will be loaded separately if needed
    const messages = result.rows.map(msg => ({
      ...msg,
      attachments: []
    }));

    res.json({
      status: 'success',
      messages: messages.reverse() // Reverse to show oldest first
    });
  } catch (error) {
    console.error('Error fetching messages:', error);
    res.status(500).json({
      status: 'error',
      error: 'Failed to fetch messages',
      details: error.message
    });
  }
});

// Get attachments for specific messages (called separately for performance)
router.post('/messages/attachments', async (req, res) => {
  const { messageIds } = req.body;

  if (!messageIds || !Array.isArray(messageIds) || messageIds.length === 0) {
    return res.json({ status: 'success', attachments: {} });
  }

  try {
    // Get room mongo_id from first message to limit search scope
    const roomQuery = await global.pool.query(`
      SELECT r.mongo_id as room_mongo_id
      FROM message m
      JOIN room r ON m.rid = r.mongo_id
      WHERE m.id = $1
      LIMIT 1
    `, [messageIds[0]]);

    if (roomQuery.rows.length === 0) {
      return res.json({ status: 'success', attachments: {} });
    }

    const roomMongoId = roomQuery.rows[0].room_mongo_id;

    // Get messages and their upload timestamps
    const messagesQuery = await global.pool.query(`
      SELECT m.id, m.ts, m.u->>'_id' as user_id
      FROM message m
      WHERE m.id = ANY($1::int[])
    `, [messageIds]);

    if (messagesQuery.rows.length === 0) {
      return res.json({ status: 'success', attachments: {} });
    }

    // Build a map of user_id -> array of message timestamps for efficient lookup
    const userTimeMap = {};
    const messageMap = {};
    messagesQuery.rows.forEach(msg => {
      if (!userTimeMap[msg.user_id]) {
        userTimeMap[msg.user_id] = [];
      }
      userTimeMap[msg.user_id].push(msg.ts);
      messageMap[msg.id] = { ts: msg.ts, user_id: msg.user_id };
    });

    // Get attachments for this room and these users
    // (id is included so the response objects below have a defined id)
    const uploadsQuery = await global.pool.query(`
      SELECT id, mongo_id, name, size, type, url, path, typegroup, identify,
             userid, uploadedat
      FROM uploads
      WHERE rid = $1
        AND userid = ANY($2::text[])
      ORDER BY uploadedat
    `, [roomMongoId, Object.keys(userTimeMap)]);

    // Match attachments to messages based on timestamp proximity (within 5 minutes)
    const attachmentsByMessage = {};

    uploadsQuery.rows.forEach(upload => {
      const uploadTime = new Date(upload.uploadedat).getTime();

      // Find the closest message from this user within 5 minutes
      let closestMessageId = null;
      let closestTimeDiff = Infinity;

      Object.entries(messageMap).forEach(([msgId, msgData]) => {
        if (msgData.user_id === upload.userid) {
          const msgTime = new Date(msgData.ts).getTime();
          const timeDiff = Math.abs(uploadTime - msgTime);

          if (timeDiff < 300000 && timeDiff < closestTimeDiff) { // 5 minutes = 300000ms
            closestMessageId = msgId;
            closestTimeDiff = timeDiff;
          }
        }
      });

      if (closestMessageId) {
        if (!attachmentsByMessage[closestMessageId]) {
          attachmentsByMessage[closestMessageId] = [];
        }

        attachmentsByMessage[closestMessageId].push({
          id: upload.id,
          mongo_id: upload.mongo_id,
          name: upload.name,
          size: upload.size,
          type: upload.type,
          url: upload.url,
          path: upload.path,
          typegroup: upload.typegroup,
          identify: upload.identify
        });
      }
    });

    res.json({
      status: 'success',
      attachments: attachmentsByMessage
    });

  } catch (error) {
    console.error('Error fetching message attachments:', error);
    res.status(500).json({
      status: 'error',
      error: 'Failed to fetch attachments',
      details: error.message
    });
  }
});

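// Worked example of the proximity matching above (illustrative numbers only):
// an upload by user A at 12:00:03 and messages from A at 12:00:01 and
// 12:04:00 give diffs of 2s and 237s; both fall inside the 300s window, so
// the upload attaches to the 12:00:01 message, the closer of the two.
// An upload with no same-user message within 300s is silently dropped.
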
// Search messages in accessible rooms for a user
router.get('/users/:userId/search', async (req, res) => {
  const { userId } = req.params;
  const { q, limit = 20 } = req.query;

  if (!q || q.length < 2) {
    return res.status(400).json({
      status: 'error',
      error: 'Search query must be at least 2 characters'
    });
  }

  try {
    const userResult = await global.pool.query(`
      SELECT mongo_id FROM users WHERE id = $1
    `, [userId]);

    if (userResult.rows.length === 0) {
      return res.status(404).json({
        status: 'error',
        error: 'User not found'
      });
    }

    const currentUserMongoId = userResult.rows[0].mongo_id;

    const result = await global.pool.query(`
      SELECT m.id, m.msg, m.ts, m.u, r.id as room_id, r.name as room_name, r.fname as room_fname, r.t as room_type
      FROM message m
      JOIN room r ON m.rid = r.mongo_id
      JOIN subscription s ON s.rid = r.mongo_id AND s.u->>'_id' = $1
      WHERE m.msg ILIKE $2
        AND r.archived IS NOT TRUE
      ORDER BY m.ts DESC
      LIMIT $3
    `, [currentUserMongoId, `%${q}%`, limit]);

    res.json({
      status: 'success',
      results: result.rows
    });
  } catch (error) {
    console.error('Error searching messages:', error);
    res.status(500).json({
      status: 'error',
      error: 'Failed to search messages',
      details: error.message
    });
  }
});

module.exports = router;
83  inventory-server/chat/server.js  Normal file
@@ -0,0 +1,83 @@
require('dotenv').config({ path: '../.env' });
const express = require('express');
const cors = require('cors');
const { Pool } = require('pg');
const morgan = require('morgan');
const chatRoutes = require('./routes');

// Log startup configuration
console.log('Starting chat server with config:', {
  host: process.env.CHAT_DB_HOST,
  user: process.env.CHAT_DB_USER,
  database: process.env.CHAT_DB_NAME || 'rocketchat_converted',
  port: process.env.CHAT_DB_PORT,
  chat_port: process.env.CHAT_PORT || 3014
});

const app = express();
const port = process.env.CHAT_PORT || 3014;

// Database configuration for rocketchat_converted database
const pool = new Pool({
  host: process.env.CHAT_DB_HOST,
  user: process.env.CHAT_DB_USER,
  password: process.env.CHAT_DB_PASSWORD,
  database: process.env.CHAT_DB_NAME || 'rocketchat_converted',
  port: process.env.CHAT_DB_PORT,
});

// Make pool available globally
global.pool = pool;

// Middleware
app.use(express.json());
app.use(morgan('combined'));
app.use(cors({
  origin: ['http://localhost:5175', 'http://localhost:5174', 'https://inventory.kent.pw'],
  credentials: true
}));

// Test database connection endpoint
app.get('/test-db', async (req, res) => {
  try {
    const result = await pool.query('SELECT COUNT(*) as user_count FROM users WHERE active = true');
    const messageResult = await pool.query('SELECT COUNT(*) as message_count FROM message');
    const roomResult = await pool.query('SELECT COUNT(*) as room_count FROM room');

    res.json({
      status: 'success',
      database: 'rocketchat_converted',
      stats: {
        active_users: parseInt(result.rows[0].user_count),
        total_messages: parseInt(messageResult.rows[0].message_count),
        total_rooms: parseInt(roomResult.rows[0].room_count)
      }
    });
  } catch (error) {
    console.error('Database test error:', error);
    res.status(500).json({
      status: 'error',
      error: 'Database connection failed',
      details: error.message
    });
  }
});

// Mount all routes from routes.js
app.use('/', chatRoutes);

// Health check endpoint
app.get('/health', (req, res) => {
  res.json({ status: 'healthy' });
});

// Error handling middleware
app.use((err, req, res, next) => {
  console.error(err.stack);
  res.status(500).json({ error: 'Something broke!' });
});

// Start server
app.listen(port, () => {
  console.log(`Chat server running on port ${port}`);
});
196  inventory-server/db/config-schema-new.sql  Normal file
@@ -0,0 +1,196 @@
-- Create function for updating timestamps if it doesn't exist
CREATE OR REPLACE FUNCTION update_updated_column()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated = CURRENT_TIMESTAMP;
    RETURN NEW;
END;
$$ language 'plpgsql';

-- Create function for updating updated_at timestamps
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = CURRENT_TIMESTAMP;
    RETURN NEW;
END;
$$ language 'plpgsql';

-- Drop tables in reverse order of dependency
DROP TABLE IF EXISTS public.settings_product CASCADE;
DROP TABLE IF EXISTS public.settings_vendor CASCADE;
DROP TABLE IF EXISTS public.settings_global CASCADE;

-- Table Definition: settings_global
CREATE TABLE public.settings_global (
    setting_key VARCHAR PRIMARY KEY,
    setting_value VARCHAR NOT NULL,
    description TEXT,
    updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP
);

-- Table Definition: settings_vendor
CREATE TABLE public.settings_vendor (
    vendor VARCHAR PRIMARY KEY, -- Matches products.vendor
    default_lead_time_days INT,
    default_days_of_stock INT,
    updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP
);
-- Index for faster lookups if needed (PK usually sufficient)
-- CREATE INDEX idx_settings_vendor_vendor ON public.settings_vendor(vendor);

-- Table Definition: settings_product
CREATE TABLE public.settings_product (
    pid INT8 PRIMARY KEY,
    lead_time_days INT, -- Overrides vendor/global
    days_of_stock INT, -- Overrides vendor/global
    safety_stock INT DEFAULT 0, -- Minimum desired stock level
    forecast_method VARCHAR DEFAULT 'standard', -- e.g., 'standard', 'seasonal'
    exclude_from_forecast BOOLEAN DEFAULT FALSE,
    updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
    CONSTRAINT fk_settings_product_pid FOREIGN KEY (pid) REFERENCES public.products(pid) ON DELETE CASCADE ON UPDATE CASCADE
);
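
-- Illustrative only, not part of the schema: one way to resolve the effective
-- lead time for a product under the override hierarchy above (product setting,
-- else vendor default, else the global setting). Table and key names are the
-- ones defined in this file; the query itself is a sketch.
SELECT
    p.pid,
    COALESCE(
        sp.lead_time_days,                               -- product-level override
        sv.default_lead_time_days,                       -- vendor default
        (SELECT setting_value::int
           FROM public.settings_global
          WHERE setting_key = 'default_lead_time_days')  -- global fallback
    ) AS effective_lead_time_days
FROM public.products p
LEFT JOIN public.settings_product sp ON sp.pid = p.pid
LEFT JOIN public.settings_vendor sv ON sv.vendor = p.vendor;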

-- Description: Inserts or updates standard default global settings.
-- Safe to rerun; will update existing keys with these default values.
-- Dependencies: `settings_global` table must exist.
-- Frequency: Run once initially, or rerun if you want to reset global defaults.

INSERT INTO public.settings_global (setting_key, setting_value, description) VALUES
('abc_revenue_threshold_a', '0.80', 'Revenue percentage for Class A (cumulative)'),
('abc_revenue_threshold_b', '0.95', 'Revenue percentage for Class B (cumulative)'),
('abc_calculation_basis', 'revenue_30d', 'Metric for ABC calc (revenue_30d, sales_30d, lifetime_revenue)'),
('abc_calculation_period', '30', 'Days period for ABC calculation if not lifetime'),
('default_forecast_method', 'standard', 'Default forecast method (standard, seasonal)'),
('default_lead_time_days', '14', 'Global default lead time in days'),
('default_days_of_stock', '30', 'Global default days of stock coverage target'),
-- Set default safety stock to 0 units. Can be overridden per product.
-- If you wanted safety stock in days, you'd store 'days' here and calculate units later.
('default_safety_stock_units', '0', 'Global default safety stock in units')
ON CONFLICT (setting_key) DO UPDATE SET
    setting_value = EXCLUDED.setting_value,
    description = EXCLUDED.description,
    updated_at = CURRENT_TIMESTAMP; -- Update timestamp if default value changes
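
-- Illustrative read, not part of the schema: setting_value is stored as
-- VARCHAR, so numeric settings are cast on read.
SELECT setting_value::numeric AS abc_threshold_a
FROM public.settings_global
WHERE setting_key = 'abc_revenue_threshold_a';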

-- Description: Creates placeholder rows in `settings_vendor` for each unique vendor
-- found in the `products` table. Does NOT set specific overrides.
-- Safe to rerun; will NOT overwrite existing vendor settings.
-- Dependencies: `settings_vendor` table must exist, `products` table populated.
-- Frequency: Run once after initial product load, or periodically if new vendors are added.

INSERT INTO public.settings_vendor (
    vendor,
    default_lead_time_days,
    default_days_of_stock
    -- updated_at will use its default CURRENT_TIMESTAMP on insert
)
SELECT DISTINCT
    p.vendor,
    -- Explicitly cast NULL to INTEGER to resolve type mismatch
    CAST(NULL AS INTEGER),
    CAST(NULL AS INTEGER)
FROM
    public.products p
WHERE
    p.vendor IS NOT NULL
    AND p.vendor <> '' -- Exclude blank vendors if necessary
ON CONFLICT (vendor) DO NOTHING; -- IMPORTANT: Do not overwrite existing vendor settings

SELECT COUNT(*) FROM public.settings_vendor; -- Verify rows were inserted

-- Description: Creates placeholder rows in `settings_product` for each unique product
-- found in the `products` table. Sets basic defaults but no specific overrides.
-- Safe to rerun; will NOT overwrite existing product settings.
-- Dependencies: `settings_product` table must exist, `products` table populated.
-- Frequency: Run once after initial product load, or periodically if new products are added.

INSERT INTO public.settings_product (
    pid,
    lead_time_days,        -- NULL = Inherit from Vendor/Global
    days_of_stock,         -- NULL = Inherit from Vendor/Global
    safety_stock,          -- Default to 0 units initially
    forecast_method,       -- NULL = Inherit from Global ('standard')
    exclude_from_forecast  -- Default to FALSE
    -- updated_at will use its default CURRENT_TIMESTAMP on insert
)
SELECT
    p.pid,
    CAST(NULL AS INTEGER), -- Explicitly cast NULL to INTEGER
    CAST(NULL AS INTEGER), -- Explicitly cast NULL to INTEGER
    COALESCE((SELECT setting_value::int FROM settings_global WHERE setting_key = 'default_safety_stock_units'), 0), -- Use global default safety stock units
    CAST(NULL AS VARCHAR), -- Cast NULL to VARCHAR for forecast_method (already varchar, but explicit)
    FALSE -- Default: Include in forecast
FROM
    public.products p
ON CONFLICT (pid) DO NOTHING; -- IMPORTANT: Do not overwrite existing product-specific settings

-- History and status tables
CREATE TABLE IF NOT EXISTS calculate_history (
    id BIGSERIAL PRIMARY KEY,
    start_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
    end_time TIMESTAMP WITH TIME ZONE NULL,
    duration_seconds INTEGER,
    duration_minutes DECIMAL(10,2) GENERATED ALWAYS AS (duration_seconds::decimal / 60.0) STORED,
    total_products INTEGER DEFAULT 0,
    total_orders INTEGER DEFAULT 0,
    total_purchase_orders INTEGER DEFAULT 0,
    processed_products INTEGER DEFAULT 0,
    processed_orders INTEGER DEFAULT 0,
    processed_purchase_orders INTEGER DEFAULT 0,
    status calculation_status DEFAULT 'running',
    error_message TEXT,
    additional_info JSONB
);
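
-- Illustrative usage, not part of the schema: close out a calculation run.
-- Assumes the calculation_status enum includes 'completed'; duration_minutes
-- is derived automatically from duration_seconds. The run id is hypothetical.
UPDATE calculate_history
SET end_time = CURRENT_TIMESTAMP,
    duration_seconds = EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - start_time))::int,
    status = 'completed'
WHERE id = 42;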

CREATE TABLE IF NOT EXISTS calculate_status (
    module_name text PRIMARY KEY,
    last_calculation_timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
);

CREATE TABLE IF NOT EXISTS sync_status (
    table_name TEXT PRIMARY KEY,
    last_sync_timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
    last_sync_id BIGINT
);
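
-- Illustrative usage, not part of the schema: record a sync watermark for a
-- table (the values here are hypothetical), inserting or advancing the row.
INSERT INTO sync_status (table_name, last_sync_timestamp, last_sync_id)
VALUES ('orders', CURRENT_TIMESTAMP, 123456)
ON CONFLICT (table_name) DO UPDATE SET
    last_sync_timestamp = EXCLUDED.last_sync_timestamp,
    last_sync_id = EXCLUDED.last_sync_id;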

CREATE TABLE IF NOT EXISTS import_history (
    id BIGSERIAL PRIMARY KEY,
    table_name VARCHAR(50) NOT NULL,
    start_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
    end_time TIMESTAMP WITH TIME ZONE NULL,
    duration_seconds INTEGER,
    duration_minutes DECIMAL(10,2) GENERATED ALWAYS AS (duration_seconds::decimal / 60.0) STORED,
    records_added INTEGER DEFAULT 0,
    records_updated INTEGER DEFAULT 0,
    records_deleted INTEGER DEFAULT 0,
    records_skipped INTEGER DEFAULT 0,
    total_processed INTEGER DEFAULT 0,
    is_incremental BOOLEAN DEFAULT FALSE,
    status calculation_status DEFAULT 'running',
    error_message TEXT,
    additional_info JSONB
);

-- Create all indexes after tables are fully created
CREATE INDEX IF NOT EXISTS idx_last_calc ON calculate_status(last_calculation_timestamp);
CREATE INDEX IF NOT EXISTS idx_last_sync ON sync_status(last_sync_timestamp);
CREATE INDEX IF NOT EXISTS idx_table_time ON import_history(table_name, start_time);
CREATE INDEX IF NOT EXISTS idx_import_history_status ON import_history(status);
CREATE INDEX IF NOT EXISTS idx_calculate_history_status ON calculate_history(status);

-- Add comments for documentation
COMMENT ON TABLE import_history IS 'Tracks history of data import operations with detailed statistics';
COMMENT ON COLUMN import_history.records_deleted IS 'Number of records deleted during this import';
COMMENT ON COLUMN import_history.records_skipped IS 'Number of records skipped (e.g., unchanged, invalid)';
COMMENT ON COLUMN import_history.total_processed IS 'Total number of records examined/processed, including skipped';

COMMENT ON TABLE calculate_history IS 'Tracks history of metrics calculation runs with performance data';
COMMENT ON COLUMN calculate_history.duration_seconds IS 'Total duration of the calculation in seconds';
COMMENT ON COLUMN calculate_history.additional_info IS 'JSON object containing step timings, row counts, and other detailed metrics';
@@ -1,229 +0,0 @@

-- Configuration tables schema

-- Stock threshold configurations
CREATE TABLE IF NOT EXISTS stock_thresholds (
    id INT NOT NULL,
    category_id BIGINT, -- NULL means default/global threshold
    vendor VARCHAR(100), -- NULL means applies to all vendors
    critical_days INT NOT NULL DEFAULT 7,
    reorder_days INT NOT NULL DEFAULT 14,
    overstock_days INT NOT NULL DEFAULT 90,
    low_stock_threshold INT NOT NULL DEFAULT 5,
    min_reorder_quantity INT NOT NULL DEFAULT 1,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    PRIMARY KEY (id),
    FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
    UNIQUE KEY unique_category_vendor (category_id, vendor),
    INDEX idx_st_metrics (category_id, vendor)
);

-- Lead time threshold configurations
CREATE TABLE IF NOT EXISTS lead_time_thresholds (
    id INT NOT NULL,
    category_id BIGINT, -- NULL means default/global threshold
    vendor VARCHAR(100), -- NULL means applies to all vendors
    target_days INT NOT NULL DEFAULT 14,
    warning_days INT NOT NULL DEFAULT 21,
    critical_days INT NOT NULL DEFAULT 30,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    PRIMARY KEY (id),
    FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
    UNIQUE KEY unique_category_vendor (category_id, vendor)
);

-- Sales velocity window configurations
CREATE TABLE IF NOT EXISTS sales_velocity_config (
    id INT NOT NULL,
    category_id BIGINT, -- NULL means default/global threshold
    vendor VARCHAR(100), -- NULL means applies to all vendors
    daily_window_days INT NOT NULL DEFAULT 30,
    weekly_window_days INT NOT NULL DEFAULT 7,
    monthly_window_days INT NOT NULL DEFAULT 90,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    PRIMARY KEY (id),
    FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
    UNIQUE KEY unique_category_vendor (category_id, vendor),
    INDEX idx_sv_metrics (category_id, vendor)
);

-- ABC Classification configurations
CREATE TABLE IF NOT EXISTS abc_classification_config (
    id INT NOT NULL PRIMARY KEY,
    a_threshold DECIMAL(5,2) NOT NULL DEFAULT 20.0,
    b_threshold DECIMAL(5,2) NOT NULL DEFAULT 50.0,
    classification_period_days INT NOT NULL DEFAULT 90,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
);

-- Safety stock configurations
CREATE TABLE IF NOT EXISTS safety_stock_config (
    id INT NOT NULL,
    category_id BIGINT, -- NULL means default/global threshold
    vendor VARCHAR(100), -- NULL means applies to all vendors
    coverage_days INT NOT NULL DEFAULT 14,
    service_level DECIMAL(5,2) NOT NULL DEFAULT 95.0,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    PRIMARY KEY (id),
    FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
    UNIQUE KEY unique_category_vendor (category_id, vendor),
    INDEX idx_ss_metrics (category_id, vendor)
);

-- Turnover rate configurations
CREATE TABLE IF NOT EXISTS turnover_config (
    id INT NOT NULL,
    category_id BIGINT, -- NULL means default/global threshold
    vendor VARCHAR(100), -- NULL means applies to all vendors
    calculation_period_days INT NOT NULL DEFAULT 30,
    target_rate DECIMAL(10,2) NOT NULL DEFAULT 1.0,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    PRIMARY KEY (id),
    FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
    UNIQUE KEY unique_category_vendor (category_id, vendor)
);

-- Create table for sales seasonality factors
CREATE TABLE IF NOT EXISTS sales_seasonality (
    month INT NOT NULL,
    seasonality_factor DECIMAL(5,3) DEFAULT 0,
    last_updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY (month),
    CHECK (month BETWEEN 1 AND 12),
    CHECK (seasonality_factor BETWEEN -1.0 AND 1.0)
);

-- Insert default global thresholds if not exists
INSERT INTO stock_thresholds (id, category_id, vendor, critical_days, reorder_days, overstock_days)
VALUES (1, NULL, NULL, 7, 14, 90)
ON DUPLICATE KEY UPDATE
    critical_days = VALUES(critical_days),
    reorder_days = VALUES(reorder_days),
    overstock_days = VALUES(overstock_days);

INSERT INTO lead_time_thresholds (id, category_id, vendor, target_days, warning_days, critical_days)
VALUES (1, NULL, NULL, 14, 21, 30)
ON DUPLICATE KEY UPDATE
    target_days = VALUES(target_days),
    warning_days = VALUES(warning_days),
    critical_days = VALUES(critical_days);

INSERT INTO sales_velocity_config (id, category_id, vendor, daily_window_days, weekly_window_days, monthly_window_days)
VALUES (1, NULL, NULL, 30, 7, 90)
ON DUPLICATE KEY UPDATE
    daily_window_days = VALUES(daily_window_days),
    weekly_window_days = VALUES(weekly_window_days),
    monthly_window_days = VALUES(monthly_window_days);

INSERT INTO abc_classification_config (id, a_threshold, b_threshold, classification_period_days)
VALUES (1, 20.0, 50.0, 90)
ON DUPLICATE KEY UPDATE
    a_threshold = VALUES(a_threshold),
    b_threshold = VALUES(b_threshold),
    classification_period_days = VALUES(classification_period_days);

INSERT INTO safety_stock_config (id, category_id, vendor, coverage_days, service_level)
VALUES (1, NULL, NULL, 14, 95.0)
ON DUPLICATE KEY UPDATE
    coverage_days = VALUES(coverage_days),
    service_level = VALUES(service_level);

INSERT INTO turnover_config (id, category_id, vendor, calculation_period_days, target_rate)
VALUES (1, NULL, NULL, 30, 1.0)
ON DUPLICATE KEY UPDATE
    calculation_period_days = VALUES(calculation_period_days),
    target_rate = VALUES(target_rate);

-- Insert default seasonality factors (neutral)
INSERT INTO sales_seasonality (month, seasonality_factor)
VALUES
    (1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0),
    (7, 0), (8, 0), (9, 0), (10, 0), (11, 0), (12, 0)
ON DUPLICATE KEY UPDATE last_updated = CURRENT_TIMESTAMP;

-- View to show thresholds with category names
CREATE OR REPLACE VIEW stock_thresholds_view AS
SELECT
    st.*,
    c.name as category_name,
    CASE
        WHEN st.category_id IS NULL AND st.vendor IS NULL THEN 'Global Default'
        WHEN st.category_id IS NULL THEN CONCAT('Vendor: ', st.vendor)
        WHEN st.vendor IS NULL THEN CONCAT('Category: ', c.name)
        ELSE CONCAT('Category: ', c.name, ' / Vendor: ', st.vendor)
    END as threshold_scope
FROM
    stock_thresholds st
LEFT JOIN
    categories c ON st.category_id = c.cat_id
ORDER BY
    CASE
        WHEN st.category_id IS NULL AND st.vendor IS NULL THEN 1
        WHEN st.category_id IS NULL THEN 2
        WHEN st.vendor IS NULL THEN 3
        ELSE 4
    END,
    c.name,
    st.vendor;

CREATE TABLE IF NOT EXISTS calculate_history (
    id BIGINT AUTO_INCREMENT PRIMARY KEY,
    start_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    end_time TIMESTAMP NULL,
    duration_seconds INT,
    duration_minutes DECIMAL(10,2) GENERATED ALWAYS AS (duration_seconds / 60.0) STORED,
    total_products INT DEFAULT 0,
    total_orders INT DEFAULT 0,
    total_purchase_orders INT DEFAULT 0,
    processed_products INT DEFAULT 0,
    processed_orders INT DEFAULT 0,
    processed_purchase_orders INT DEFAULT 0,
    status ENUM('running', 'completed', 'failed', 'cancelled') DEFAULT 'running',
    error_message TEXT,
    additional_info JSON,
    INDEX idx_status_time (status, start_time)
);

CREATE TABLE IF NOT EXISTS calculate_status (
    module_name ENUM(
        'product_metrics',
        'time_aggregates',
        'financial_metrics',
        'vendor_metrics',
        'category_metrics',
        'brand_metrics',
        'sales_forecasts',
        'abc_classification'
    ) PRIMARY KEY,
    last_calculation_timestamp TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    INDEX idx_last_calc (last_calculation_timestamp)
);

CREATE TABLE IF NOT EXISTS sync_status (
    table_name VARCHAR(50) PRIMARY KEY,
    last_sync_timestamp TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    last_sync_id BIGINT,
    INDEX idx_last_sync (last_sync_timestamp)
);

CREATE TABLE IF NOT EXISTS import_history (
    id BIGINT AUTO_INCREMENT PRIMARY KEY,
    table_name VARCHAR(50) NOT NULL,
    start_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    end_time TIMESTAMP NULL,
    duration_seconds INT,
    duration_minutes DECIMAL(10,2) GENERATED ALWAYS AS (duration_seconds / 60.0) STORED,
    records_added INT DEFAULT 0,
    records_updated INT DEFAULT 0,
    is_incremental BOOLEAN DEFAULT FALSE,
    status ENUM('running', 'completed', 'failed', 'cancelled') DEFAULT 'running',
    error_message TEXT,
    additional_info JSON,
    INDEX idx_table_time (table_name, start_time),
    INDEX idx_status (status)
);
344
inventory-server/db/metrics-schema-new.sql
Normal file
@@ -0,0 +1,344 @@
-- Drop tables in reverse order of dependency
DROP TABLE IF EXISTS public.product_metrics CASCADE;
DROP TABLE IF EXISTS public.daily_product_snapshots CASCADE;

-- Table Definition: daily_product_snapshots
CREATE TABLE public.daily_product_snapshots (
    snapshot_date DATE NOT NULL,
    pid INT8 NOT NULL,
    sku VARCHAR, -- Copied for convenience

    -- Inventory Metrics (End of Day / Last Snapshot of Day)
    eod_stock_quantity INT NOT NULL DEFAULT 0,
    eod_stock_cost NUMERIC(14, 4) NOT NULL DEFAULT 0.00, -- Increased precision
    eod_stock_retail NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
    eod_stock_gross NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
    stockout_flag BOOLEAN NOT NULL DEFAULT FALSE,

    -- Sales Metrics (Aggregated for the snapshot_date)
    units_sold INT NOT NULL DEFAULT 0,
    units_returned INT NOT NULL DEFAULT 0,
    gross_revenue NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
    discounts NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
    returns_revenue NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
    net_revenue NUMERIC(14, 4) NOT NULL DEFAULT 0.00, -- gross_revenue - discounts
    cogs NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
    gross_regular_revenue NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
    profit NUMERIC(14, 4) NOT NULL DEFAULT 0.00, -- net_revenue - cogs

    -- Receiving Metrics (Aggregated for the snapshot_date)
    units_received INT NOT NULL DEFAULT 0,
    cost_received NUMERIC(14, 4) NOT NULL DEFAULT 0.00,

    calculation_timestamp TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,

    PRIMARY KEY (snapshot_date, pid) -- Composite primary key
    -- CONSTRAINT fk_daily_snapshot_pid FOREIGN KEY (pid) REFERENCES public.products(pid) ON DELETE CASCADE ON UPDATE CASCADE -- FK Optional on snapshot table
);

-- Add Indexes for daily_product_snapshots
CREATE INDEX idx_daily_snapshot_pid_date ON public.daily_product_snapshots(pid, snapshot_date); -- Useful for product-specific time series
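
-- Illustrative rollup, not part of the schema: the kind of 30-day aggregation
-- product_metrics is refreshed from, per the comments in that table below.
SELECT
    pid,
    SUM(units_sold)                       AS sales_30d,
    SUM(net_revenue)                      AS revenue_30d,
    COUNT(*) FILTER (WHERE stockout_flag) AS stockout_days_30d
FROM public.daily_product_snapshots
WHERE snapshot_date >= CURRENT_DATE - INTERVAL '30 days'
GROUP BY pid;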

-- Table Definition: product_metrics
CREATE TABLE public.product_metrics (
    pid INT8 PRIMARY KEY,
    last_calculated TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,

    -- Product Info (Copied for convenience/performance)
    sku VARCHAR,
    title VARCHAR,
    brand VARCHAR,
    vendor VARCHAR,
    image_url VARCHAR, -- (e.g., products.image_175)
    is_visible BOOLEAN,
    is_replenishable BOOLEAN,

    -- Additional product fields
    barcode VARCHAR,
    harmonized_tariff_code VARCHAR,
    vendor_reference VARCHAR,
    notions_reference VARCHAR,
    line VARCHAR,
    subline VARCHAR,
    artist VARCHAR,
    moq INT,
    rating NUMERIC(10, 2),
    reviews INT,
    weight NUMERIC(14, 4),
    length NUMERIC(14, 4),
    width NUMERIC(14, 4),
    height NUMERIC(14, 4),
    country_of_origin VARCHAR,
    location VARCHAR,
    baskets INT,
    notifies INT,
    preorder_count INT,
    notions_inv_count INT,

    -- Current Status (Refreshed Hourly)
    current_price NUMERIC(10, 2),
    current_regular_price NUMERIC(10, 2),
    current_cost_price NUMERIC(10, 4), -- Increased precision for cost
    current_landing_cost_price NUMERIC(10, 4), -- Increased precision for cost
    current_stock INT NOT NULL DEFAULT 0,
    current_stock_cost NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
    current_stock_retail NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
    current_stock_gross NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
    on_order_qty INT NOT NULL DEFAULT 0,
    on_order_cost NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
    on_order_retail NUMERIC(14, 4) NOT NULL DEFAULT 0.00,
    earliest_expected_date DATE,
    -- total_received_lifetime INT NOT NULL DEFAULT 0, -- Can calc if needed

    -- Historical Dates (Calculated Once/Periodically)
    date_created DATE,
    date_first_received DATE,
    date_last_received DATE,
    date_first_sold DATE,
    date_last_sold DATE,
    age_days INT, -- Calculated based on LEAST(date_created, date_first_sold)

    -- Rolling Period Metrics (Refreshed Hourly from daily_product_snapshots)
    sales_7d INT, revenue_7d NUMERIC(14, 4),
    sales_14d INT, revenue_14d NUMERIC(14, 4),
    sales_30d INT, revenue_30d NUMERIC(14, 4),
    cogs_30d NUMERIC(14, 4), profit_30d NUMERIC(14, 4),
    returns_units_30d INT, returns_revenue_30d NUMERIC(14, 4),
    discounts_30d NUMERIC(14, 4),
    gross_revenue_30d NUMERIC(14, 4), gross_regular_revenue_30d NUMERIC(14, 4),
    stockout_days_30d INT,
    sales_365d INT, revenue_365d NUMERIC(14, 4),
    avg_stock_units_30d NUMERIC(10, 2), avg_stock_cost_30d NUMERIC(14, 4),
    avg_stock_retail_30d NUMERIC(14, 4), avg_stock_gross_30d NUMERIC(14, 4),
    received_qty_30d INT, received_cost_30d NUMERIC(14, 4),

    -- Lifetime Metrics (Recalculated Hourly/Daily from daily_product_snapshots)
    lifetime_sales INT,
    lifetime_revenue NUMERIC(16, 4),
    lifetime_revenue_quality VARCHAR(10), -- 'exact', 'partial', 'estimated'

    -- First Period Metrics (Calculated Once/Periodically from daily_product_snapshots)
    first_7_days_sales INT, first_7_days_revenue NUMERIC(14, 4),
    first_30_days_sales INT, first_30_days_revenue NUMERIC(14, 4),
    first_60_days_sales INT, first_60_days_revenue NUMERIC(14, 4),
    first_90_days_sales INT, first_90_days_revenue NUMERIC(14, 4),

    -- Calculated KPIs (Refreshed Hourly based on rolling metrics)
    asp_30d NUMERIC(10, 2), -- revenue_30d / sales_30d
    acp_30d NUMERIC(10, 4), -- cogs_30d / sales_30d
    avg_ros_30d NUMERIC(10, 4), -- profit_30d / sales_30d
    avg_sales_per_day_30d NUMERIC(10, 2), -- sales_30d / 30.0
    avg_sales_per_month_30d NUMERIC(10, 2), -- sales_30d (assuming 30d = 1 month for this metric)
    margin_30d NUMERIC(8, 2), -- (profit_30d / revenue_30d) * 100
    markup_30d NUMERIC(8, 2), -- (profit_30d / cogs_30d) * 100
    gmroi_30d NUMERIC(10, 2), -- profit_30d / avg_stock_cost_30d
    stockturn_30d NUMERIC(10, 2), -- sales_30d / avg_stock_units_30d
    return_rate_30d NUMERIC(8, 2), -- returns_units_30d / (sales_30d + returns_units_30d) * 100
    discount_rate_30d NUMERIC(8, 2), -- discounts_30d / gross_revenue_30d * 100
    stockout_rate_30d NUMERIC(8, 2), -- stockout_days_30d / 30.0 * 100
    markdown_30d NUMERIC(14, 4), -- gross_regular_revenue_30d - gross_revenue_30d
    markdown_rate_30d NUMERIC(8, 2), -- markdown_30d / gross_regular_revenue_30d * 100
    sell_through_30d NUMERIC(8, 2), -- sales_30d / (current_stock + sales_30d) * 100
    avg_lead_time_days INT, -- Calculated Periodically from purchase_orders

    -- Forecasting & Replenishment (Refreshed Hourly)
    abc_class CHAR(1), -- Updated Periodically (e.g., Weekly)
    sales_velocity_daily NUMERIC(10, 4), -- sales_30d / (30.0 - stockout_days_30d)
    config_lead_time INT, -- From settings tables
    config_days_of_stock INT, -- From settings tables
    config_safety_stock INT, -- From settings_product
    planning_period_days INT, -- config_lead_time + config_days_of_stock
    lead_time_forecast_units NUMERIC(10, 2), -- sales_velocity_daily * config_lead_time
    days_of_stock_forecast_units NUMERIC(10, 2), -- sales_velocity_daily * config_days_of_stock
    planning_period_forecast_units NUMERIC(10, 2), -- lead_time_forecast_units + days_of_stock_forecast_units
    lead_time_closing_stock NUMERIC(10, 2), -- current_stock + on_order_qty - lead_time_forecast_units
    days_of_stock_closing_stock NUMERIC(10, 2), -- lead_time_closing_stock - days_of_stock_forecast_units
    replenishment_needed_raw NUMERIC(10, 2), -- planning_period_forecast_units + config_safety_stock - current_stock - on_order_qty
    replenishment_units INT, -- CEILING(GREATEST(0, replenishment_needed_raw))
    replenishment_cost NUMERIC(14, 4), -- replenishment_units * COALESCE(current_landing_cost_price, current_cost_price)
    replenishment_retail NUMERIC(14, 4), -- replenishment_units * current_price
    replenishment_profit NUMERIC(14, 4), -- replenishment_units * (current_price - COALESCE(current_landing_cost_price, current_cost_price))
    to_order_units INT, -- Apply MOQ/UOM logic to replenishment_units
    forecast_lost_sales_units NUMERIC(10, 2), -- GREATEST(0, -lead_time_closing_stock)
    forecast_lost_revenue NUMERIC(14, 4), -- forecast_lost_sales_units * current_price
    stock_cover_in_days NUMERIC(10, 1), -- current_stock / sales_velocity_daily
    po_cover_in_days NUMERIC(10, 1), -- on_order_qty / sales_velocity_daily
    sells_out_in_days NUMERIC(10, 1), -- (current_stock + on_order_qty) / sales_velocity_daily
    replenish_date DATE, -- Calc based on when stock hits safety stock minus lead time
    overstocked_units INT, -- GREATEST(0, current_stock - config_safety_stock - planning_period_forecast_units)
    overstocked_cost NUMERIC(14, 4), -- overstocked_units * COALESCE(current_landing_cost_price, current_cost_price)
    overstocked_retail NUMERIC(14, 4), -- overstocked_units * current_price
    is_old_stock BOOLEAN, -- Based on age, last sold, last received, on_order status

    -- Yesterday's Metrics (Refreshed Hourly from daily_product_snapshots)
    yesterday_sales INT,

    -- Product Status (Calculated from metrics)
    status VARCHAR, -- Stores status values like: Critical, Reorder Soon, Healthy, Overstock, At Risk, New

    -- Growth Metrics (P3)
    sales_growth_30d_vs_prev NUMERIC(10, 2), -- % growth current 30d vs prev 30d
    revenue_growth_30d_vs_prev NUMERIC(10, 2), -- % growth current 30d vs prev 30d
    sales_growth_yoy NUMERIC(10, 2), -- Year-over-year sales growth %
    revenue_growth_yoy NUMERIC(10, 2), -- Year-over-year revenue growth %

    -- Demand Variability Metrics (P3)
    sales_variance_30d NUMERIC(10, 2), -- Variance of daily sales
    sales_std_dev_30d NUMERIC(10, 2), -- Standard deviation of daily sales
    sales_cv_30d NUMERIC(10, 2), -- Coefficient of variation
    demand_pattern VARCHAR(20), -- 'stable', 'variable', 'sporadic', 'lumpy'

    -- Service Level & Fill Rate (P5)
    fill_rate_30d NUMERIC(8, 2), -- % of demand fulfilled from stock
    stockout_incidents_30d INT, -- Days with stockouts
    service_level_30d NUMERIC(8, 2), -- % of days without stockouts
    lost_sales_incidents_30d INT, -- Days with potential lost sales

    -- Seasonality (P5)
    seasonality_index NUMERIC(10, 2), -- Current vs average (100 = average)
    seasonal_pattern VARCHAR(20), -- 'none', 'weekly', 'monthly', 'quarterly', 'yearly'
    peak_season VARCHAR(20), -- e.g., 'Q4', 'summer', 'holiday'

    CONSTRAINT fk_product_metrics_pid FOREIGN KEY (pid) REFERENCES public.products(pid) ON DELETE CASCADE ON UPDATE CASCADE
);
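
-- Illustrative refresh, not part of the schema: a few of the 30-day KPIs
-- computed exactly as the column comments above define them, with NULLIF
-- guarding zero denominators. A sketch, not the actual refresh job.
UPDATE public.product_metrics
SET asp_30d          = ROUND(revenue_30d / NULLIF(sales_30d, 0), 2),
    margin_30d       = ROUND(profit_30d / NULLIF(revenue_30d, 0) * 100, 2),
    gmroi_30d        = ROUND(profit_30d / NULLIF(avg_stock_cost_30d, 0), 2),
    sell_through_30d = ROUND(sales_30d / NULLIF(current_stock + sales_30d, 0)::numeric * 100, 2),
    last_calculated  = NOW();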

-- Add Indexes for product_metrics (adjust based on common filtering/sorting in frontend)
CREATE INDEX idx_product_metrics_brand ON public.product_metrics(brand);
CREATE INDEX idx_product_metrics_vendor ON public.product_metrics(vendor);
CREATE INDEX idx_product_metrics_sku ON public.product_metrics(sku);
CREATE INDEX idx_product_metrics_abc_class ON public.product_metrics(abc_class);
CREATE INDEX idx_product_metrics_revenue_30d ON public.product_metrics(revenue_30d DESC NULLS LAST); -- Example sorting index
CREATE INDEX idx_product_metrics_sales_30d ON public.product_metrics(sales_30d DESC NULLS LAST); -- Example sorting index
CREATE INDEX idx_product_metrics_current_stock ON public.product_metrics(current_stock);
CREATE INDEX idx_product_metrics_sells_out_in_days ON public.product_metrics(sells_out_in_days ASC NULLS LAST); -- Example sorting index
CREATE INDEX idx_product_metrics_status ON public.product_metrics(status); -- Index for status filtering
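
-- Illustrative calculation, not part of the schema: the replenishment chain
-- from the column comments above, sketched as a SELECT for one hypothetical pid.
SELECT
    pid,
    sales_velocity_daily * config_lead_time     AS lead_time_forecast_units,
    sales_velocity_daily * config_days_of_stock AS days_of_stock_forecast_units,
    CEIL(GREATEST(0,
         sales_velocity_daily * (config_lead_time + config_days_of_stock)
         + config_safety_stock - current_stock - on_order_qty)) AS replenishment_units
FROM public.product_metrics
WHERE pid = 12345;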

-- Add new vendor, category, and brand metrics tables
-- Drop tables in reverse order if they exist
DROP TABLE IF EXISTS public.brand_metrics CASCADE;
DROP TABLE IF EXISTS public.vendor_metrics CASCADE;
DROP TABLE IF EXISTS public.category_metrics CASCADE;

-- ========= Category Metrics =========
CREATE TABLE public.category_metrics (
    category_id INT8 PRIMARY KEY, -- Foreign key to categories.cat_id
    category_name VARCHAR, -- Denormalized for convenience
    category_type INT2, -- Denormalized for convenience
    parent_id INT8, -- Denormalized for convenience
    last_calculated TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- ROLLED-UP METRICS (includes this category + all descendants)
    -- Counts & Basic Info
    product_count INT NOT NULL DEFAULT 0, -- Total products linked
    active_product_count INT NOT NULL DEFAULT 0, -- Visible products linked
    replenishable_product_count INT NOT NULL DEFAULT 0, -- Replenishable products linked

    -- Current Stock Value (approximated using current product costs/prices)
    current_stock_units INT NOT NULL DEFAULT 0,
    current_stock_cost NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    current_stock_retail NUMERIC(16, 4) NOT NULL DEFAULT 0.00,

    -- Rolling Period Aggregates (Summed from product_metrics)
    sales_7d INT NOT NULL DEFAULT 0, revenue_7d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    sales_30d INT NOT NULL DEFAULT 0, revenue_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    profit_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00, cogs_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    sales_365d INT NOT NULL DEFAULT 0, revenue_365d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    lifetime_sales INT NOT NULL DEFAULT 0, lifetime_revenue NUMERIC(18, 4) NOT NULL DEFAULT 0.00,

    -- DIRECT METRICS (only products directly in this category)
    direct_product_count INT NOT NULL DEFAULT 0, -- Products directly in this category
    direct_active_product_count INT NOT NULL DEFAULT 0, -- Visible products directly in this category
    direct_replenishable_product_count INT NOT NULL DEFAULT 0, -- Replenishable products directly in this category

    -- Direct Current Stock Value
    direct_current_stock_units INT NOT NULL DEFAULT 0,
    direct_stock_cost NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    direct_stock_retail NUMERIC(16, 4) NOT NULL DEFAULT 0.00,

    -- Direct Rolling Period Aggregates
    direct_sales_7d INT NOT NULL DEFAULT 0, direct_revenue_7d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    direct_sales_30d INT NOT NULL DEFAULT 0, direct_revenue_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    direct_profit_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00, direct_cogs_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    direct_sales_365d INT NOT NULL DEFAULT 0, direct_revenue_365d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    direct_lifetime_sales INT NOT NULL DEFAULT 0, direct_lifetime_revenue NUMERIC(18, 4) NOT NULL DEFAULT 0.00,

    -- Calculated KPIs (Based on 30d aggregates) - Apply to rolled-up metrics
    avg_margin_30d NUMERIC(7, 3), -- (profit / revenue) * 100
    stock_turn_30d NUMERIC(10, 3), -- sales_units / avg_stock_units (Needs avg stock calc)
    sales_growth_30d_vs_prev NUMERIC(10, 2), -- % growth in sales units
    revenue_growth_30d_vs_prev NUMERIC(10, 2), -- % growth in revenue

    CONSTRAINT fk_category_metrics_cat_id FOREIGN KEY (category_id) REFERENCES public.categories(cat_id) ON DELETE CASCADE ON UPDATE CASCADE
);
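
-- Illustrative, not part of the schema: enumerating each category together
-- with all of its descendants via categories.parent_id, the basis for the
-- "rolled-up" metrics above. A sketch, not the actual rollup job.
WITH RECURSIVE cat_tree AS (
    SELECT cat_id AS root_id, cat_id FROM public.categories
    UNION ALL
    SELECT t.root_id, c.cat_id
    FROM cat_tree t
    JOIN public.categories c ON c.parent_id = t.cat_id
)
SELECT root_id, COUNT(*) AS categories_in_subtree
FROM cat_tree
GROUP BY root_id;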
CREATE INDEX idx_category_metrics_name ON public.category_metrics(category_name);
CREATE INDEX idx_category_metrics_type ON public.category_metrics(category_type);

-- ========= Vendor Metrics =========
CREATE TABLE public.vendor_metrics (
    vendor_name VARCHAR PRIMARY KEY, -- Matches products.vendor
    last_calculated TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Counts & Basic Info
    product_count INT NOT NULL DEFAULT 0, -- Total products from this vendor
    active_product_count INT NOT NULL DEFAULT 0, -- Visible products
    replenishable_product_count INT NOT NULL DEFAULT 0, -- Replenishable products

    -- Current Stock Value (approximated)
    current_stock_units INT NOT NULL DEFAULT 0,
    current_stock_cost NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    current_stock_retail NUMERIC(16, 4) NOT NULL DEFAULT 0.00,

    -- On Order Value
    on_order_units INT NOT NULL DEFAULT 0,
    on_order_cost NUMERIC(16, 4) NOT NULL DEFAULT 0.00,

    -- PO Performance (Simplified)
    po_count_365d INT NOT NULL DEFAULT 0, -- Count of distinct POs created in last year
    avg_lead_time_days INT, -- Calculated from received POs historically

    -- Rolling Period Aggregates (Summed from product_metrics)
    sales_7d INT NOT NULL DEFAULT 0, revenue_7d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    sales_30d INT NOT NULL DEFAULT 0, revenue_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    profit_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00, cogs_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    sales_365d INT NOT NULL DEFAULT 0, revenue_365d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    lifetime_sales INT NOT NULL DEFAULT 0, lifetime_revenue NUMERIC(18, 4) NOT NULL DEFAULT 0.00,

    -- Calculated KPIs (Based on 30d aggregates)
    avg_margin_30d NUMERIC(14, 4), -- (profit / revenue) * 100
    sales_growth_30d_vs_prev NUMERIC(10, 2), -- % growth in sales units
    revenue_growth_30d_vs_prev NUMERIC(10, 2) -- % growth in revenue (no trailing comma: last column)
    -- Add more KPIs if needed (e.g., avg product value, sell-through rate for vendor)
);
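
-- Illustrative rollup, not part of the schema: vendor aggregates summed from
-- product_metrics as the comments above describe, upserted into vendor_metrics.
-- Only a subset of columns is shown; the rest keep their defaults.
INSERT INTO public.vendor_metrics (vendor_name, product_count, sales_30d, revenue_30d)
SELECT vendor, COUNT(*), COALESCE(SUM(sales_30d), 0), COALESCE(SUM(revenue_30d), 0)
FROM public.product_metrics
WHERE vendor IS NOT NULL
GROUP BY vendor
ON CONFLICT (vendor_name) DO UPDATE SET
    product_count   = EXCLUDED.product_count,
    sales_30d       = EXCLUDED.sales_30d,
    revenue_30d     = EXCLUDED.revenue_30d,
    last_calculated = NOW();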
CREATE INDEX idx_vendor_metrics_active_count ON public.vendor_metrics(active_product_count);

-- ========= Brand Metrics =========
CREATE TABLE public.brand_metrics (
    brand_name VARCHAR PRIMARY KEY, -- Matches products.brand (use 'Unbranded' for NULLs)
    last_calculated TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Counts & Basic Info
    product_count INT NOT NULL DEFAULT 0, -- Total products of this brand
    active_product_count INT NOT NULL DEFAULT 0, -- Visible products
    replenishable_product_count INT NOT NULL DEFAULT 0, -- Replenishable products

    -- Current Stock Value (approximated)
    current_stock_units INT NOT NULL DEFAULT 0,
    current_stock_cost NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    current_stock_retail NUMERIC(16, 4) NOT NULL DEFAULT 0.00,

    -- Rolling Period Aggregates (Summed from product_metrics)
    sales_7d INT NOT NULL DEFAULT 0, revenue_7d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    sales_30d INT NOT NULL DEFAULT 0, revenue_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    profit_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00, cogs_30d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    sales_365d INT NOT NULL DEFAULT 0, revenue_365d NUMERIC(16, 4) NOT NULL DEFAULT 0.00,
    lifetime_sales INT NOT NULL DEFAULT 0, lifetime_revenue NUMERIC(18, 4) NOT NULL DEFAULT 0.00,

    -- Calculated KPIs (Based on 30d aggregates)
    avg_margin_30d NUMERIC(7, 3), -- (profit / revenue) * 100
    sales_growth_30d_vs_prev NUMERIC(10, 2), -- % growth in sales units
    revenue_growth_30d_vs_prev NUMERIC(10, 2) -- % growth in revenue (no trailing comma: last column)
    -- Add more KPIs if needed (e.g., avg product value, sell-through rate for brand)
);
CREATE INDEX idx_brand_metrics_active_count ON public.brand_metrics(active_product_count);
@@ -1,411 +0,0 @@

-- Disable foreign key checks
SET FOREIGN_KEY_CHECKS = 0;

-- Temporary tables for batch metrics processing
CREATE TABLE IF NOT EXISTS temp_sales_metrics (
    pid BIGINT NOT NULL,
    daily_sales_avg DECIMAL(10,3),
    weekly_sales_avg DECIMAL(10,3),
    monthly_sales_avg DECIMAL(10,3),
    total_revenue DECIMAL(10,3),
    avg_margin_percent DECIMAL(10,3),
    first_sale_date DATE,
    last_sale_date DATE,
    PRIMARY KEY (pid)
);

CREATE TABLE IF NOT EXISTS temp_purchase_metrics (
    pid BIGINT NOT NULL,
    avg_lead_time_days INT,
    last_purchase_date DATE,
    first_received_date DATE,
    last_received_date DATE,
    PRIMARY KEY (pid)
);

-- New table for product metrics
CREATE TABLE IF NOT EXISTS product_metrics (
    pid BIGINT NOT NULL,
    last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    -- Sales velocity metrics
    daily_sales_avg DECIMAL(10,3),
    weekly_sales_avg DECIMAL(10,3),
    monthly_sales_avg DECIMAL(10,3),
    avg_quantity_per_order DECIMAL(10,3),
    number_of_orders INT,
    first_sale_date DATE,
    last_sale_date DATE,
    -- Stock metrics
    days_of_inventory INT,
    weeks_of_inventory INT,
    reorder_point INT,
    safety_stock INT,
    reorder_qty INT DEFAULT 0,
    overstocked_amt INT DEFAULT 0,
    -- Financial metrics
    avg_margin_percent DECIMAL(10,3),
    total_revenue DECIMAL(10,3),
    inventory_value DECIMAL(10,3),
    cost_of_goods_sold DECIMAL(10,3),
    gross_profit DECIMAL(10,3),
    gmroi DECIMAL(10,3),
    -- Purchase metrics
    avg_lead_time_days INT,
    last_purchase_date DATE,
    first_received_date DATE,
    last_received_date DATE,
    -- Classification metrics
    abc_class CHAR(1),
    stock_status VARCHAR(20),
    -- Turnover metrics
    turnover_rate DECIMAL(12,3),
    -- Lead time metrics
    current_lead_time INT,
    target_lead_time INT,
    lead_time_status VARCHAR(20),
    -- Forecast metrics
    forecast_accuracy DECIMAL(5,2) DEFAULT NULL,
    forecast_bias DECIMAL(5,2) DEFAULT NULL,
    last_forecast_date DATE DEFAULT NULL,
    PRIMARY KEY (pid),
    FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE,
    INDEX idx_metrics_revenue (total_revenue),
    INDEX idx_metrics_stock_status (stock_status),
    INDEX idx_metrics_lead_time (lead_time_status),
    INDEX idx_metrics_turnover (turnover_rate),
    INDEX idx_metrics_last_calculated (last_calculated_at),
    INDEX idx_metrics_abc (abc_class),
    INDEX idx_metrics_sales (daily_sales_avg, weekly_sales_avg, monthly_sales_avg),
    INDEX idx_metrics_forecast (forecast_accuracy, forecast_bias)
);

-- New table for time-based aggregates
CREATE TABLE IF NOT EXISTS product_time_aggregates (
    pid BIGINT NOT NULL,
    year INT NOT NULL,
    month INT NOT NULL,
    -- Sales metrics
    total_quantity_sold INT DEFAULT 0,
    total_revenue DECIMAL(10,3) DEFAULT 0,
    total_cost DECIMAL(10,3) DEFAULT 0,
    order_count INT DEFAULT 0,
    -- Stock changes
    stock_received INT DEFAULT 0,
    stock_ordered INT DEFAULT 0,
    -- Calculated fields
    avg_price DECIMAL(10,3),
    profit_margin DECIMAL(10,3),
    inventory_value DECIMAL(10,3),
    gmroi DECIMAL(10,3),
    PRIMARY KEY (pid, year, month),
    FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE,
    INDEX idx_date (year, month)
);

-- Create vendor_details table
CREATE TABLE vendor_details (
    vendor VARCHAR(100) PRIMARY KEY,
    contact_name VARCHAR(100),
    email VARCHAR(255),
    phone VARCHAR(50),
    status VARCHAR(20) DEFAULT 'active',
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    INDEX idx_status (status)
) ENGINE=InnoDB;

-- New table for vendor metrics
CREATE TABLE IF NOT EXISTS vendor_metrics (
    vendor VARCHAR(100) NOT NULL,
    last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    -- Performance metrics
    avg_lead_time_days DECIMAL(10,3),
    on_time_delivery_rate DECIMAL(5,2),
    order_fill_rate DECIMAL(5,2),
    total_orders INT DEFAULT 0,
    total_late_orders INT DEFAULT 0,
    total_purchase_value DECIMAL(10,3) DEFAULT 0,
    avg_order_value DECIMAL(10,3),
    -- Product metrics
    active_products INT DEFAULT 0,
    total_products INT DEFAULT 0,
    -- Financial metrics
    total_revenue DECIMAL(10,3) DEFAULT 0,
    avg_margin_percent DECIMAL(5,2),
    -- Status
    status VARCHAR(20) DEFAULT 'active',
    PRIMARY KEY (vendor),
    FOREIGN KEY (vendor) REFERENCES vendor_details(vendor) ON DELETE CASCADE,
    INDEX idx_vendor_performance (on_time_delivery_rate),
    INDEX idx_vendor_status (status),
    INDEX idx_metrics_last_calculated (last_calculated_at),
    INDEX idx_vendor_metrics_orders (total_orders, total_late_orders)
);

-- New table for category metrics
CREATE TABLE IF NOT EXISTS category_metrics (
    category_id BIGINT NOT NULL,
    last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    -- Product metrics
    product_count INT DEFAULT 0,
    active_products INT DEFAULT 0,
    -- Financial metrics
    total_value DECIMAL(15,3) DEFAULT 0,
    avg_margin DECIMAL(5,2),
    turnover_rate DECIMAL(12,3),
    growth_rate DECIMAL(5,2),
    -- Status
    status VARCHAR(20) DEFAULT 'active',
    PRIMARY KEY (category_id),
    FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
    INDEX idx_category_status (status),
    INDEX idx_category_growth (growth_rate),
    INDEX idx_metrics_last_calculated (last_calculated_at),
    INDEX idx_category_metrics_products (product_count, active_products)
);

-- New table for vendor time-based metrics
CREATE TABLE IF NOT EXISTS vendor_time_metrics (
    vendor VARCHAR(100) NOT NULL,
    year INT NOT NULL,
    month INT NOT NULL,
    -- Order metrics
    total_orders INT DEFAULT 0,
    late_orders INT DEFAULT 0,
    avg_lead_time_days DECIMAL(10,3),
    -- Financial metrics
    total_purchase_value DECIMAL(10,3) DEFAULT 0,
    total_revenue DECIMAL(10,3) DEFAULT 0,
    avg_margin_percent DECIMAL(5,2),
    PRIMARY KEY (vendor, year, month),
    FOREIGN KEY (vendor) REFERENCES vendor_details(vendor) ON DELETE CASCADE,
    INDEX idx_vendor_date (year, month)
);

-- New table for category time-based metrics
CREATE TABLE IF NOT EXISTS category_time_metrics (
    category_id BIGINT NOT NULL,
    year INT NOT NULL,
    month INT NOT NULL,
    -- Product metrics
    product_count INT DEFAULT 0,
    active_products INT DEFAULT 0,
    -- Financial metrics
    total_value DECIMAL(15,3) DEFAULT 0,
    total_revenue DECIMAL(15,3) DEFAULT 0,
    avg_margin DECIMAL(5,2),
    turnover_rate DECIMAL(12,3),
    PRIMARY KEY (category_id, year, month),
    FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
    INDEX idx_category_date (year, month)
);

-- New table for category-based sales metrics
CREATE TABLE IF NOT EXISTS category_sales_metrics (
    category_id BIGINT NOT NULL,
    brand VARCHAR(100) NOT NULL,
    period_start DATE NOT NULL,
    period_end DATE NOT NULL,
    avg_daily_sales DECIMAL(10,3) DEFAULT 0,
    total_sold INT DEFAULT 0,
    num_products INT DEFAULT 0,
    avg_price DECIMAL(10,3) DEFAULT 0,
    last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY (category_id, brand, period_start, period_end),
    FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
    INDEX idx_category_brand (category_id, brand),
    INDEX idx_period (period_start, period_end)
);

-- New table for brand metrics
CREATE TABLE IF NOT EXISTS brand_metrics (
    brand VARCHAR(100) NOT NULL,
    last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    -- Product metrics
    product_count INT DEFAULT 0,
    active_products INT DEFAULT 0,
    -- Stock metrics
    total_stock_units INT DEFAULT 0,
    total_stock_cost DECIMAL(15,2) DEFAULT 0,
    total_stock_retail DECIMAL(15,2) DEFAULT 0,
    -- Sales metrics
    total_revenue DECIMAL(15,2) DEFAULT 0,
    avg_margin DECIMAL(5,2) DEFAULT 0,
    growth_rate DECIMAL(5,2) DEFAULT 0,
    PRIMARY KEY (brand),
    INDEX idx_brand_metrics_last_calculated (last_calculated_at),
    INDEX idx_brand_metrics_revenue (total_revenue),
    INDEX idx_brand_metrics_growth (growth_rate)
);

-- New table for brand time-based metrics
CREATE TABLE IF NOT EXISTS brand_time_metrics (
    brand VARCHAR(100) NOT NULL,
    year INT NOT NULL,
    month INT NOT NULL,
    -- Product metrics
    product_count INT DEFAULT 0,
    active_products INT DEFAULT 0,
    -- Stock metrics
    total_stock_units INT DEFAULT 0,
    total_stock_cost DECIMAL(15,2) DEFAULT 0,
    total_stock_retail DECIMAL(15,2) DEFAULT 0,
    -- Sales metrics
    total_revenue DECIMAL(15,2) DEFAULT 0,
    avg_margin DECIMAL(5,2) DEFAULT 0,
    PRIMARY KEY (brand, year, month),
    INDEX idx_brand_date (year, month)
);

-- New table for sales forecasts
CREATE TABLE IF NOT EXISTS sales_forecasts (
    pid BIGINT NOT NULL,
    forecast_date DATE NOT NULL,
    forecast_units DECIMAL(10,2) DEFAULT 0,
    forecast_revenue DECIMAL(10,2) DEFAULT 0,
    confidence_level DECIMAL(5,2) DEFAULT 0,
    last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY (pid, forecast_date),
    FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE,
    INDEX idx_forecast_date (forecast_date),
    INDEX idx_forecast_last_calculated (last_calculated_at)
);

-- New table for category forecasts
CREATE TABLE IF NOT EXISTS category_forecasts (
    category_id BIGINT NOT NULL,
    forecast_date DATE NOT NULL,
    forecast_units DECIMAL(10,2) DEFAULT 0,
    forecast_revenue DECIMAL(10,2) DEFAULT 0,
    confidence_level DECIMAL(5,2) DEFAULT 0,
    last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY (category_id, forecast_date),
    FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
    INDEX idx_category_forecast_date (forecast_date),
    INDEX idx_category_forecast_last_calculated (last_calculated_at)
);

-- Create view for inventory health
CREATE OR REPLACE VIEW inventory_health AS
WITH product_thresholds AS (
    SELECT
        p.pid,
        COALESCE(
            -- Try category+vendor specific
            (SELECT critical_days FROM stock_thresholds st
             JOIN product_categories pc ON st.category_id = pc.cat_id
             WHERE pc.pid = p.pid
               AND st.vendor = p.vendor LIMIT 1),
            -- Try category specific
            (SELECT critical_days FROM stock_thresholds st
             JOIN product_categories pc ON st.category_id = pc.cat_id
             WHERE pc.pid = p.pid
               AND st.vendor IS NULL LIMIT 1),
            -- Try vendor specific
            (SELECT critical_days FROM stock_thresholds st
             WHERE st.category_id IS NULL
               AND st.vendor = p.vendor LIMIT 1),
            -- Fall back to default
            (SELECT critical_days FROM stock_thresholds st
             WHERE st.category_id IS NULL
               AND st.vendor IS NULL LIMIT 1),
            7
        ) as critical_days,
        COALESCE(
            -- Try category+vendor specific
            (SELECT reorder_days FROM stock_thresholds st
             JOIN product_categories pc ON st.category_id = pc.cat_id
             WHERE pc.pid = p.pid
               AND st.vendor = p.vendor LIMIT 1),
            -- Try category specific
            (SELECT reorder_days FROM stock_thresholds st
             JOIN product_categories pc ON st.category_id = pc.cat_id
             WHERE pc.pid = p.pid
               AND st.vendor IS NULL LIMIT 1),
            -- Try vendor specific
            (SELECT reorder_days FROM stock_thresholds st
             WHERE st.category_id IS NULL
               AND st.vendor = p.vendor LIMIT 1),
            -- Fall back to default
            (SELECT reorder_days FROM stock_thresholds st
             WHERE st.category_id IS NULL
               AND st.vendor IS NULL LIMIT 1),
            14
        ) as reorder_days,
        COALESCE(
            -- Try category+vendor specific
            (SELECT overstock_days FROM stock_thresholds st
             JOIN product_categories pc ON st.category_id = pc.cat_id
             WHERE pc.pid = p.pid
               AND st.vendor = p.vendor LIMIT 1),
            -- Try category specific
            (SELECT overstock_days FROM stock_thresholds st
             JOIN product_categories pc ON st.category_id = pc.cat_id
             WHERE pc.pid = p.pid
               AND st.vendor IS NULL LIMIT 1),
            -- Try vendor specific
            (SELECT overstock_days FROM stock_thresholds st
             WHERE st.category_id IS NULL
               AND st.vendor = p.vendor LIMIT 1),
            -- Fall back to default
            (SELECT overstock_days FROM stock_thresholds st
             WHERE st.category_id IS NULL
               AND st.vendor IS NULL LIMIT 1),
            90
        ) as overstock_days
    FROM products p
)
SELECT
    p.pid,
    p.SKU,
    p.title,
    p.stock_quantity,
    COALESCE(pm.daily_sales_avg, 0) as daily_sales_avg,
    COALESCE(pm.days_of_inventory, 0) as days_of_inventory,
    COALESCE(pm.reorder_point, 0) as reorder_point,
    COALESCE(pm.safety_stock, 0) as safety_stock,
    CASE
        WHEN pm.daily_sales_avg = 0 THEN 'New'
        WHEN p.stock_quantity <= CEIL(pm.daily_sales_avg * pt.critical_days) THEN 'Critical'
        WHEN p.stock_quantity <= CEIL(pm.daily_sales_avg * pt.reorder_days) THEN 'Reorder'
        WHEN p.stock_quantity > (pm.daily_sales_avg * pt.overstock_days) THEN 'Overstocked'
        ELSE 'Healthy'
    END as stock_status
FROM
    products p
LEFT JOIN
    product_metrics pm ON p.pid = pm.pid
LEFT JOIN
    product_thresholds pt ON p.pid = pt.pid
WHERE
    p.managing_stock = true;

-- Create view for category performance trends
CREATE OR REPLACE VIEW category_performance_trends AS
SELECT
    c.cat_id as category_id,
    c.name,
    c.description,
    p.name as parent_name,
    c.status,
    cm.product_count,
    cm.active_products,
    cm.total_value,
    cm.avg_margin,
    cm.turnover_rate,
    cm.growth_rate,
    CASE
        WHEN cm.growth_rate >= 20 THEN 'High Growth'
        WHEN cm.growth_rate >= 5 THEN 'Growing'
        WHEN cm.growth_rate >= -5 THEN 'Stable'
        ELSE 'Declining'
    END as performance_rating
FROM
    categories c
LEFT JOIN
    categories p ON c.parent_id = p.cat_id
LEFT JOIN
    category_metrics cm ON c.cat_id = cm.category_id;

-- Re-enable foreign key checks
SET FOREIGN_KEY_CHECKS = 1;
@@ -1,83 +1,113 @@
|
||||
-- Enable strict error reporting
|
||||
SET sql_mode = 'STRICT_ALL_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_ZERO_DATE,NO_ZERO_IN_DATE,NO_ENGINE_SUBSTITUTION';
|
||||
SET FOREIGN_KEY_CHECKS = 0;
|
||||
SET session_replication_role = 'replica'; -- Disable foreign key checks temporarily
|
||||
|
||||
-- Create function for updating timestamps
|
||||
CREATE OR REPLACE FUNCTION update_updated_column() RETURNS TRIGGER AS $func$
|
||||
BEGIN
|
||||
-- Check which table is being updated and use the appropriate column
|
||||
IF TG_TABLE_NAME = 'categories' THEN
|
||||
NEW.updated_at = CURRENT_TIMESTAMP;
|
||||
ELSIF TG_TABLE_NAME IN ('products', 'orders', 'purchase_orders', 'receivings') THEN
|
||||
NEW.updated = CURRENT_TIMESTAMP;
|
||||
END IF;
|
||||
RETURN NEW;
|
||||
END;
|
||||
$func$ language plpgsql;

-- Create tables
CREATE TABLE products (
    pid BIGINT NOT NULL,
    title VARCHAR(255) NOT NULL,
    title TEXT NOT NULL,
    description TEXT,
    SKU VARCHAR(50) NOT NULL,
    created_at TIMESTAMP NULL,
    first_received TIMESTAMP NULL,
    stock_quantity INT DEFAULT 0,
    preorder_count INT DEFAULT 0,
    notions_inv_count INT DEFAULT 0,
    price DECIMAL(10, 3) NOT NULL,
    regular_price DECIMAL(10, 3) NOT NULL,
    cost_price DECIMAL(10, 3),
    landing_cost_price DECIMAL(10, 3),
    barcode VARCHAR(50),
    harmonized_tariff_code VARCHAR(20),
    updated_at TIMESTAMP,
    sku TEXT NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE,
    first_received TIMESTAMP WITH TIME ZONE,
    stock_quantity INTEGER DEFAULT 0,
    preorder_count INTEGER DEFAULT 0,
    notions_inv_count INTEGER DEFAULT 0,
    price NUMERIC(14, 4) NOT NULL,
    regular_price NUMERIC(14, 4) NOT NULL,
    cost_price NUMERIC(14, 4),
    landing_cost_price NUMERIC(14, 4),
    barcode TEXT,
    harmonized_tariff_code TEXT,
    updated_at TIMESTAMP WITH TIME ZONE,
    visible BOOLEAN DEFAULT true,
    managing_stock BOOLEAN DEFAULT true,
    replenishable BOOLEAN DEFAULT true,
    vendor VARCHAR(100),
    vendor_reference VARCHAR(100),
    notions_reference VARCHAR(100),
    permalink VARCHAR(255),
    vendor TEXT,
    vendor_reference TEXT,
    notions_reference TEXT,
    permalink TEXT,
    categories TEXT,
    image VARCHAR(255),
    image_175 VARCHAR(255),
    image_full VARCHAR(255),
    brand VARCHAR(100),
    line VARCHAR(100),
    subline VARCHAR(100),
    artist VARCHAR(100),
    image TEXT,
    image_175 TEXT,
    image_full TEXT,
    brand TEXT,
    line TEXT,
    subline TEXT,
    artist TEXT,
    options TEXT,
    tags TEXT,
    moq INT DEFAULT 1,
    uom INT DEFAULT 1,
    rating DECIMAL(10,2) DEFAULT 0.00,
    reviews INT UNSIGNED DEFAULT 0,
    weight DECIMAL(10,3),
    length DECIMAL(10,3),
    width DECIMAL(10,3),
    height DECIMAL(10,3),
    country_of_origin VARCHAR(5),
    location VARCHAR(50),
    total_sold INT UNSIGNED DEFAULT 0,
    baskets INT UNSIGNED DEFAULT 0,
    notifies INT UNSIGNED DEFAULT 0,
    moq INTEGER DEFAULT 1,
    uom INTEGER DEFAULT 1,
    rating NUMERIC(14, 4) DEFAULT 0.00,
    reviews INTEGER DEFAULT 0,
    weight NUMERIC(14, 4),
    length NUMERIC(14, 4),
    width NUMERIC(14, 4),
    height NUMERIC(14, 4),
    country_of_origin TEXT,
    location TEXT,
    total_sold INTEGER DEFAULT 0,
    baskets INTEGER DEFAULT 0,
    notifies INTEGER DEFAULT 0,
    date_last_sold DATE,
    updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    PRIMARY KEY (pid),
    INDEX idx_sku (SKU),
    INDEX idx_vendor (vendor),
    INDEX idx_brand (brand),
    INDEX idx_location (location),
    INDEX idx_total_sold (total_sold),
    INDEX idx_date_last_sold (date_last_sold),
    INDEX idx_updated (updated)
) ENGINE=InnoDB;
    updated TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY (pid)
);

-- Create trigger for products
CREATE TRIGGER update_products_updated
BEFORE UPDATE ON products
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();

-- Create indexes for products table
CREATE INDEX idx_products_sku ON products(sku);
CREATE INDEX idx_products_vendor ON products(vendor);
CREATE INDEX idx_products_brand ON products(brand);
CREATE INDEX idx_products_visible ON products(visible);
CREATE INDEX idx_products_replenishable ON products(replenishable);
CREATE INDEX idx_products_updated ON products(updated);

-- Create categories table with hierarchy support
CREATE TABLE categories (
    cat_id BIGINT PRIMARY KEY,
    name VARCHAR(100) NOT NULL,
    type SMALLINT NOT NULL COMMENT '10=section, 11=category, 12=subcategory, 13=subsubcategory, 1=company, 2=line, 3=subline, 40=artist',
    name TEXT NOT NULL,
    type SMALLINT NOT NULL,
    parent_id BIGINT,
    description TEXT,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    status VARCHAR(20) DEFAULT 'active',
    FOREIGN KEY (parent_id) REFERENCES categories(cat_id),
    INDEX idx_parent (parent_id),
    INDEX idx_type (type),
    INDEX idx_status (status),
    INDEX idx_name_type (name, type)
) ENGINE=InnoDB;
    created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    updated TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    status TEXT DEFAULT 'active',
    FOREIGN KEY (parent_id) REFERENCES categories(cat_id) ON DELETE SET NULL
);

-- Create trigger for categories
CREATE TRIGGER update_categories_updated_at
BEFORE UPDATE ON categories
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();

COMMENT ON COLUMN categories.type IS '10=section, 11=category, 12=subcategory, 13=subsubcategory, 1=company, 2=line, 3=subline, 40=artist';

CREATE INDEX idx_categories_parent ON categories(parent_id);
CREATE INDEX idx_categories_type ON categories(type);
CREATE INDEX idx_categories_status ON categories(status);
CREATE INDEX idx_categories_name ON categories(name);
CREATE INDEX idx_categories_name_type ON categories(name, type);
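Because categories references itself through parent_id, the hierarchy encoded by the type values (section, category, subcategory, and so on) can be flattened with a recursive CTE. A sketch, assuming the PostgreSQL version of the table:

-- Illustrative only: resolve the full path of every category.
WITH RECURSIVE category_tree AS (
    SELECT cat_id, parent_id, name::text AS path
    FROM categories
    WHERE parent_id IS NULL
    UNION ALL
    SELECT c.cat_id, c.parent_id, ct.path || ' > ' || c.name
    FROM categories c
    JOIN category_tree ct ON c.parent_id = ct.cat_id
)
SELECT cat_id, path FROM category_tree ORDER BY path;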

-- Create product_categories junction table
CREATE TABLE product_categories (
@@ -85,78 +115,190 @@ CREATE TABLE product_categories (
    pid BIGINT NOT NULL,
    PRIMARY KEY (pid, cat_id),
    FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE,
    FOREIGN KEY (cat_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
    INDEX idx_category (cat_id),
    INDEX idx_product (pid)
) ENGINE=InnoDB;
    FOREIGN KEY (cat_id) REFERENCES categories(cat_id) ON DELETE CASCADE
);

CREATE INDEX idx_product_categories_category ON product_categories(cat_id);

-- Create orders table with its indexes
CREATE TABLE IF NOT EXISTS orders (
    id BIGINT NOT NULL AUTO_INCREMENT,
    order_number VARCHAR(50) NOT NULL,
CREATE TABLE orders (
    id BIGSERIAL PRIMARY KEY,
    order_number TEXT NOT NULL,
    pid BIGINT NOT NULL,
    SKU VARCHAR(50) NOT NULL,
    date DATE NOT NULL,
    price DECIMAL(10,3) NOT NULL,
    quantity INT NOT NULL,
    discount DECIMAL(10,3) DEFAULT 0.000,
    tax DECIMAL(10,3) DEFAULT 0.000,
    tax_included TINYINT(1) DEFAULT 0,
    shipping DECIMAL(10,3) DEFAULT 0.000,
    costeach DECIMAL(10,3) DEFAULT 0.000,
    customer VARCHAR(50) NOT NULL,
    customer_name VARCHAR(100),
    status VARCHAR(20) DEFAULT 'pending',
    canceled TINYINT(1) DEFAULT 0,
    updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    PRIMARY KEY (id),
    UNIQUE KEY unique_order_line (order_number, pid),
    KEY order_number (order_number),
    KEY pid (pid),
    KEY customer (customer),
    KEY date (date),
    KEY status (status),
    INDEX idx_orders_metrics (pid, date, canceled),
    INDEX idx_updated (updated)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
    sku TEXT NOT NULL,
    date TIMESTAMP WITH TIME ZONE NOT NULL,
    price NUMERIC(14, 4) NOT NULL,
    quantity INTEGER NOT NULL,
    discount NUMERIC(14, 4) DEFAULT 0.0000,
    tax NUMERIC(14, 4) DEFAULT 0.0000,
    tax_included BOOLEAN DEFAULT false,
    shipping NUMERIC(14, 4) DEFAULT 0.0000,
    costeach NUMERIC(14, 4) DEFAULT 0.0000,
    customer TEXT NOT NULL,
    customer_name TEXT,
    status TEXT DEFAULT 'pending',
    canceled BOOLEAN DEFAULT false,
    updated TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
    UNIQUE (order_number, pid),
    FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE RESTRICT
);

-- Create trigger for orders
CREATE TRIGGER update_orders_updated
BEFORE UPDATE ON orders
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();

CREATE INDEX idx_orders_number ON orders(order_number);
CREATE INDEX idx_orders_pid ON orders(pid);
CREATE INDEX idx_orders_sku ON orders(sku);
CREATE INDEX idx_orders_customer ON orders(customer);
CREATE INDEX idx_orders_date ON orders(date);
CREATE INDEX idx_orders_status ON orders(status);
CREATE INDEX idx_orders_pid_date ON orders(pid, date);
CREATE INDEX idx_orders_updated ON orders(updated);
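The composite idx_orders_pid_date index is what per-product time series queries lean on. A sketch of the kind of aggregate it serves, assuming discount holds a per-line amount:

-- Illustrative only: monthly units and approximate revenue for one product.
SELECT pid,
       date_trunc('month', date) AS month,
       SUM(quantity) AS units_sold,
       SUM(price * quantity - discount) AS net_revenue
FROM orders
WHERE pid = 12345 AND NOT canceled
GROUP BY pid, date_trunc('month', date)
ORDER BY month;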

-- Create purchase_orders table with its indexes
-- This table now focuses solely on purchase order intent, not receivings
CREATE TABLE purchase_orders (
    id BIGINT AUTO_INCREMENT PRIMARY KEY,
    po_id VARCHAR(50) NOT NULL,
    vendor VARCHAR(100) NOT NULL,
    date DATE NOT NULL,
    id BIGSERIAL PRIMARY KEY,
    po_id TEXT NOT NULL,
    vendor TEXT NOT NULL,
    date TIMESTAMP WITH TIME ZONE NOT NULL,
    expected_date DATE,
    pid BIGINT NOT NULL,
    sku VARCHAR(50) NOT NULL,
    name VARCHAR(100) NOT NULL COMMENT 'Product name from products.description',
    cost_price DECIMAL(10, 3) NOT NULL,
    po_cost_price DECIMAL(10, 3) NOT NULL COMMENT 'Original cost from PO, before receiving adjustments',
    status TINYINT UNSIGNED DEFAULT 1 COMMENT '0=canceled,1=created,10=electronically_ready_send,11=ordered,12=preordered,13=electronically_sent,15=receiving_started,50=done',
    receiving_status TINYINT UNSIGNED DEFAULT 1 COMMENT '0=canceled,1=created,30=partial_received,40=full_received,50=paid',
    sku TEXT NOT NULL,
    name TEXT NOT NULL,
    po_cost_price NUMERIC(14, 4) NOT NULL,
    status TEXT DEFAULT 'created',
    notes TEXT,
    long_note TEXT,
    ordered INT NOT NULL,
    received INT DEFAULT 0,
    received_date DATE COMMENT 'Date of first receiving',
    last_received_date DATE COMMENT 'Date of most recent receiving',
    received_by VARCHAR(100) COMMENT 'Name of person who first received this PO line',
    receiving_history JSON COMMENT 'Array of receiving records with qty, date, cost, receiving_id, and alt_po flag',
    updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    FOREIGN KEY (pid) REFERENCES products(pid),
    INDEX idx_po_id (po_id),
    INDEX idx_vendor (vendor),
    INDEX idx_status (status),
    INDEX idx_receiving_status (receiving_status),
    INDEX idx_purchase_orders_metrics (pid, date, status, ordered, received),
    INDEX idx_po_metrics (pid, date, receiving_status, received_date),
    INDEX idx_po_product_date (pid, date),
    INDEX idx_po_product_status (pid, status),
    INDEX idx_updated (updated),
    UNIQUE KEY unique_po_product (po_id, pid)
) ENGINE=InnoDB;
    ordered INTEGER NOT NULL,
    supplier_id INTEGER,
    date_created TIMESTAMP WITH TIME ZONE,
    date_ordered TIMESTAMP WITH TIME ZONE,
    updated TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE,
    UNIQUE (po_id, pid)
);

SET FOREIGN_KEY_CHECKS = 1;

-- Create trigger for purchase_orders
CREATE TRIGGER update_purchase_orders_updated
BEFORE UPDATE ON purchase_orders
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();

COMMENT ON COLUMN purchase_orders.name IS 'Product name from products.description';
COMMENT ON COLUMN purchase_orders.po_cost_price IS 'Original cost from PO';
COMMENT ON COLUMN purchase_orders.status IS 'canceled, created, electronically_ready_send, ordered, preordered, electronically_sent, receiving_started, done';

CREATE INDEX idx_po_id ON purchase_orders(po_id);
CREATE INDEX idx_po_sku ON purchase_orders(sku);
CREATE INDEX idx_po_vendor ON purchase_orders(vendor);
CREATE INDEX idx_po_status ON purchase_orders(status);
CREATE INDEX idx_po_expected_date ON purchase_orders(expected_date);
CREATE INDEX idx_po_pid_status ON purchase_orders(pid, status);
CREATE INDEX idx_po_pid_date ON purchase_orders(pid, date);
CREATE INDEX idx_po_updated ON purchase_orders(updated);
CREATE INDEX idx_po_supplier_id ON purchase_orders(supplier_id);

-- Create receivings table to track actual receipt of goods
CREATE TABLE receivings (
    id BIGSERIAL PRIMARY KEY,
    receiving_id TEXT NOT NULL,
    pid BIGINT NOT NULL,
    sku TEXT NOT NULL,
    name TEXT NOT NULL,
    vendor TEXT,
    qty_each INTEGER NOT NULL,
    qty_each_orig INTEGER,
    cost_each NUMERIC(14, 5) NOT NULL,
    cost_each_orig NUMERIC(14, 5),
    received_by INTEGER,
    received_by_name TEXT,
    received_date TIMESTAMP WITH TIME ZONE NOT NULL,
    receiving_created_date TIMESTAMP WITH TIME ZONE,
    supplier_id INTEGER,
    status TEXT DEFAULT 'created',
    updated TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE,
    UNIQUE (receiving_id, pid)
);

-- Create trigger for receivings
CREATE TRIGGER update_receivings_updated
BEFORE UPDATE ON receivings
FOR EACH ROW
EXECUTE FUNCTION update_updated_column();

COMMENT ON COLUMN receivings.status IS 'canceled, created, partial_received, full_received, paid';
COMMENT ON COLUMN receivings.qty_each_orig IS 'Original quantity from the source system';
COMMENT ON COLUMN receivings.cost_each_orig IS 'Original cost from the source system';
COMMENT ON COLUMN receivings.vendor IS 'Vendor name, same as in purchase_orders';

CREATE INDEX idx_receivings_id ON receivings(receiving_id);
CREATE INDEX idx_receivings_pid ON receivings(pid);
CREATE INDEX idx_receivings_sku ON receivings(sku);
CREATE INDEX idx_receivings_status ON receivings(status);
CREATE INDEX idx_receivings_received_date ON receivings(received_date);
CREATE INDEX idx_receivings_supplier_id ON receivings(supplier_id);
CREATE INDEX idx_receivings_vendor ON receivings(vendor);
CREATE INDEX idx_receivings_updated ON receivings(updated);
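With receipts split into their own table, outstanding quantities have to be reconciled across purchase_orders and receivings. A rough sketch, assuming receipts can be matched on pid alone; a production query would also match the PO reference, since one product can sit on several open POs:

-- Illustrative only: PO lines not yet fully received.
SELECT po.po_id, po.pid, po.ordered,
       COALESCE(SUM(r.qty_each), 0) AS received_units
FROM purchase_orders po
LEFT JOIN receivings r ON r.pid = po.pid
GROUP BY po.po_id, po.pid, po.ordered
HAVING po.ordered > COALESCE(SUM(r.qty_each), 0);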

SET session_replication_role = 'origin'; -- Re-enable foreign key checks

-- Create views for common calculations
-- product_sales_trends view moved to metrics-schema.sql

-- -- Historical data tables imported from production
-- CREATE TABLE imported_product_current_prices (
--     price_id BIGSERIAL PRIMARY KEY,
--     pid BIGINT NOT NULL,
--     qty_buy SMALLINT NOT NULL,
--     is_min_qty_buy BOOLEAN NOT NULL,
--     price_each NUMERIC(10,3) NOT NULL,
--     qty_limit SMALLINT NOT NULL,
--     no_promo BOOLEAN NOT NULL,
--     checkout_offer BOOLEAN NOT NULL,
--     active BOOLEAN NOT NULL,
--     date_active TIMESTAMP WITH TIME ZONE,
--     date_deactive TIMESTAMP WITH TIME ZONE,
--     updated TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
-- );

-- CREATE INDEX idx_imported_product_current_prices_pid ON imported_product_current_prices(pid, active, qty_buy);
-- CREATE INDEX idx_imported_product_current_prices_checkout ON imported_product_current_prices(checkout_offer, active);
-- CREATE INDEX idx_imported_product_current_prices_deactive ON imported_product_current_prices(date_deactive, active);
-- CREATE INDEX idx_imported_product_current_prices_active ON imported_product_current_prices(date_active, active);

-- CREATE TABLE imported_daily_inventory (
--     date DATE NOT NULL,
--     pid BIGINT NOT NULL,
--     amountsold SMALLINT NOT NULL DEFAULT 0,
--     times_sold SMALLINT NOT NULL DEFAULT 0,
--     qtyreceived SMALLINT NOT NULL DEFAULT 0,
--     price NUMERIC(7,2) NOT NULL DEFAULT 0,
--     costeach NUMERIC(7,2) NOT NULL DEFAULT 0,
--     stamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
--     updated TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
--     PRIMARY KEY (date, pid)
-- );

-- CREATE INDEX idx_imported_daily_inventory_pid ON imported_daily_inventory(pid);

-- CREATE TABLE imported_product_stat_history (
--     pid BIGINT NOT NULL,
--     date DATE NOT NULL,
--     score NUMERIC(10,2) NOT NULL,
--     score2 NUMERIC(10,2) NOT NULL,
--     qty_in_baskets SMALLINT NOT NULL,
--     qty_sold SMALLINT NOT NULL,
--     notifies_set SMALLINT NOT NULL,
--     visibility_score NUMERIC(10,2) NOT NULL,
--     health_score VARCHAR(5) NOT NULL,
--     sold_view_score NUMERIC(6,3) NOT NULL,
--     updated TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
--     PRIMARY KEY (pid, date)
-- );

-- CREATE INDEX idx_imported_product_stat_history_date ON imported_product_stat_history(date);

inventory-server/db/setup-schema.sql (Normal file, 115 lines)
@@ -0,0 +1,115 @@

-- Templates table for storing import templates
CREATE TABLE IF NOT EXISTS templates (
    id SERIAL PRIMARY KEY,
    company TEXT NOT NULL,
    product_type TEXT NOT NULL,
    supplier TEXT,
    msrp DECIMAL(10,2),
    cost_each DECIMAL(10,2),
    qty_per_unit INTEGER,
    case_qty INTEGER,
    hts_code TEXT,
    description TEXT,
    weight DECIMAL(10,2),
    length DECIMAL(10,2),
    width DECIMAL(10,2),
    height DECIMAL(10,2),
    tax_cat TEXT,
    size_cat TEXT,
    categories TEXT[],
    ship_restrictions TEXT[],
    created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    UNIQUE(company, product_type)
);

-- AI Prompts table for storing validation prompts
CREATE TABLE IF NOT EXISTS ai_prompts (
    id SERIAL PRIMARY KEY,
    prompt_text TEXT NOT NULL,
    prompt_type TEXT NOT NULL CHECK (prompt_type IN ('general', 'company_specific', 'system')),
    company TEXT,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    CONSTRAINT unique_company_prompt UNIQUE (company),
    CONSTRAINT company_required_for_specific CHECK (
        (prompt_type = 'general' AND company IS NULL) OR
        (prompt_type = 'system' AND company IS NULL) OR
        (prompt_type = 'company_specific' AND company IS NOT NULL)
    )
);

-- Create a unique partial index to ensure only one general prompt
CREATE UNIQUE INDEX IF NOT EXISTS idx_unique_general_prompt
ON ai_prompts (prompt_type)
WHERE prompt_type = 'general';

-- Create a unique partial index to ensure only one system prompt
CREATE UNIQUE INDEX IF NOT EXISTS idx_unique_system_prompt
ON ai_prompts (prompt_type)
WHERE prompt_type = 'system';
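The two partial unique indexes scope uniqueness to the rows matching their WHERE clause, which is how "at most one general prompt and one system prompt" is enforced without limiting company_specific rows. An illustration, with hypothetical prompt text:

-- Illustrative only:
INSERT INTO ai_prompts (prompt_text, prompt_type) VALUES ('Check titles.', 'general');  -- ok
INSERT INTO ai_prompts (prompt_text, prompt_type) VALUES ('Check SKUs.', 'general');    -- rejected by idx_unique_general_prompt
INSERT INTO ai_prompts (prompt_text, prompt_type, company)
VALUES ('Check dimensions.', 'company_specific', 'Acme');  -- ok, one per company via unique_company_prompt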

-- Reusable Images table for storing persistent images
CREATE TABLE IF NOT EXISTS reusable_images (
    id SERIAL PRIMARY KEY,
    name TEXT NOT NULL,
    filename TEXT NOT NULL,
    file_path TEXT NOT NULL,
    image_url TEXT NOT NULL,
    is_global BOOLEAN NOT NULL DEFAULT false,
    company TEXT,
    mime_type TEXT,
    file_size INTEGER,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    CONSTRAINT company_required_for_non_global CHECK (
        (is_global = true AND company IS NULL) OR
        (is_global = false AND company IS NOT NULL)
    )
);

-- Create index on company for efficient querying
CREATE INDEX IF NOT EXISTS idx_reusable_images_company ON reusable_images(company);
-- Create index on is_global for efficient querying
CREATE INDEX IF NOT EXISTS idx_reusable_images_is_global ON reusable_images(is_global);

-- AI Validation Performance Tracking
CREATE TABLE IF NOT EXISTS ai_validation_performance (
    id SERIAL PRIMARY KEY,
    prompt_length INTEGER NOT NULL,
    product_count INTEGER NOT NULL,
    start_time TIMESTAMP WITH TIME ZONE NOT NULL,
    end_time TIMESTAMP WITH TIME ZONE NOT NULL,
    duration_seconds DECIMAL(10,2) GENERATED ALWAYS AS (EXTRACT(EPOCH FROM (end_time - start_time))) STORED,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
);

-- Create index on prompt_length for efficient querying
CREATE INDEX IF NOT EXISTS idx_ai_validation_prompt_length ON ai_validation_performance(prompt_length);
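duration_seconds is a stored generated column: PostgreSQL computes it from start_time and end_time on every write, and it cannot be assigned directly. A hypothetical example:

-- Illustrative only: the duration is derived, not supplied.
INSERT INTO ai_validation_performance (prompt_length, product_count, start_time, end_time)
VALUES (1200, 50, '2024-01-01 10:00:00+00', '2024-01-01 10:01:30+00');
SELECT duration_seconds FROM ai_validation_performance ORDER BY id DESC LIMIT 1;  -- 90.00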

-- Function to update the updated_at timestamp
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = CURRENT_TIMESTAMP;
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Trigger to automatically update the updated_at column
CREATE TRIGGER update_templates_updated_at
BEFORE UPDATE ON templates
FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column();

-- Trigger to automatically update the updated_at column for ai_prompts
CREATE TRIGGER update_ai_prompts_updated_at
BEFORE UPDATE ON ai_prompts
FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column();

-- Trigger to automatically update the updated_at column for reusable_images
CREATE TRIGGER update_reusable_images_updated_at
BEFORE UPDATE ON reusable_images
FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column();

inventory-server/old/backfill-snapshots.js (Normal file, 426 lines)
@@ -0,0 +1,426 @@

const path = require('path');
const fs = require('fs');
const progress = require('../scripts/metrics-new/utils/progress'); // Assuming progress utils are here
const { getConnection, closePool } = require('../scripts/metrics-new/utils/db'); // Assuming db utils are here
const os = require('os'); // For detecting number of CPU cores

// --- Configuration ---
const BATCH_SIZE_DAYS = 1; // Process 1 day per database function call
const SQL_FUNCTION_FILE = path.resolve(__dirname, 'backfill_historical_snapshots.sql'); // Correct path
const LOG_PROGRESS_INTERVAL_MS = 5000; // Update console progress roughly every 5 seconds
const HISTORY_TYPE = 'backfill_snapshots'; // Identifier for history table
const MAX_WORKERS = Math.max(1, Math.floor(os.cpus().length / 2)); // Use half of available CPU cores
const USE_PARALLEL = false; // Set to true to enable parallel processing
const PG_STATEMENT_TIMEOUT_MS = 1800000; // 30 minutes max per query

// --- Cancellation Handling ---
let isCancelled = false;
let runningQueryPromise = null; // To potentially track the active query

function requestCancellation() {
  if (!isCancelled) {
    isCancelled = true;
    console.warn('\nCancellation requested. Finishing current batch then stopping...');
    // Note: We are NOT forcefully cancelling the backend query anymore.
  }
}

process.on('SIGINT', requestCancellation); // Handle Ctrl+C
process.on('SIGTERM', requestCancellation); // Handle termination signals

// --- Main Backfill Function ---
async function backfillSnapshots(cmdStartDate, cmdEndDate, cmdStartBatch = 1) {
  let connection;
  const overallStartTime = Date.now();
  let calculateHistoryId = null;
  let processedDaysTotal = 0; // Track total days processed across all batches executed in this run
  let currentBatchNum = cmdStartBatch > 0 ? cmdStartBatch : 1;
  let totalBatches = 0; // Initialize totalBatches
  let totalDays = 0; // Initialize totalDays

  console.log(`Starting snapshot backfill process...`);
  console.log(`SQL Function definition file: ${SQL_FUNCTION_FILE}`);
  if (!fs.existsSync(SQL_FUNCTION_FILE)) {
    console.error(`FATAL: SQL file not found at ${SQL_FUNCTION_FILE}`);
    process.exit(1); // Exit early if file doesn't exist
  }

  try {
    // Set up a connection with higher memory limits
    connection = await getConnection({
      // Add performance-related settings
      application_name: 'backfill_snapshots',
      statement_timeout: PG_STATEMENT_TIMEOUT_MS, // 30 min timeout per statement
      // These parameters may need to be configured in your database:
      // work_mem: '1GB',
      // maintenance_work_mem: '2GB',
      // temp_buffers: '1GB',
    });

    console.log('Database connection acquired.');

    // --- Ensure Function Exists ---
    console.log('Ensuring database function is up-to-date...');
    try {
      const sqlFunctionDef = fs.readFileSync(SQL_FUNCTION_FILE, 'utf8');
      if (!sqlFunctionDef.includes('CREATE OR REPLACE FUNCTION backfill_daily_snapshots_range_final')) {
        throw new Error(`SQL file ${SQL_FUNCTION_FILE} does not seem to contain the function definition.`);
      }
      await connection.query(sqlFunctionDef); // Execute the whole file
      console.log('Database function `backfill_daily_snapshots_range_final` created/updated.');

      // Add performance query hints to the database
      await connection.query(`
        -- Analyze tables for better query planning
        ANALYZE public.products;
        ANALYZE public.imported_daily_inventory;
        ANALYZE public.imported_product_stat_history;
        ANALYZE public.daily_product_snapshots;
        ANALYZE public.imported_product_current_prices;
      `).catch(err => {
        // Non-fatal if analyze fails
        console.warn('Failed to analyze tables (non-fatal):', err.message);
      });

    } catch (err) {
      console.error(`Error processing SQL function file ${SQL_FUNCTION_FILE}:`, err);
      throw new Error(`Failed to create or replace DB function: ${err.message}`);
    }

    // --- Prepare History Record ---
    console.log('Preparing calculation history record...');
    // Ensure history table exists (optional, could be done elsewhere)
    await connection.query(`
      CREATE TABLE IF NOT EXISTS public.calculate_history (
        id SERIAL PRIMARY KEY,
        start_time TIMESTAMPTZ NOT NULL DEFAULT NOW(),
        end_time TIMESTAMPTZ,
        duration_seconds INTEGER,
        status VARCHAR(20) NOT NULL, -- e.g., 'running', 'completed', 'failed', 'cancelled'
        error_message TEXT,
        additional_info JSONB -- Store type, file, batch info etc.
      );
    `);
    // Mark previous runs of this type as potentially failed if they were left 'running'
    await connection.query(`
      UPDATE public.calculate_history
      SET status = 'failed', error_message = 'Interrupted by new run.'
      WHERE status = 'running' AND additional_info->>'type' = $1;
    `, [HISTORY_TYPE]);

    // Create new history record
    const historyResult = await connection.query(`
      INSERT INTO public.calculate_history (start_time, status, additional_info)
      VALUES (NOW(), 'running', jsonb_build_object('type', $1::text, 'sql_file', $2::text, 'start_batch', $3::integer))
      RETURNING id;
    `, [HISTORY_TYPE, path.basename(SQL_FUNCTION_FILE), cmdStartBatch]);
    calculateHistoryId = historyResult.rows[0].id;
    console.log(`Calculation history record created with ID: ${calculateHistoryId}`);

    // --- Determine Date Range ---
    console.log('Determining date range...');
    let effectiveStartDate, effectiveEndDate;

    // Use command-line dates if provided, otherwise query DB
    if (cmdStartDate) {
      effectiveStartDate = cmdStartDate;
    } else {
      const minDateResult = await connection.query(`
        SELECT LEAST(
          COALESCE((SELECT MIN(date) FROM public.imported_daily_inventory WHERE date > '1970-01-01'), CURRENT_DATE),
          COALESCE((SELECT MIN(date) FROM public.imported_product_stat_history WHERE date > '1970-01-01'), CURRENT_DATE)
        )::date as min_date;
      `);
      effectiveStartDate = minDateResult.rows[0]?.min_date || new Date().toISOString().split('T')[0]; // Fallback
      console.log(`Auto-detected start date: ${effectiveStartDate}`);
    }

    if (cmdEndDate) {
      effectiveEndDate = cmdEndDate;
    } else {
      const maxDateResult = await connection.query(`
        SELECT GREATEST(
          COALESCE((SELECT MAX(date) FROM public.imported_daily_inventory WHERE date < CURRENT_DATE), '1970-01-01'::date),
          COALESCE((SELECT MAX(date) FROM public.imported_product_stat_history WHERE date < CURRENT_DATE), '1970-01-01'::date)
        )::date as max_date;
      `);
      // Ensure end date is not today or in the future
      effectiveEndDate = maxDateResult.rows[0]?.max_date || new Date(Date.now() - 86400000).toISOString().split('T')[0]; // Default yesterday
      if (new Date(effectiveEndDate) >= new Date(new Date().toISOString().split('T')[0])) {
        effectiveEndDate = new Date(Date.now() - 86400000).toISOString().split('T')[0]; // Set to yesterday if >= today
      }
      console.log(`Auto-detected end date: ${effectiveEndDate}`);
    }

    // Validate dates
    const dStart = new Date(effectiveStartDate);
    const dEnd = new Date(effectiveEndDate);
    if (isNaN(dStart.getTime()) || isNaN(dEnd.getTime()) || dStart > dEnd) {
      throw new Error(`Invalid date range: Start "${effectiveStartDate}", End "${effectiveEndDate}"`);
    }

    // --- Batch Processing ---
    totalDays = Math.ceil((dEnd - dStart) / (1000 * 60 * 60 * 24)) + 1; // Inclusive
    totalBatches = Math.ceil(totalDays / BATCH_SIZE_DAYS);

    console.log(`Target Date Range: ${effectiveStartDate} to ${effectiveEndDate} (${totalDays} days)`);
    console.log(`Total Batches: ${totalBatches} (Batch Size: ${BATCH_SIZE_DAYS} days)`);
    console.log(`Starting from Batch: ${currentBatchNum}`);

    // Initial progress update
    progress.outputProgress({
      status: 'running',
      operation: 'Starting Batch Processing',
      currentBatch: currentBatchNum,
      totalBatches: totalBatches,
      totalDays: totalDays,
      elapsed: '0s',
      remaining: 'Calculating...',
      rate: 0,
      historyId: calculateHistoryId // Include history ID in the object
    });

    while (currentBatchNum <= totalBatches && !isCancelled) {
      const batchOffset = (currentBatchNum - 1) * BATCH_SIZE_DAYS;
      const batchStartDate = new Date(dStart);
      batchStartDate.setDate(dStart.getDate() + batchOffset);

      const batchEndDate = new Date(batchStartDate);
      batchEndDate.setDate(batchStartDate.getDate() + BATCH_SIZE_DAYS - 1);

      // Clamp batch end date to the overall effective end date
      if (batchEndDate > dEnd) {
        batchEndDate.setTime(dEnd.getTime());
      }

      const batchStartDateStr = batchStartDate.toISOString().split('T')[0];
      const batchEndDateStr = batchEndDate.toISOString().split('T')[0];
      const batchStartTime = Date.now();

      console.log(`\n--- Processing Batch ${currentBatchNum} / ${totalBatches} ---`);
      console.log(`  Dates: ${batchStartDateStr} to ${batchEndDateStr}`);

      // Execute the function for the batch
      try {
        progress.outputProgress({
          status: 'running',
          operation: `Executing DB function for batch ${currentBatchNum}...`,
          currentBatch: currentBatchNum,
          totalBatches: totalBatches,
          totalDays: totalDays,
          elapsed: progress.formatElapsedTime(overallStartTime),
          remaining: 'Executing...',
          rate: 0,
          historyId: calculateHistoryId
        });

        // Performance improvement: Add batch processing hint
        await connection.query('SET LOCAL enable_parallel_append = on; SET LOCAL enable_parallel_hash = on; SET LOCAL max_parallel_workers_per_gather = 4;');

        // Store promise in case we need to try and cancel (though not implemented forcefully)
        runningQueryPromise = connection.query(
          `SELECT backfill_daily_snapshots_range_final($1::date, $2::date);`,
          [batchStartDateStr, batchEndDateStr]
        );
        await runningQueryPromise; // Wait for the function call to complete
        runningQueryPromise = null; // Clear the promise

        const batchDurationMs = Date.now() - batchStartTime;
        const daysInThisBatch = Math.ceil((batchEndDate - batchStartDate) / (1000 * 60 * 60 * 24)) + 1;
        processedDaysTotal += daysInThisBatch;

        console.log(`  Batch ${currentBatchNum} completed in ${progress.formatElapsedTime(batchStartTime)}.`);

        // --- Update Progress & History ---
        const overallElapsedSec = Math.round((Date.now() - overallStartTime) / 1000);
        progress.outputProgress({
          status: 'running',
          operation: `Completed batch ${currentBatchNum}`,
          currentBatch: currentBatchNum,
          totalBatches: totalBatches,
          totalDays: totalDays,
          processedDays: processedDaysTotal,
          elapsed: progress.formatElapsedTime(overallStartTime),
          remaining: progress.estimateRemaining(overallStartTime, processedDaysTotal, totalDays),
          rate: progress.calculateRate(overallStartTime, processedDaysTotal),
          batchDuration: progress.formatElapsedTime(batchStartTime),
          historyId: calculateHistoryId
        });

        // Save checkpoint in history
        await connection.query(`
          UPDATE public.calculate_history
          SET additional_info = jsonb_set(additional_info, '{last_completed_batch}', $1::jsonb)
              || jsonb_build_object('last_processed_date', $2::text)
          WHERE id = $3::integer;
        `, [JSON.stringify(currentBatchNum), batchEndDateStr, calculateHistoryId]);
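The checkpoint combines two JSONB operations: jsonb_set writes the batch number into the existing additional_info document, and the || concatenation then merges in the last processed date. The effect, with illustrative values:

-- Illustrative only: how the checkpoint merge behaves.
SELECT jsonb_set('{"type": "backfill_snapshots"}'::jsonb, '{last_completed_batch}', '7'::jsonb)
       || jsonb_build_object('last_processed_date', '2023-06-30');
-- {"type": "backfill_snapshots", "last_completed_batch": 7, "last_processed_date": "2023-06-30"}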

      } catch (batchError) {
        console.error(`\n--- ERROR in Batch ${currentBatchNum} (${batchStartDateStr} to ${batchEndDateStr}) ---`);
        console.error('  Database Error:', batchError.message);
        console.error('  DB Error Code:', batchError.code);
        // Log detailed error to history and re-throw to stop the process
        await connection.query(`
          UPDATE public.calculate_history
          SET status = 'failed',
              end_time = NOW(),
              duration_seconds = $1::integer,
              error_message = $2::text,
              additional_info = additional_info || jsonb_build_object('failed_batch', $3::integer, 'failed_date_range', $4::text)
          WHERE id = $5::integer;
        `, [
          Math.round((Date.now() - overallStartTime) / 1000),
          `Batch ${currentBatchNum} failed: ${batchError.message} (Code: ${batchError.code || 'N/A'})`,
          currentBatchNum,
          `${batchStartDateStr} to ${batchEndDateStr}`,
          calculateHistoryId
        ]);
        throw batchError; // Stop execution
      }

      currentBatchNum++;
      // Optional delay between batches
      // await new Promise(resolve => setTimeout(resolve, 500));

    } // End while loop

    // --- Final Outcome ---
    const finalStatus = isCancelled ? 'cancelled' : 'completed';
    const finalMessage = isCancelled ? `Calculation stopped after completing batch ${currentBatchNum - 1}.` : 'Historical snapshots backfill completed successfully.';
    const finalDurationSec = Math.round((Date.now() - overallStartTime) / 1000);

    console.log(`\n--- Backfill ${finalStatus.toUpperCase()} ---`);
    console.log(finalMessage);
    console.log(`Total duration: ${progress.formatElapsedTime(overallStartTime)}`);

    // Update history record
    await connection.query(`
      UPDATE public.calculate_history SET status = $1::calculation_status, end_time = NOW(), duration_seconds = $2::integer, error_message = $3
      WHERE id = $4::integer;
    `, [finalStatus, finalDurationSec, (isCancelled ? 'User cancelled' : null), calculateHistoryId]);

    if (!isCancelled) {
      progress.clearProgress(); // Clear progress state only on successful completion
    } else {
      progress.outputProgress({ // Final cancelled status update
        status: 'cancelled',
        operation: finalMessage,
        currentBatch: currentBatchNum - 1,
        totalBatches: totalBatches,
        totalDays: totalDays,
        processedDays: processedDaysTotal,
        elapsed: progress.formatElapsedTime(overallStartTime),
        remaining: 'Cancelled',
        rate: 0,
        historyId: calculateHistoryId
      });
    }

    return { success: true, status: finalStatus, message: finalMessage, duration: finalDurationSec };

  } catch (error) {
    console.error('\n--- Backfill encountered an unrecoverable error ---');
    console.error(error.message);
    const finalDurationSec = Math.round((Date.now() - overallStartTime) / 1000);

    // Update history if possible
    if (connection && calculateHistoryId) {
      try {
        await connection.query(`
          UPDATE public.calculate_history
          SET status = $1::calculation_status, end_time = NOW(), duration_seconds = $2::integer, error_message = $3::text
          WHERE id = $4::integer;
        `, [
          isCancelled ? 'cancelled' : 'failed',
          finalDurationSec,
          error.message,
          calculateHistoryId
        ]);
      } catch (histError) {
        console.error("Failed to update history record with error state:", histError);
      }
    } else {
      console.error("Could not update history record (no ID or connection).");
    }

    // FIX: Use initialized value or a default if loop never started
    const batchNumForError = currentBatchNum > cmdStartBatch ? currentBatchNum - 1 : cmdStartBatch - 1;

    // Update progress.outputProgress call to match actual function signature
    try {
      // Create progress data object
      const progressData = {
        status: 'failed',
        operation: 'Backfill failed',
        message: error.message,
        currentBatch: batchNumForError,
        totalBatches: totalBatches,
        totalDays: totalDays,
        processedDays: processedDaysTotal,
        elapsed: progress.formatElapsedTime(overallStartTime),
        remaining: 'Failed',
        rate: 0,
        // Include history ID in progress data if needed
        historyId: calculateHistoryId
      };

      // Call with single object parameter (not separate historyId)
      progress.outputProgress(progressData);
    } catch (progressError) {
      console.error('Failed to report progress:', progressError);
    }

    return { success: false, status: 'failed', error: error.message, duration: finalDurationSec };

  } finally {
    if (connection) {
      console.log('Releasing database connection.');
      connection.release();
    }
    // Close pool only if this script is meant to be standalone
    // If part of a larger app, the app should manage pool closure
    // console.log('Closing database pool.');
    // await closePool();
  }
}

// --- Script Execution ---

// Parse command-line arguments
const args = process.argv.slice(2);
let cmdStartDateArg, cmdEndDateArg, cmdStartBatchArg = 1; // Default start batch is 1

for (let i = 0; i < args.length; i++) {
  if (args[i] === '--start-date' && args[i+1]) cmdStartDateArg = args[++i];
  else if (args[i] === '--end-date' && args[i+1]) cmdEndDateArg = args[++i];
  else if (args[i] === '--start-batch' && args[i+1]) cmdStartBatchArg = parseInt(args[++i], 10);
}

if (isNaN(cmdStartBatchArg) || cmdStartBatchArg < 1) {
  console.warn(`Invalid --start-batch value. Defaulting to 1.`);
  cmdStartBatchArg = 1;
}

// Run the backfill process
backfillSnapshots(cmdStartDateArg, cmdEndDateArg, cmdStartBatchArg)
  .then(result => {
    if (result.success) {
      console.log(`\n✅ ${result.message} (Duration: ${result.duration}s)`);
      process.exitCode = 0; // Success
    } else {
      console.error(`\n❌ Backfill failed: ${result.error || 'Unknown error'} (Duration: ${result.duration}s)`);
      process.exitCode = 1; // Failure
    }
  })
  .catch(err => {
    console.error('\n❌ Unexpected error during backfill execution:', err);
    process.exitCode = 1; // Failure
  })
  .finally(async () => {
    // Ensure pool is closed if run standalone
    console.log('Backfill script finished. Closing pool.');
    await closePool(); // Make sure closePool exists and works in your db utils
    process.exit(process.exitCode); // Exit with appropriate code
  });

inventory-server/old/backfill_historical_snapshots.sql (Normal file, 161 lines)
@@ -0,0 +1,161 @@

-- Description: Backfills the daily_product_snapshots table using imported historical unit data
--              (daily inventory/stats) and historical price data (current prices table).
--              - Uses imported daily sales/receipt UNIT counts for accuracy.
--              - ESTIMATES historical stock levels using a forward calculation.
--              - APPROXIMATES historical REVENUE using looked-up historical base prices.
--              - APPROXIMATES historical COGS, PROFIT, and STOCK VALUE using CURRENT product costs/prices.
-- Run ONCE after importing historical data and before initial product_metrics population.
-- Dependencies: Core import tables (products), imported history tables (imported_daily_inventory,
--               imported_product_stat_history, imported_product_current_prices),
--               daily_product_snapshots table must exist.
-- Frequency: Run ONCE.

CREATE OR REPLACE FUNCTION backfill_daily_snapshots_range_final(
    _start_date DATE,
    _end_date DATE
)
RETURNS VOID AS $$
DECLARE
    _current_processing_date DATE := _start_date;
    _batch_start_time TIMESTAMPTZ;
    _row_count INTEGER;
BEGIN
    RAISE NOTICE 'Starting FINAL historical snapshot backfill from % to %.', _start_date, _end_date;
    RAISE NOTICE 'Using historical units and historical prices (for revenue approximation).';
    RAISE NOTICE 'WARNING: Historical COGS, Profit, and Stock Value use CURRENT product costs/prices.';

    -- Ensure end date is not in the future
    IF _end_date >= CURRENT_DATE THEN
        _end_date := CURRENT_DATE - INTERVAL '1 day';
        RAISE NOTICE 'Adjusted end date to % to avoid conflict with hourly script.', _end_date;
    END IF;

    -- Performance: Create temporary table with product info to avoid repeated lookups
    CREATE TEMP TABLE IF NOT EXISTS temp_product_info AS
    SELECT
        pid,
        sku,
        COALESCE(landing_cost_price, cost_price, 0.00) as effective_cost_price,
        COALESCE(price, 0.00) as current_price,
        COALESCE(regular_price, 0.00) as current_regular_price
    FROM public.products;

    -- Performance: Create index on temporary table
    CREATE INDEX IF NOT EXISTS temp_product_info_pid_idx ON temp_product_info(pid);

    ANALYZE temp_product_info;

    RAISE NOTICE 'Created temporary product info table with % products', (SELECT COUNT(*) FROM temp_product_info);

    WHILE _current_processing_date <= _end_date LOOP
        _batch_start_time := clock_timestamp();
        RAISE NOTICE 'Processing date: %', _current_processing_date;

        -- Get Daily Transaction Unit Info from imported history
        WITH DailyHistoryUnits AS (
            SELECT
                pids.pid,
                -- Prioritize daily_inventory, fallback to product_stat_history for sold qty
                COALESCE(di.amountsold, ps.qty_sold, 0)::integer as units_sold_today,
                COALESCE(di.qtyreceived, 0)::integer as units_received_today
            FROM
                (SELECT DISTINCT pid FROM temp_product_info) pids -- Ensure all products are considered
            LEFT JOIN public.imported_daily_inventory di
                ON pids.pid = di.pid AND di.date = _current_processing_date
            LEFT JOIN public.imported_product_stat_history ps
                ON pids.pid = ps.pid AND ps.date = _current_processing_date
            -- Removed WHERE clause to ensure snapshots are created even for days with 0 activity,
            -- allowing stock carry-over. The main query will handle products properly.
        ),
        HistoricalPrice AS (
            -- Find the base price (qty_buy=1) active on the processing date
            SELECT DISTINCT ON (pid)
                pid,
                price_each
            FROM public.imported_product_current_prices
            WHERE
                qty_buy = 1
                -- Use TIMESTAMPTZ comparison logic:
                AND date_active <= (_current_processing_date + interval '1 day' - interval '1 second') -- Active sometime on or before end of processing day
                AND (date_deactive IS NULL OR date_deactive > _current_processing_date) -- Not deactivated before start of processing day
                -- Assuming 'active' flag isn't needed if dates are correct; add 'AND active != 0' if necessary
            ORDER BY
                pid, date_active DESC -- Get the most recently activated price
        ),
        PreviousStock AS (
            -- Get the estimated stock from the PREVIOUS day snapshot
            SELECT pid, eod_stock_quantity
            FROM public.daily_product_snapshots
            WHERE snapshot_date = _current_processing_date - INTERVAL '1 day'
        )
        -- Insert into the daily snapshots table
        INSERT INTO public.daily_product_snapshots (
            snapshot_date, pid, sku,
            eod_stock_quantity, eod_stock_cost, eod_stock_retail, eod_stock_gross, stockout_flag,
            units_sold, units_returned,
            gross_revenue, discounts, returns_revenue,
            net_revenue, cogs, gross_regular_revenue, profit,
            units_received, cost_received,
            calculation_timestamp
        )
        SELECT
            _current_processing_date AS snapshot_date,
            p.pid,
            p.sku,
            -- Estimated EOD Stock (using historical daily units)
            -- Handle potential NULL from joins with COALESCE 0
            COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0) AS estimated_eod_stock,
            -- Valued Stock (using estimated stock and CURRENT prices/costs - APPROXIMATION)
            GREATEST(0, COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0)) * p.effective_cost_price AS eod_stock_cost,
            GREATEST(0, COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0)) * p.current_price AS eod_stock_retail, -- Stock retail uses current price
            GREATEST(0, COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0)) * p.current_regular_price AS eod_stock_gross, -- Stock gross uses current regular price
            -- Stockout Flag (based on estimated stock)
            (COALESCE(ps.eod_stock_quantity, 0) + COALESCE(dh.units_received_today, 0) - COALESCE(dh.units_sold_today, 0)) <= 0 AS stockout_flag,

            -- Today's Unit Aggregates from History
            COALESCE(dh.units_sold_today, 0) as units_sold,
            0 AS units_returned, -- Placeholder: Cannot determine returns from daily summary

            -- Monetary Values using looked-up Historical Price and CURRENT Cost/RegPrice
            COALESCE(dh.units_sold_today, 0) * COALESCE(hp.price_each, p.current_price) AS gross_revenue, -- Approx Revenue
            0 AS discounts, -- Placeholder
            0 AS returns_revenue, -- Placeholder
            COALESCE(dh.units_sold_today, 0) * COALESCE(hp.price_each, p.current_price) AS net_revenue, -- Approx Net Revenue
            COALESCE(dh.units_sold_today, 0) * p.effective_cost_price AS cogs, -- Approx COGS (uses CURRENT cost)
            COALESCE(dh.units_sold_today, 0) * p.current_regular_price AS gross_regular_revenue, -- Approx Gross Regular Revenue
            -- Approx Profit
            (COALESCE(dh.units_sold_today, 0) * COALESCE(hp.price_each, p.current_price)) - (COALESCE(dh.units_sold_today, 0) * p.effective_cost_price) AS profit,

            COALESCE(dh.units_received_today, 0) as units_received,
            -- Estimate received cost using CURRENT product cost
            COALESCE(dh.units_received_today, 0) * p.effective_cost_price AS cost_received, -- Approx

            clock_timestamp() -- Timestamp of this specific calculation
        FROM temp_product_info p -- Use the temp table for better performance
        LEFT JOIN PreviousStock ps ON p.pid = ps.pid
        LEFT JOIN DailyHistoryUnits dh ON p.pid = dh.pid -- Join today's historical activity
        LEFT JOIN HistoricalPrice hp ON p.pid = hp.pid -- Join the looked-up historical price
        -- Optimization: Only process products with activity or previous stock
        WHERE (dh.units_sold_today > 0 OR dh.units_received_today > 0 OR COALESCE(ps.eod_stock_quantity, 0) > 0)

        ON CONFLICT (snapshot_date, pid) DO NOTHING; -- Avoid errors if rerunning parts, but prefer clean runs

        GET DIAGNOSTICS _row_count = ROW_COUNT;
        RAISE NOTICE 'Processed %: Inserted/Skipped % rows. Duration: %',
            _current_processing_date,
            _row_count,
            clock_timestamp() - _batch_start_time;

        _current_processing_date := _current_processing_date + INTERVAL '1 day';

    END LOOP;

    -- Clean up temporary tables
    DROP TABLE IF EXISTS temp_product_info;

    RAISE NOTICE 'Finished FINAL historical snapshot backfill.';
END;
$$ LANGUAGE plpgsql;

-- Example usage:
-- SELECT backfill_daily_snapshots_range_final('2023-01-01'::date, '2023-12-31'::date);
@@ -57,18 +57,20 @@ const TEMP_TABLES = [
  'temp_daily_sales',
  'temp_product_stats',
  'temp_category_sales',
  'temp_category_stats'
  'temp_category_stats',
  'temp_beginning_inventory',
  'temp_monthly_inventory'
];

// Add cleanup function for temporary tables
async function cleanupTemporaryTables(connection) {
  try {
    // Drop each temporary table if it exists
    for (const table of TEMP_TABLES) {
      await connection.query(`DROP TEMPORARY TABLE IF EXISTS ${table}`);
      await connection.query(`DROP TABLE IF EXISTS ${table}`);
    }
  } catch (error) {
    logError(error, 'Error cleaning up temporary tables');
    throw error; // Re-throw to be handled by the caller
  } catch (err) {
    console.error('Error cleaning up temporary tables:', err);
  }
}

@@ -86,22 +88,42 @@ let isCancelled = false;

function cancelCalculation() {
  isCancelled = true;
  global.clearProgress();
  // Format as SSE event
  const event = {
    progress: {
      status: 'cancelled',
      operation: 'Calculation cancelled',
      current: 0,
      total: 0,
      elapsed: null,
      remaining: null,
      rate: 0,
      timestamp: Date.now()
    }
  console.log('Calculation has been cancelled by user');

  // Force-terminate any query that's been running for more than 5 seconds
  try {
    const connection = getConnection();
    connection.then(async (conn) => {
      try {
        // Identify and terminate long-running queries from our application
        await conn.query(`
          SELECT pg_cancel_backend(pid)
          FROM pg_stat_activity
          WHERE query_start < now() - interval '5 seconds'
          AND application_name LIKE '%node%'
          AND query NOT LIKE '%pg_cancel_backend%'
        `);

        // Clean up any temporary tables
        await cleanupTemporaryTables(conn);

        // Release connection
        conn.release();
      } catch (err) {
        console.error('Error during force cancellation:', err);
        conn.release();
      }
    }).catch(err => {
      console.error('Could not get connection for cancellation:', err);
    });
  } catch (err) {
    console.error('Failed to terminate running queries:', err);
  }

  return {
    success: true,
    message: 'Calculation has been cancelled'
  };
  process.stdout.write(JSON.stringify(event) + '\n');
  process.exit(0);
}

// Handle SIGTERM signal for cancellation
@@ -119,6 +141,15 @@ async function calculateMetrics() {
  let totalPurchaseOrders = 0;
  let calculateHistoryId;

  // Set a maximum execution time (30 minutes)
  const MAX_EXECUTION_TIME = 30 * 60 * 1000;
  const timeout = setTimeout(() => {
    console.error(`Calculation timed out after ${MAX_EXECUTION_TIME/1000} seconds, forcing termination`);
    // Call cancel and force exit
    cancelCalculation();
    process.exit(1);
  }, MAX_EXECUTION_TIME);

  try {
    // Clean up any previously running calculations
    connection = await getConnection();
@@ -127,24 +158,24 @@ async function calculateMetrics() {
      SET
        status = 'cancelled',
        end_time = NOW(),
        duration_seconds = TIMESTAMPDIFF(SECOND, start_time, NOW()),
        duration_seconds = EXTRACT(EPOCH FROM (NOW() - start_time))::INTEGER,
        error_message = 'Previous calculation was not completed properly'
      WHERE status = 'running'
    `);

    // Get counts from all relevant tables
    const [[productCount], [orderCount], [poCount]] = await Promise.all([
    const [productCountResult, orderCountResult, poCountResult] = await Promise.all([
      connection.query('SELECT COUNT(*) as total FROM products'),
      connection.query('SELECT COUNT(*) as total FROM orders'),
      connection.query('SELECT COUNT(*) as total FROM purchase_orders')
    ]);

    totalProducts = productCount.total;
    totalOrders = orderCount.total;
    totalPurchaseOrders = poCount.total;
    totalProducts = parseInt(productCountResult.rows[0].total);
    totalOrders = parseInt(orderCountResult.rows[0].total);
    totalPurchaseOrders = parseInt(poCountResult.rows[0].total);

    // Create history record for this calculation
    const [historyResult] = await connection.query(`
    const historyResult = await connection.query(`
      INSERT INTO calculate_history (
        start_time,
        status,
@@ -155,19 +186,19 @@ async function calculateMetrics() {
      ) VALUES (
        NOW(),
        'running',
        ?,
        ?,
        ?,
        JSON_OBJECT(
          'skip_product_metrics', ?,
          'skip_time_aggregates', ?,
          'skip_financial_metrics', ?,
          'skip_vendor_metrics', ?,
          'skip_category_metrics', ?,
          'skip_brand_metrics', ?,
          'skip_sales_forecasts', ?
        $1,
        $2,
        $3,
        jsonb_build_object(
          'skip_product_metrics', ($4::int > 0),
          'skip_time_aggregates', ($5::int > 0),
          'skip_financial_metrics', ($6::int > 0),
          'skip_vendor_metrics', ($7::int > 0),
          'skip_category_metrics', ($8::int > 0),
          'skip_brand_metrics', ($9::int > 0),
          'skip_sales_forecasts', ($10::int > 0)
        )
      )
      ) RETURNING id
    `, [
      totalProducts,
      totalOrders,
@@ -180,8 +211,7 @@ async function calculateMetrics() {
      SKIP_BRAND_METRICS,
      SKIP_SALES_FORECASTS
    ]);
    calculateHistoryId = historyResult.insertId;
    connection.release();
    calculateHistoryId = historyResult.rows[0].id;

    // Add debug logging for the progress functions
    console.log('Debug - Progress functions:', {
@@ -199,6 +229,8 @@ async function calculateMetrics() {
      throw err;
    }

    // Release the connection before getting a new one
    connection.release();
    isCancelled = false;
    connection = await getConnection();

@@ -234,10 +266,10 @@ async function calculateMetrics() {
    await connection.query(`
      UPDATE calculate_history
      SET
        processed_products = ?,
        processed_orders = ?,
        processed_purchase_orders = ?
      WHERE id = ?
        processed_products = $1,
        processed_orders = $2,
        processed_purchase_orders = $3
      WHERE id = $4
    `, [safeProducts, safeOrders, safePurchaseOrders, calculateHistoryId]);
  };

@@ -359,216 +391,6 @@ async function calculateMetrics() {
    console.log('Skipping sales forecasts calculation');
  }

  // Calculate ABC classification
  outputProgress({
    status: 'running',
    operation: 'Starting ABC classification',
    current: processedProducts || 0,
    total: totalProducts || 0,
    elapsed: formatElapsedTime(startTime),
    remaining: estimateRemaining(startTime, processedProducts || 0, totalProducts || 0),
    rate: calculateRate(startTime, processedProducts || 0),
    percentage: (((processedProducts || 0) / (totalProducts || 1)) * 100).toFixed(1),
    timing: {
      start_time: new Date(startTime).toISOString(),
      end_time: new Date().toISOString(),
      elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
    }
  });

  if (isCancelled) return {
    processedProducts: processedProducts || 0,
    processedOrders: processedOrders || 0,
    processedPurchaseOrders: 0,
    success: false
  };

  const [abcConfig] = await connection.query('SELECT a_threshold, b_threshold FROM abc_classification_config WHERE id = 1');
  const abcThresholds = abcConfig[0] || { a_threshold: 20, b_threshold: 50 };

  // First, create and populate the rankings table with an index
  await connection.query('DROP TEMPORARY TABLE IF EXISTS temp_revenue_ranks');
  await connection.query(`
    CREATE TEMPORARY TABLE temp_revenue_ranks (
      pid BIGINT NOT NULL,
      total_revenue DECIMAL(10,3),
      rank_num INT,
      total_count INT,
      PRIMARY KEY (pid),
      INDEX (rank_num)
    ) ENGINE=MEMORY
  `);

  outputProgress({
    status: 'running',
    operation: 'Creating revenue rankings',
    current: processedProducts || 0,
    total: totalProducts || 0,
    elapsed: formatElapsedTime(startTime),
    remaining: estimateRemaining(startTime, processedProducts || 0, totalProducts || 0),
    rate: calculateRate(startTime, processedProducts || 0),
    percentage: (((processedProducts || 0) / (totalProducts || 1)) * 100).toFixed(1),
    timing: {
      start_time: new Date(startTime).toISOString(),
      end_time: new Date().toISOString(),
      elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
    }
  });

  if (isCancelled) return {
    processedProducts: processedProducts || 0,
    processedOrders: processedOrders || 0,
    processedPurchaseOrders: 0,
    success: false
  };

  await connection.query(`
    INSERT INTO temp_revenue_ranks
    SELECT
      pid,
      total_revenue,
      @rank := @rank + 1 as rank_num,
      @total_count := @rank as total_count
    FROM (
      SELECT pid, total_revenue
      FROM product_metrics
      WHERE total_revenue > 0
      ORDER BY total_revenue DESC
    ) ranked,
    (SELECT @rank := 0) r
  `);
|
||||
|
||||
// Get total count for percentage calculation
|
||||
const [rankingCount] = await connection.query('SELECT MAX(rank_num) as total_count FROM temp_revenue_ranks');
|
||||
const totalCount = rankingCount[0].total_count || 1;
|
||||
const max_rank = totalCount; // Store max_rank for use in classification
|
||||
|
||||
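The ranking insert above relies on MySQL session variables (`@rank`), which do not exist in PostgreSQL. If this ranking were carried over to the PostgreSQL side, window functions would replace the variables in a single pass — a sketch only, assuming the same product_metrics columns, not code from this repo:

// Sketch: PostgreSQL equivalent of the @rank session-variable ranking.
// ROW_NUMBER() supplies rank_num; COUNT(*) OVER () supplies total_count.
await connection.query(`
  CREATE TEMP TABLE temp_revenue_ranks AS
  SELECT
    pid,
    total_revenue,
    ROW_NUMBER() OVER (ORDER BY total_revenue DESC) AS rank_num,
    COUNT(*) OVER () AS total_count
  FROM product_metrics
  WHERE total_revenue > 0
`);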
    outputProgress({
      status: 'running',
      operation: 'Updating ABC classifications',
      current: processedProducts || 0,
      total: totalProducts || 0,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedProducts || 0, totalProducts || 0),
      rate: calculateRate(startTime, processedProducts || 0),
      percentage: (((processedProducts || 0) / (totalProducts || 1)) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    if (isCancelled) return {
      processedProducts: processedProducts || 0,
      processedOrders: processedOrders || 0,
      processedPurchaseOrders: 0,
      success: false
    };

    // ABC classification progress tracking
    let abcProcessedCount = 0;
    const batchSize = 5000;
    let lastProgressUpdate = Date.now();
    const progressUpdateInterval = 1000; // Update every second

    while (true) {
      if (isCancelled) return {
        processedProducts: Number(processedProducts) || 0,
        processedOrders: Number(processedOrders) || 0,
        processedPurchaseOrders: 0,
        success: false
      };

      // First get a batch of PIDs that need updating
      const [pids] = await connection.query(`
        SELECT pm.pid
        FROM product_metrics pm
        LEFT JOIN temp_revenue_ranks tr ON pm.pid = tr.pid
        WHERE pm.abc_class IS NULL
          OR pm.abc_class !=
            CASE
              WHEN tr.rank_num IS NULL THEN 'C'
              WHEN (tr.rank_num / ?) * 100 <= ? THEN 'A'
              WHEN (tr.rank_num / ?) * 100 <= ? THEN 'B'
              ELSE 'C'
            END
        LIMIT ?
      `, [max_rank, abcThresholds.a_threshold,
          max_rank, abcThresholds.b_threshold,
          batchSize]);

      if (pids.length === 0) {
        break;
      }

      // Then update just those PIDs
      const [result] = await connection.query(`
        UPDATE product_metrics pm
        LEFT JOIN temp_revenue_ranks tr ON pm.pid = tr.pid
        SET pm.abc_class =
          CASE
            WHEN tr.rank_num IS NULL THEN 'C'
            WHEN (tr.rank_num / ?) * 100 <= ? THEN 'A'
            WHEN (tr.rank_num / ?) * 100 <= ? THEN 'B'
            ELSE 'C'
          END,
          pm.last_calculated_at = NOW()
        WHERE pm.pid IN (?)
      `, [max_rank, abcThresholds.a_threshold,
          max_rank, abcThresholds.b_threshold,
          pids.map(row => row.pid)]);

      abcProcessedCount += result.affectedRows;

      // Calculate progress ensuring valid numbers
      const currentProgress = Math.floor(totalProducts * (0.99 + (abcProcessedCount / (totalCount || 1)) * 0.01));
      processedProducts = Number(currentProgress) || processedProducts || 0;

      // Only update progress at most once per second
      const now = Date.now();
      if (now - lastProgressUpdate >= progressUpdateInterval) {
        const progress = ensureValidProgress(processedProducts, totalProducts);

        outputProgress({
          status: 'running',
          operation: 'ABC classification progress',
          current: progress.current,
          total: progress.total,
          elapsed: formatElapsedTime(startTime),
          remaining: estimateRemaining(startTime, progress.current, progress.total),
          rate: calculateRate(startTime, progress.current),
          percentage: progress.percentage,
          timing: {
            start_time: new Date(startTime).toISOString(),
            end_time: new Date().toISOString(),
            elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
          }
        });

        lastProgressUpdate = now;
      }

      // Update database progress
      await updateProgress(processedProducts, processedOrders, processedPurchaseOrders);

      // Small delay between batches to allow other transactions
      await new Promise(resolve => setTimeout(resolve, 100));
    }

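For reference, the CASE expression in the batch update maps a product's revenue-rank percentile onto a class. A standalone illustration of the same arithmetic: with a_threshold = 20, b_threshold = 50 and 1,000 ranked products, ranks 1-200 fall in the top 20% and become 'A', ranks 201-500 become 'B', and everything else (including unranked products with no revenue) is 'C'.

// Mirrors the SQL CASE above, for illustration only.
function abcClass(rankNum, maxRank, aThreshold, bThreshold) {
  if (rankNum == null) return 'C';        // no revenue -> never ranked
  const pct = (rankNum / maxRank) * 100;  // revenue-rank percentile
  if (pct <= aThreshold) return 'A';
  if (pct <= bThreshold) return 'B';
  return 'C';
}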
    // Clean up
    await connection.query('DROP TEMPORARY TABLE IF EXISTS temp_revenue_ranks');

    const endTime = Date.now();
    const totalElapsedSeconds = Math.round((endTime - startTime) / 1000);

    // Update calculate_status for ABC classification
    await connection.query(`
      INSERT INTO calculate_status (module_name, last_calculation_timestamp)
      VALUES ('abc_classification', NOW())
      ON DUPLICATE KEY UPDATE last_calculation_timestamp = NOW()
    `);

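Note that this upsert still uses MySQL's ON DUPLICATE KEY UPDATE; it sits in the removed side of the hunk. If the same statement were kept in the PostgreSQL port, the equivalent form — a sketch, assuming module_name is the table's primary or unique key — would be:

// Sketch: PostgreSQL upsert equivalent of the MySQL statement above.
await connection.query(`
  INSERT INTO calculate_status (module_name, last_calculation_timestamp)
  VALUES ('abc_classification', NOW())
  ON CONFLICT (module_name) DO UPDATE SET last_calculation_timestamp = NOW()
`);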
    // Final progress update with guaranteed valid numbers
    const finalProgress = ensureValidProgress(totalProducts, totalProducts);

@@ -578,14 +400,14 @@ async function calculateMetrics() {
      operation: 'Metrics calculation complete',
      current: finalProgress.current,
      total: finalProgress.total,
      elapsed: formatElapsedTime(startTime),
      elapsed: global.formatElapsedTime(startTime),
      remaining: '0s',
      rate: calculateRate(startTime, finalProgress.current),
      rate: global.calculateRate(startTime, finalProgress.current),
      percentage: '100',
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: totalElapsedSeconds
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

@@ -601,13 +423,13 @@ async function calculateMetrics() {
      UPDATE calculate_history
      SET
        end_time = NOW(),
        duration_seconds = ?,
        processed_products = ?,
        processed_orders = ?,
        processed_purchase_orders = ?,
        duration_seconds = $1,
        processed_products = $2,
        processed_orders = $3,
        processed_purchase_orders = $4,
        status = 'completed'
      WHERE id = ?
    `, [totalElapsedSeconds,
      WHERE id = $5
    `, [Math.round((Date.now() - startTime) / 1000),
      finalStats.processedProducts,
      finalStats.processedOrders,
      finalStats.processedPurchaseOrders,
@@ -616,6 +438,11 @@ async function calculateMetrics() {
    // Clear progress file on successful completion
    global.clearProgress();

    return {
      success: true,
      message: 'Calculation completed successfully',
      duration: Math.round((Date.now() - startTime) / 1000)
    };
  } catch (error) {
    const endTime = Date.now();
    const totalElapsedSeconds = Math.round((endTime - startTime) / 1000);
@@ -625,13 +452,13 @@ async function calculateMetrics() {
      UPDATE calculate_history
      SET
        end_time = NOW(),
        duration_seconds = ?,
        processed_products = ?,
        processed_orders = ?,
        processed_purchase_orders = ?,
        status = ?,
        error_message = ?
      WHERE id = ?
        duration_seconds = $1,
        processed_products = $2,
        processed_orders = $3,
        processed_purchase_orders = $4,
        status = $5,
        error_message = $6
      WHERE id = $7
    `, [
      totalElapsedSeconds,
      processedProducts || 0, // Ensure we have a valid number
@@ -677,17 +504,38 @@ async function calculateMetrics() {
      }
      throw error;
    } finally {
      // Clear the timeout to prevent forced termination
      clearTimeout(timeout);

      // Always clean up and release connection
      if (connection) {
        // Ensure temporary tables are cleaned up
        await cleanupTemporaryTables(connection);
        connection.release();
        try {
          await cleanupTemporaryTables(connection);
          connection.release();
        } catch (err) {
          console.error('Error in final cleanup:', err);
        }
      }
      // Close the connection pool when we're done
      await closePool();
    }
  } catch (error) {
    success = false;
    logError(error, 'Error in metrics calculation');
    console.error('Error in metrics calculation', error);

    try {
      if (connection) {
        await connection.query(`
          UPDATE calculate_history
          SET
            status = 'failed',
            end_time = NOW(),
            duration_seconds = EXTRACT(EPOCH FROM (NOW() - start_time))::INTEGER,
            error_message = $1
          WHERE id = $2
        `, [error.message.substring(0, 500), calculateHistoryId]);
      }
    } catch (updateError) {
      console.error('Error updating calculation history:', updateError);
    }

    throw error;
  }
}
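The hunk above wraps the finally-block cleanup in its own try/catch, so that a failed release cannot throw out of the finally block and mask the original error from the work itself. The general shape of that pattern, as a sketch with illustrative names (withConnection is not a function in this repo):

// Sketch of the cleanup pattern adopted above: errors from release are
// logged, never re-thrown, so the work's own error is what propagates.
async function withConnection(getConnection, work) {
  const connection = await getConnection();
  try {
    return await work(connection);
  } finally {
    try {
      connection.release();
    } catch (err) {
      console.error('Error releasing connection:', err);
    }
  }
}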
inventory-server/old/config-schema.sql (new file, 242 lines)
@@ -0,0 +1,242 @@
-- -- Configuration tables schema

-- -- Stock threshold configurations
-- CREATE TABLE stock_thresholds (
--     id INTEGER NOT NULL,
--     category_id BIGINT, -- NULL means default/global threshold
--     vendor VARCHAR(100), -- NULL means applies to all vendors
--     critical_days INTEGER NOT NULL DEFAULT 7,
--     reorder_days INTEGER NOT NULL DEFAULT 14,
--     overstock_days INTEGER NOT NULL DEFAULT 90,
--     low_stock_threshold INTEGER NOT NULL DEFAULT 5,
--     min_reorder_quantity INTEGER NOT NULL DEFAULT 1,
--     created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
--     updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
--     PRIMARY KEY (id),
--     FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
--     UNIQUE (category_id, vendor)
-- );

-- CREATE TRIGGER update_stock_thresholds_updated
-- BEFORE UPDATE ON stock_thresholds
-- FOR EACH ROW
-- EXECUTE FUNCTION update_updated_at_column();

-- CREATE INDEX idx_st_metrics ON stock_thresholds(category_id, vendor);

-- -- Lead time threshold configurations
-- CREATE TABLE lead_time_thresholds (
--     id INTEGER NOT NULL,
--     category_id BIGINT, -- NULL means default/global threshold
--     vendor VARCHAR(100), -- NULL means applies to all vendors
--     target_days INTEGER NOT NULL DEFAULT 14,
--     warning_days INTEGER NOT NULL DEFAULT 21,
--     critical_days INTEGER NOT NULL DEFAULT 30,
--     created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
--     updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
--     PRIMARY KEY (id),
--     FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
--     UNIQUE (category_id, vendor)
-- );

-- CREATE TRIGGER update_lead_time_thresholds_updated
-- BEFORE UPDATE ON lead_time_thresholds
-- FOR EACH ROW
-- EXECUTE FUNCTION update_updated_at_column();

-- -- Sales velocity window configurations
-- CREATE TABLE sales_velocity_config (
--     id INTEGER NOT NULL,
--     category_id BIGINT, -- NULL means default/global threshold
--     vendor VARCHAR(100), -- NULL means applies to all vendors
--     daily_window_days INTEGER NOT NULL DEFAULT 30,
--     weekly_window_days INTEGER NOT NULL DEFAULT 7,
--     monthly_window_days INTEGER NOT NULL DEFAULT 90,
--     created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
--     updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
--     PRIMARY KEY (id),
--     FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
--     UNIQUE (category_id, vendor)
-- );

-- CREATE TRIGGER update_sales_velocity_config_updated
-- BEFORE UPDATE ON sales_velocity_config
-- FOR EACH ROW
-- EXECUTE FUNCTION update_updated_at_column();

-- CREATE INDEX idx_sv_metrics ON sales_velocity_config(category_id, vendor);

-- -- ABC Classification configurations
-- CREATE TABLE abc_classification_config (
--     id INTEGER NOT NULL PRIMARY KEY,
--     a_threshold DECIMAL(5,2) NOT NULL DEFAULT 20.0,
--     b_threshold DECIMAL(5,2) NOT NULL DEFAULT 50.0,
--     classification_period_days INTEGER NOT NULL DEFAULT 90,
--     created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
--     updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
-- );

-- CREATE TRIGGER update_abc_classification_config_updated
-- BEFORE UPDATE ON abc_classification_config
-- FOR EACH ROW
-- EXECUTE FUNCTION update_updated_at_column();

-- -- Safety stock configurations
-- CREATE TABLE safety_stock_config (
--     id INTEGER NOT NULL,
--     category_id BIGINT, -- NULL means default/global threshold
--     vendor VARCHAR(100), -- NULL means applies to all vendors
--     coverage_days INTEGER NOT NULL DEFAULT 14,
--     service_level DECIMAL(5,2) NOT NULL DEFAULT 95.0,
--     created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
--     updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
--     PRIMARY KEY (id),
--     FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
--     UNIQUE (category_id, vendor)
-- );

-- CREATE TRIGGER update_safety_stock_config_updated
-- BEFORE UPDATE ON safety_stock_config
-- FOR EACH ROW
-- EXECUTE FUNCTION update_updated_at_column();

-- CREATE INDEX idx_ss_metrics ON safety_stock_config(category_id, vendor);

-- -- Turnover rate configurations
-- CREATE TABLE turnover_config (
--     id INTEGER NOT NULL,
--     category_id BIGINT, -- NULL means default/global threshold
--     vendor VARCHAR(100), -- NULL means applies to all vendors
--     calculation_period_days INTEGER NOT NULL DEFAULT 30,
--     target_rate DECIMAL(10,2) NOT NULL DEFAULT 1.0,
--     created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
--     updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
--     PRIMARY KEY (id),
--     FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE,
--     UNIQUE (category_id, vendor)
-- );

-- CREATE TRIGGER update_turnover_config_updated
-- BEFORE UPDATE ON turnover_config
-- FOR EACH ROW
-- EXECUTE FUNCTION update_updated_at_column();

-- -- Create table for sales seasonality factors
-- CREATE TABLE sales_seasonality (
--     month INTEGER NOT NULL,
--     seasonality_factor DECIMAL(5,3) DEFAULT 0,
--     last_updated TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
--     PRIMARY KEY (month),
--     CONSTRAINT month_range CHECK (month BETWEEN 1 AND 12),
--     CONSTRAINT seasonality_range CHECK (seasonality_factor BETWEEN -1.0 AND 1.0)
-- );

-- CREATE TRIGGER update_sales_seasonality_updated
-- BEFORE UPDATE ON sales_seasonality
-- FOR EACH ROW
-- EXECUTE FUNCTION update_updated_at_column();

-- -- Create table for financial calculation parameters
-- CREATE TABLE financial_calc_config (
--     id INTEGER NOT NULL PRIMARY KEY,
--     order_cost DECIMAL(10,2) NOT NULL DEFAULT 25.00, -- The fixed cost per purchase order (used in EOQ)
--     holding_rate DECIMAL(10,4) NOT NULL DEFAULT 0.25, -- The annual inventory holding cost as a percentage of unit cost (used in EOQ)
--     service_level_z_score DECIMAL(10,4) NOT NULL DEFAULT 1.96, -- Z-score for ~95% service level (used in Safety Stock)
--     min_reorder_qty INTEGER NOT NULL DEFAULT 1, -- Minimum reorder quantity
--     default_reorder_qty INTEGER NOT NULL DEFAULT 5, -- Default reorder quantity when sales data is insufficient
--     default_safety_stock INTEGER NOT NULL DEFAULT 5, -- Default safety stock when sales data is insufficient
--     created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
--     updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
-- );

-- CREATE TRIGGER update_financial_calc_config_updated
-- BEFORE UPDATE ON financial_calc_config
-- FOR EACH ROW
-- EXECUTE FUNCTION update_updated_at_column();

-- -- Insert default global thresholds
-- INSERT INTO stock_thresholds (id, category_id, vendor, critical_days, reorder_days, overstock_days)
-- VALUES (1, NULL, NULL, 7, 14, 90)
-- ON CONFLICT (id) DO UPDATE SET
--     critical_days = EXCLUDED.critical_days,
--     reorder_days = EXCLUDED.reorder_days,
--     overstock_days = EXCLUDED.overstock_days;

-- INSERT INTO lead_time_thresholds (id, category_id, vendor, target_days, warning_days, critical_days)
-- VALUES (1, NULL, NULL, 14, 21, 30)
-- ON CONFLICT (id) DO UPDATE SET
--     target_days = EXCLUDED.target_days,
--     warning_days = EXCLUDED.warning_days,
--     critical_days = EXCLUDED.critical_days;

-- INSERT INTO sales_velocity_config (id, category_id, vendor, daily_window_days, weekly_window_days, monthly_window_days)
-- VALUES (1, NULL, NULL, 30, 7, 90)
-- ON CONFLICT (id) DO UPDATE SET
--     daily_window_days = EXCLUDED.daily_window_days,
--     weekly_window_days = EXCLUDED.weekly_window_days,
--     monthly_window_days = EXCLUDED.monthly_window_days;

-- INSERT INTO abc_classification_config (id, a_threshold, b_threshold, classification_period_days)
-- VALUES (1, 20.0, 50.0, 90)
-- ON CONFLICT (id) DO UPDATE SET
--     a_threshold = EXCLUDED.a_threshold,
--     b_threshold = EXCLUDED.b_threshold,
--     classification_period_days = EXCLUDED.classification_period_days;

-- INSERT INTO safety_stock_config (id, category_id, vendor, coverage_days, service_level)
-- VALUES (1, NULL, NULL, 14, 95.0)
-- ON CONFLICT (id) DO UPDATE SET
--     coverage_days = EXCLUDED.coverage_days,
--     service_level = EXCLUDED.service_level;

-- INSERT INTO turnover_config (id, category_id, vendor, calculation_period_days, target_rate)
-- VALUES (1, NULL, NULL, 30, 1.0)
-- ON CONFLICT (id) DO UPDATE SET
--     calculation_period_days = EXCLUDED.calculation_period_days,
--     target_rate = EXCLUDED.target_rate;

-- -- Insert default seasonality factors (neutral)
-- INSERT INTO sales_seasonality (month, seasonality_factor)
-- VALUES
--     (1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0),
--     (7, 0), (8, 0), (9, 0), (10, 0), (11, 0), (12, 0)
-- ON CONFLICT (month) DO UPDATE SET
--     last_updated = CURRENT_TIMESTAMP;

-- -- Insert default values
-- INSERT INTO financial_calc_config (id, order_cost, holding_rate, service_level_z_score, min_reorder_qty, default_reorder_qty, default_safety_stock)
-- VALUES (1, 25.00, 0.25, 1.96, 1, 5, 5)
-- ON CONFLICT (id) DO UPDATE SET
--     order_cost = EXCLUDED.order_cost,
--     holding_rate = EXCLUDED.holding_rate,
--     service_level_z_score = EXCLUDED.service_level_z_score,
--     min_reorder_qty = EXCLUDED.min_reorder_qty,
--     default_reorder_qty = EXCLUDED.default_reorder_qty,
--     default_safety_stock = EXCLUDED.default_safety_stock;

-- -- View to show thresholds with category names
-- CREATE OR REPLACE VIEW stock_thresholds_view AS
-- SELECT
--     st.*,
--     c.name as category_name,
--     CASE
--         WHEN st.category_id IS NULL AND st.vendor IS NULL THEN 'Global Default'
--         WHEN st.category_id IS NULL THEN 'Vendor: ' || st.vendor
--         WHEN st.vendor IS NULL THEN 'Category: ' || c.name
--         ELSE 'Category: ' || c.name || ' / Vendor: ' || st.vendor
--     END as threshold_scope
-- FROM
--     stock_thresholds st
-- LEFT JOIN
--     categories c ON st.category_id = c.cat_id
-- ORDER BY
--     CASE
--         WHEN st.category_id IS NULL AND st.vendor IS NULL THEN 1
--         WHEN st.category_id IS NULL THEN 2
--         WHEN st.vendor IS NULL THEN 3
--         ELSE 4
--     END,
--     c.name,
--     st.vendor;
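The NULL conventions in this (commented-out) schema imply a most-specific-first lookup: a category+vendor row beats a category-only row, which beats a vendor-only row, which beats the global default. If these tables were live, resolving the effective thresholds for one product could look like the following sketch (localConnection, categoryId and vendorName are illustrative names, not from this file):

// Sketch only: pick the most specific matching stock_thresholds row.
const { rows } = await localConnection.query(`
  SELECT critical_days, reorder_days, overstock_days
  FROM stock_thresholds
  WHERE (category_id = $1 OR category_id IS NULL)
    AND (vendor = $2 OR vendor IS NULL)
  ORDER BY (category_id IS NOT NULL)::int + (vendor IS NOT NULL)::int DESC
  LIMIT 1
`, [categoryId, vendorName]);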
inventory-server/old/historical-data.js (new file, 961 lines)
@@ -0,0 +1,961 @@
const { outputProgress, formatElapsedTime, estimateRemaining, calculateRate } = require('../scripts/metrics-new/utils/progress');
const fs = require('fs');
const path = require('path');
const { pipeline } = require('stream');
const { promisify } = require('util');

// Configuration constants to control which tables get imported
const IMPORT_PRODUCT_CURRENT_PRICES = false;
const IMPORT_DAILY_INVENTORY = false;
const IMPORT_PRODUCT_STAT_HISTORY = true;

// For product stat history, limit to more recent data for faster initial import
const USE_RECENT_MONTHS = 12; // Just use the most recent months for product_stat_history

/**
 * Validates a date from MySQL before inserting it into PostgreSQL
 * @param {string|Date|null} mysqlDate - Date string or object from MySQL
 * @returns {string|null} Valid date string or null if invalid
 */
function validateDate(mysqlDate) {
  // Handle null, undefined, or empty values
  if (!mysqlDate) {
    return null;
  }

  // Convert to string if it's not already
  const dateStr = String(mysqlDate);

  // Handle MySQL zero dates and empty values
  if (dateStr === '0000-00-00' ||
      dateStr === '0000-00-00 00:00:00' ||
      dateStr.indexOf('0000-00-00') !== -1 ||
      dateStr === '') {
    return null;
  }

  // Check if the date is valid
  const date = new Date(mysqlDate);

  // If the date is invalid or suspiciously old (pre-1970), return null
  if (isNaN(date.getTime()) || date.getFullYear() < 1970) {
    return null;
  }

  return mysqlDate;
}

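A quick usage illustration of validateDate (the input values below are examples): MySQL zero dates and empty strings become NULL instead of failing the PostgreSQL insert, while valid dates pass through unchanged.

console.log(validateDate('0000-00-00'));          // -> null
console.log(validateDate(''));                    // -> null
console.log(validateDate('2024-03-01 10:15:00')); // -> '2024-03-01 10:15:00'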
/**
 * Imports historical data from MySQL to PostgreSQL
 */
async function importHistoricalData(
  prodConnection,
  localConnection,
  options = {}
) {
  const {
    incrementalUpdate = true,
    oneYearAgo = new Date(new Date().setFullYear(new Date().getFullYear() - 1))
  } = options;

  const oneYearAgoStr = oneYearAgo.toISOString().split('T')[0];
  const startTime = Date.now();

  // Use larger batch sizes to improve performance
  const BATCH_SIZE = 5000; // For fetching from small tables
  const INSERT_BATCH_SIZE = 500; // For inserting to small tables
  const LARGE_BATCH_SIZE = 10000; // For fetching from large tables
  const LARGE_INSERT_BATCH_SIZE = 1000; // For inserting to large tables

  // Calculate date for recent data
  const recentDateStr = new Date(
    new Date().setMonth(new Date().getMonth() - USE_RECENT_MONTHS)
  ).toISOString().split('T')[0];

  console.log(`Starting import with:
  - One year ago date: ${oneYearAgoStr}
  - Recent months date: ${recentDateStr} (for product_stat_history)
  - Incremental update: ${incrementalUpdate}
  - Standard batch size: ${BATCH_SIZE}
  - Standard insert batch size: ${INSERT_BATCH_SIZE}
  - Large table batch size: ${LARGE_BATCH_SIZE}
  - Large table insert batch size: ${LARGE_INSERT_BATCH_SIZE}
  - Import product_current_prices: ${IMPORT_PRODUCT_CURRENT_PRICES}
  - Import daily_inventory: ${IMPORT_DAILY_INVENTORY}
  - Import product_stat_history: ${IMPORT_PRODUCT_STAT_HISTORY}`);

  try {
    // Get last sync time for incremental updates
    const lastSyncTimes = {};

    if (incrementalUpdate) {
      try {
        const syncResult = await localConnection.query(`
          SELECT table_name, last_sync_timestamp
          FROM sync_status
          WHERE table_name IN (
            'imported_product_current_prices',
            'imported_daily_inventory',
            'imported_product_stat_history'
          )
        `);

        // Add check for rows existence and type
        if (syncResult && Array.isArray(syncResult.rows)) {
          for (const row of syncResult.rows) {
            lastSyncTimes[row.table_name] = row.last_sync_timestamp;
            console.log(`Last sync time for ${row.table_name}: ${row.last_sync_timestamp}`);
          }
        } else {
          console.warn('Sync status query did not return expected rows. Proceeding without last sync times.');
        }
      } catch (error) {
        console.error('Error fetching sync status:', error);
      }
    }

    // Determine how many tables will be imported
    const tablesCount = [
      IMPORT_PRODUCT_CURRENT_PRICES,
      IMPORT_DAILY_INVENTORY,
      IMPORT_PRODUCT_STAT_HISTORY
    ].filter(Boolean).length;

    // Run all imports sequentially for better reliability
    console.log(`Starting sequential imports for ${tablesCount} tables...`);
    outputProgress({
      status: "running",
      operation: "Historical data import",
      message: `Starting sequential imports for ${tablesCount} tables...`,
      current: 0,
      total: tablesCount,
      elapsed: formatElapsedTime(startTime)
    });

    let progressCount = 0;
    let productCurrentPricesResult = { recordsAdded: 0, recordsUpdated: 0, totalProcessed: 0, errors: [] };
    let dailyInventoryResult = { recordsAdded: 0, recordsUpdated: 0, totalProcessed: 0, errors: [] };
    let productStatHistoryResult = { recordsAdded: 0, recordsUpdated: 0, totalProcessed: 0, errors: [] };

    // Import product current prices
    if (IMPORT_PRODUCT_CURRENT_PRICES) {
      console.log('Importing product current prices...');
      productCurrentPricesResult = await importProductCurrentPrices(
        prodConnection,
        localConnection,
        oneYearAgoStr,
        lastSyncTimes['imported_product_current_prices'],
        BATCH_SIZE,
        INSERT_BATCH_SIZE,
        incrementalUpdate,
        startTime
      );
      progressCount++;

      outputProgress({
        status: "running",
        operation: "Historical data import",
        message: `Completed import ${progressCount} of ${tablesCount}`,
        current: progressCount,
        total: tablesCount,
        elapsed: formatElapsedTime(startTime)
      });
    }

    // Import daily inventory
    if (IMPORT_DAILY_INVENTORY) {
      console.log('Importing daily inventory...');
      dailyInventoryResult = await importDailyInventory(
        prodConnection,
        localConnection,
        oneYearAgoStr,
        lastSyncTimes['imported_daily_inventory'],
        BATCH_SIZE,
        INSERT_BATCH_SIZE,
        incrementalUpdate,
        startTime
      );
      progressCount++;

      outputProgress({
        status: "running",
        operation: "Historical data import",
        message: `Completed import ${progressCount} of ${tablesCount}`,
        current: progressCount,
        total: tablesCount,
        elapsed: formatElapsedTime(startTime)
      });
    }

    // Import product stat history - using optimized approach
    if (IMPORT_PRODUCT_STAT_HISTORY) {
      console.log('Importing product stat history...');
      productStatHistoryResult = await importProductStatHistory(
        prodConnection,
        localConnection,
        recentDateStr, // Use more recent date for this massive table
        lastSyncTimes['imported_product_stat_history'],
        LARGE_BATCH_SIZE,
        LARGE_INSERT_BATCH_SIZE,
        incrementalUpdate,
        startTime,
        USE_RECENT_MONTHS // Pass the recent months constant
      );
      progressCount++;

      outputProgress({
        status: "running",
        operation: "Historical data import",
        message: `Completed import ${progressCount} of ${tablesCount}`,
        current: progressCount,
        total: tablesCount,
        elapsed: formatElapsedTime(startTime)
      });
    }

    // Aggregate results
    const totalRecordsAdded =
      productCurrentPricesResult.recordsAdded +
      dailyInventoryResult.recordsAdded +
      productStatHistoryResult.recordsAdded;

    const totalRecordsUpdated =
      productCurrentPricesResult.recordsUpdated +
      dailyInventoryResult.recordsUpdated +
      productStatHistoryResult.recordsUpdated;

    const totalProcessed =
      productCurrentPricesResult.totalProcessed +
      dailyInventoryResult.totalProcessed +
      productStatHistoryResult.totalProcessed;

    const allErrors = [
      ...productCurrentPricesResult.errors,
      ...dailyInventoryResult.errors,
      ...productStatHistoryResult.errors
    ];

    // Log import summary
    console.log(`
Historical data import complete:
-------------------------------
Records added: ${totalRecordsAdded}
Records updated: ${totalRecordsUpdated}
Total processed: ${totalProcessed}
Errors: ${allErrors.length}
Time taken: ${formatElapsedTime(startTime)}
`);

    // Final progress update
    outputProgress({
      status: "complete",
      operation: "Historical data import",
      message: `Import complete. Added: ${totalRecordsAdded}, Updated: ${totalRecordsUpdated}, Errors: ${allErrors.length}`,
      current: tablesCount,
      total: tablesCount,
      elapsed: formatElapsedTime(startTime)
    });

    // Log any errors
    if (allErrors.length > 0) {
      console.log('Errors encountered during import:');
      console.log(JSON.stringify(allErrors, null, 2));
    }

    // Calculate duration
    const endTime = Date.now();
    const durationSeconds = Math.round((endTime - startTime) / 1000);
    const finalStatus = allErrors.length === 0 ? 'complete' : 'failed';
    const errorMessage = allErrors.length > 0 ? JSON.stringify(allErrors) : null;

    // Update import history
    await localConnection.query(`
      INSERT INTO import_history (
        table_name,
        end_time,
        duration_seconds,
        records_added,
        records_updated,
        is_incremental,
        status,
        error_message,
        additional_info
      )
      VALUES ($1, NOW(), $2, $3, $4, $5, $6, $7, $8)
    `, [
      'historical_data_combined',
      durationSeconds,
      totalRecordsAdded,
      totalRecordsUpdated,
      incrementalUpdate,
      finalStatus,
      errorMessage,
      JSON.stringify({
        totalProcessed,
        tablesImported: {
          imported_product_current_prices: IMPORT_PRODUCT_CURRENT_PRICES,
          imported_daily_inventory: IMPORT_DAILY_INVENTORY,
          imported_product_stat_history: IMPORT_PRODUCT_STAT_HISTORY
        }
      })
    ]);

    // Return summary
    return {
      recordsAdded: totalRecordsAdded,
      recordsUpdated: totalRecordsUpdated,
      totalProcessed,
      errors: allErrors,
      timeTaken: formatElapsedTime(startTime)
    };
  } catch (error) {
    console.error('Error importing historical data:', error);

    // Final progress update on error
    outputProgress({
      status: "failed",
      operation: "Historical data import",
      message: `Import failed: ${error.message}`,
      elapsed: formatElapsedTime(startTime)
    });

    throw error;
  }
}

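The module exports importHistoricalData (see the end of this file), so a caller supplies one MySQL connection ("prod") and one PostgreSQL connection ("local"). A hypothetical invocation — connection setup is not shown in this file, and the pool variables below are illustrative:

// Hypothetical caller: wire up the two pools and run an incremental import.
const importHistoricalData = require('./historical-data');

async function run(prodPool, localPool) {
  const summary = await importHistoricalData(prodPool, localPool, {
    incrementalUpdate: true
  });
  console.log(`Added ${summary.recordsAdded}, updated ${summary.recordsUpdated}`);
}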
/**
 * Imports product_current_prices data from MySQL to PostgreSQL
 */
async function importProductCurrentPrices(
  prodConnection,
  localConnection,
  oneYearAgoStr,
  lastSyncTime,
  batchSize,
  insertBatchSize,
  incrementalUpdate,
  startTime
) {
  let recordsAdded = 0;
  let recordsUpdated = 0;
  let totalProcessed = 0;
  let errors = [];
  let offset = 0;
  let allProcessed = false;

  try {
    // Get total count for progress reporting
    const [countResult] = await prodConnection.query(`
      SELECT COUNT(*) as total
      FROM product_current_prices
      WHERE (date_active >= ? OR date_deactive >= ?)
      ${incrementalUpdate && lastSyncTime ? `AND date_deactive > ?` : ''}
    `, [oneYearAgoStr, oneYearAgoStr, ...(incrementalUpdate && lastSyncTime ? [lastSyncTime] : [])]);

    const totalCount = countResult[0].total;

    outputProgress({
      status: "running",
      operation: "Historical data import - Product Current Prices",
      message: `Found ${totalCount} records to process`,
      current: 0,
      total: totalCount,
      elapsed: formatElapsedTime(startTime)
    });

    // Process in batches for better performance
    while (!allProcessed) {
      try {
        // Fetch batch from production
        const [rows] = await prodConnection.query(`
          SELECT
            price_id,
            pid,
            qty_buy,
            is_min_qty_buy,
            price_each,
            qty_limit,
            no_promo,
            checkout_offer,
            active,
            date_active,
            date_deactive
          FROM product_current_prices
          WHERE (date_active >= ? OR date_deactive >= ?)
          ${incrementalUpdate && lastSyncTime ? `AND date_deactive > ?` : ''}
          ORDER BY price_id
          LIMIT ? OFFSET ?
        `, [
          oneYearAgoStr,
          oneYearAgoStr,
          ...(incrementalUpdate && lastSyncTime ? [lastSyncTime] : []),
          batchSize,
          offset
        ]);

        if (rows.length === 0) {
          allProcessed = true;
          break;
        }

        // Process rows in smaller batches for better performance
        for (let i = 0; i < rows.length; i += insertBatchSize) {
          const batch = rows.slice(i, i + insertBatchSize);

          if (batch.length === 0) continue;

          try {
            // Build parameterized query to handle NULL values properly
            const values = [];
            const placeholders = [];
            let placeholderIndex = 1;

            for (const row of batch) {
              const rowPlaceholders = [
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`
              ];

              placeholders.push(`(${rowPlaceholders.join(', ')})`);

              values.push(
                row.price_id,
                row.pid,
                row.qty_buy,
                row.is_min_qty_buy ? true : false,
                row.price_each,
                row.qty_limit, // PostgreSQL will handle null values properly
                row.no_promo ? true : false,
                row.checkout_offer ? true : false,
                row.active ? true : false,
                validateDate(row.date_active),
                validateDate(row.date_deactive)
              );
            }

            // Execute batch insert
            const result = await localConnection.query(`
              WITH ins AS (
                INSERT INTO imported_product_current_prices (
                  price_id, pid, qty_buy, is_min_qty_buy, price_each, qty_limit,
                  no_promo, checkout_offer, active, date_active, date_deactive
                )
                VALUES ${placeholders.join(',\n')}
                ON CONFLICT (price_id) DO UPDATE SET
                  pid = EXCLUDED.pid,
                  qty_buy = EXCLUDED.qty_buy,
                  is_min_qty_buy = EXCLUDED.is_min_qty_buy,
                  price_each = EXCLUDED.price_each,
                  qty_limit = EXCLUDED.qty_limit,
                  no_promo = EXCLUDED.no_promo,
                  checkout_offer = EXCLUDED.checkout_offer,
                  active = EXCLUDED.active,
                  date_active = EXCLUDED.date_active,
                  date_deactive = EXCLUDED.date_deactive,
                  updated = CURRENT_TIMESTAMP
                RETURNING (xmax = 0) AS inserted
              )
              SELECT
                COUNT(*) FILTER (WHERE inserted) AS inserted_count,
                COUNT(*) FILTER (WHERE NOT inserted) AS updated_count
              FROM ins
            `, values);

            // Safely update counts based on the result
            if (result && result.rows && result.rows.length > 0) {
              const insertedCount = parseInt(result.rows[0].inserted_count || 0);
              const updatedCount = parseInt(result.rows[0].updated_count || 0);

              recordsAdded += insertedCount;
              recordsUpdated += updatedCount;
            }
          } catch (error) {
            console.error(`Error in batch import of product_current_prices at offset ${i}:`, error);
            errors.push({
              table: 'imported_product_current_prices',
              batchOffset: i,
              batchSize: batch.length,
              error: error.message
            });
          }
        }

        totalProcessed += rows.length;
        offset += rows.length;

        // Update progress
        outputProgress({
          status: "running",
          operation: "Historical data import - Product Current Prices",
          message: `Processed ${totalProcessed} of ${totalCount} records`,
          current: totalProcessed,
          total: totalCount,
          elapsed: formatElapsedTime(startTime),
          remaining: estimateRemaining(startTime, totalProcessed, totalCount),
          rate: calculateRate(startTime, totalProcessed)
        });
      } catch (error) {
        console.error('Error in batch import of product_current_prices:', error);
        errors.push({
          table: 'imported_product_current_prices',
          error: error.message,
          offset: offset,
          batchSize: batchSize
        });

        // Try to continue with next batch
        offset += batchSize;
      }
    }

    // Update sync status
    await localConnection.query(`
      INSERT INTO sync_status (table_name, last_sync_timestamp)
      VALUES ('imported_product_current_prices', NOW())
      ON CONFLICT (table_name) DO UPDATE SET
        last_sync_timestamp = NOW()
    `);

    return { recordsAdded, recordsUpdated, totalProcessed, errors };
  } catch (error) {
    console.error('Error in product current prices import:', error);
    return {
      recordsAdded,
      recordsUpdated,
      totalProcessed,
      errors: [...errors, {
        table: 'imported_product_current_prices',
        error: error.message
      }]
    };
  }
}

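A note on the `RETURNING (xmax = 0) AS inserted` trick used above: in PostgreSQL a freshly inserted row has xmax = 0, while a row taken through the ON CONFLICT ... DO UPDATE branch carries a non-zero xmax. This is an implementation detail of PostgreSQL's MVCC rather than documented API, but it lets the CTE count inserts and updates per batch in one round trip. Reduced to a minimal, self-contained sketch (demo_table is hypothetical):

// Sketch: distinguish inserts from updates in one upsert round trip.
// Relies on the PostgreSQL MVCC detail that inserted rows have xmax = 0.
const { rows } = await localConnection.query(`
  WITH ins AS (
    INSERT INTO demo_table (id, val) VALUES (1, 'a')
    ON CONFLICT (id) DO UPDATE SET val = EXCLUDED.val
    RETURNING (xmax = 0) AS inserted
  )
  SELECT COUNT(*) FILTER (WHERE inserted)     AS inserted_count,
         COUNT(*) FILTER (WHERE NOT inserted) AS updated_count
  FROM ins
`);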
/**
 * Imports daily_inventory data from MySQL to PostgreSQL
 */
async function importDailyInventory(
  prodConnection,
  localConnection,
  oneYearAgoStr,
  lastSyncTime,
  batchSize,
  insertBatchSize,
  incrementalUpdate,
  startTime
) {
  let recordsAdded = 0;
  let recordsUpdated = 0;
  let totalProcessed = 0;
  let errors = [];
  let offset = 0;
  let allProcessed = false;

  try {
    // Get total count for progress reporting
    const [countResult] = await prodConnection.query(`
      SELECT COUNT(*) as total
      FROM daily_inventory
      WHERE date >= ?
      ${incrementalUpdate && lastSyncTime ? `AND stamp > ?` : ''}
    `, [oneYearAgoStr, ...(incrementalUpdate && lastSyncTime ? [lastSyncTime] : [])]);

    const totalCount = countResult[0].total;

    outputProgress({
      status: "running",
      operation: "Historical data import - Daily Inventory",
      message: `Found ${totalCount} records to process`,
      current: 0,
      total: totalCount,
      elapsed: formatElapsedTime(startTime)
    });

    // Process in batches for better performance
    while (!allProcessed) {
      try {
        // Fetch batch from production
        const [rows] = await prodConnection.query(`
          SELECT
            date,
            pid,
            amountsold,
            times_sold,
            qtyreceived,
            price,
            costeach,
            stamp
          FROM daily_inventory
          WHERE date >= ?
          ${incrementalUpdate && lastSyncTime ? `AND stamp > ?` : ''}
          ORDER BY date, pid
          LIMIT ? OFFSET ?
        `, [
          oneYearAgoStr,
          ...(incrementalUpdate && lastSyncTime ? [lastSyncTime] : []),
          batchSize,
          offset
        ]);

        if (rows.length === 0) {
          allProcessed = true;
          break;
        }

        // Process rows in smaller batches for better performance
        for (let i = 0; i < rows.length; i += insertBatchSize) {
          const batch = rows.slice(i, i + insertBatchSize);

          if (batch.length === 0) continue;

          try {
            // Build parameterized query to handle NULL values properly
            const values = [];
            const placeholders = [];
            let placeholderIndex = 1;

            for (const row of batch) {
              const rowPlaceholders = [
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`
              ];

              placeholders.push(`(${rowPlaceholders.join(', ')})`);

              values.push(
                validateDate(row.date),
                row.pid,
                row.amountsold || 0,
                row.times_sold || 0,
                row.qtyreceived || 0,
                row.price || 0,
                row.costeach || 0,
                validateDate(row.stamp)
              );
            }

            // Execute batch insert
            const result = await localConnection.query(`
              WITH ins AS (
                INSERT INTO imported_daily_inventory (
                  date, pid, amountsold, times_sold, qtyreceived, price, costeach, stamp
                )
                VALUES ${placeholders.join(',\n')}
                ON CONFLICT (date, pid) DO UPDATE SET
                  amountsold = EXCLUDED.amountsold,
                  times_sold = EXCLUDED.times_sold,
                  qtyreceived = EXCLUDED.qtyreceived,
                  price = EXCLUDED.price,
                  costeach = EXCLUDED.costeach,
                  stamp = EXCLUDED.stamp,
                  updated = CURRENT_TIMESTAMP
                RETURNING (xmax = 0) AS inserted
              )
              SELECT
                COUNT(*) FILTER (WHERE inserted) AS inserted_count,
                COUNT(*) FILTER (WHERE NOT inserted) AS updated_count
              FROM ins
            `, values);

            // Safely update counts based on the result
            if (result && result.rows && result.rows.length > 0) {
              const insertedCount = parseInt(result.rows[0].inserted_count || 0);
              const updatedCount = parseInt(result.rows[0].updated_count || 0);

              recordsAdded += insertedCount;
              recordsUpdated += updatedCount;
            }
          } catch (error) {
            console.error(`Error in batch import of daily_inventory at offset ${i}:`, error);
            errors.push({
              table: 'imported_daily_inventory',
              batchOffset: i,
              batchSize: batch.length,
              error: error.message
            });
          }
        }

        totalProcessed += rows.length;
        offset += rows.length;

        // Update progress
        outputProgress({
          status: "running",
          operation: "Historical data import - Daily Inventory",
          message: `Processed ${totalProcessed} of ${totalCount} records`,
          current: totalProcessed,
          total: totalCount,
          elapsed: formatElapsedTime(startTime),
          remaining: estimateRemaining(startTime, totalProcessed, totalCount),
          rate: calculateRate(startTime, totalProcessed)
        });
      } catch (error) {
        console.error('Error in batch import of daily_inventory:', error);
        errors.push({
          table: 'imported_daily_inventory',
          error: error.message,
          offset: offset,
          batchSize: batchSize
        });

        // Try to continue with next batch
        offset += batchSize;
      }
    }

    // Update sync status
    await localConnection.query(`
      INSERT INTO sync_status (table_name, last_sync_timestamp)
      VALUES ('imported_daily_inventory', NOW())
      ON CONFLICT (table_name) DO UPDATE SET
        last_sync_timestamp = NOW()
    `);

    return { recordsAdded, recordsUpdated, totalProcessed, errors };
  } catch (error) {
    console.error('Error in daily inventory import:', error);
    return {
      recordsAdded,
      recordsUpdated,
      totalProcessed,
      errors: [...errors, {
        table: 'imported_daily_inventory',
        error: error.message
      }]
    };
  }
}

/**
 * Imports product_stat_history data from MySQL to PostgreSQL
 * Using fast direct inserts without conflict checking
 */
async function importProductStatHistory(
  prodConnection,
  localConnection,
  recentDateStr, // Use more recent date instead of one year ago
  lastSyncTime,
  batchSize,
  insertBatchSize,
  incrementalUpdate,
  startTime,
  recentMonths // Add parameter for recent months
) {
  let recordsAdded = 0;
  let recordsUpdated = 0;
  let totalProcessed = 0;
  let errors = [];
  let offset = 0;
  let allProcessed = false;
  let lastRateCheck = Date.now();
  let lastProcessed = 0;

  try {
    // Get total count for progress reporting
    const [countResult] = await prodConnection.query(`
      SELECT COUNT(*) as total
      FROM product_stat_history
      WHERE date >= ?
      ${incrementalUpdate && lastSyncTime ? `AND date > ?` : ''}
    `, [recentDateStr, ...(incrementalUpdate && lastSyncTime ? [lastSyncTime] : [])]);

    const totalCount = countResult[0].total;
    console.log(`Found ${totalCount} records to process in product_stat_history (using recent date: ${recentDateStr})`);

    // Progress indicator
    outputProgress({
      status: "running",
      operation: "Historical data import - Product Stat History",
      message: `Found ${totalCount} records to process (last ${recentMonths} months only)`,
      current: 0,
      total: totalCount,
      elapsed: formatElapsedTime(startTime)
    });

    // If not incremental, truncate the table first for better performance
    if (!incrementalUpdate) {
      console.log('Truncating imported_product_stat_history for full import...');
      await localConnection.query('TRUNCATE TABLE imported_product_stat_history');
    } else if (lastSyncTime) {
      // For incremental updates, delete records that will be reimported
      console.log(`Deleting records from imported_product_stat_history since ${lastSyncTime}...`);
      await localConnection.query('DELETE FROM imported_product_stat_history WHERE date > $1', [lastSyncTime]);
    }

    // Process in batches for better performance
    while (!allProcessed) {
      try {
        // Fetch batch from production with minimal filtering and no sorting
        const [rows] = await prodConnection.query(`
          SELECT
            pid,
            date,
            COALESCE(score, 0) as score,
            COALESCE(score2, 0) as score2,
            COALESCE(qty_in_baskets, 0) as qty_in_baskets,
            COALESCE(qty_sold, 0) as qty_sold,
            COALESCE(notifies_set, 0) as notifies_set,
            COALESCE(visibility_score, 0) as visibility_score,
            COALESCE(health_score, 0) as health_score,
            COALESCE(sold_view_score, 0) as sold_view_score
          FROM product_stat_history
          WHERE date >= ?
          ${incrementalUpdate && lastSyncTime ? `AND date > ?` : ''}
          LIMIT ? OFFSET ?
        `, [
          recentDateStr,
          ...(incrementalUpdate && lastSyncTime ? [lastSyncTime] : []),
          batchSize,
          offset
        ]);

        if (rows.length === 0) {
          allProcessed = true;
          break;
        }

        // Process rows in smaller batches for better performance
        for (let i = 0; i < rows.length; i += insertBatchSize) {
          const batch = rows.slice(i, i + insertBatchSize);

          if (batch.length === 0) continue;

          try {
            // Build parameterized query to handle NULL values properly
            const values = [];
            const placeholders = [];
            let placeholderIndex = 1;

            for (const row of batch) {
              const rowPlaceholders = [
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`,
                `$${placeholderIndex++}`
              ];

              placeholders.push(`(${rowPlaceholders.join(', ')})`);

              values.push(
                row.pid,
                validateDate(row.date),
                row.score,
                row.score2,
                row.qty_in_baskets,
                row.qty_sold,
                row.notifies_set,
                row.visibility_score,
                row.health_score,
                row.sold_view_score
              );
            }

            // Execute direct batch insert without conflict checking
            await localConnection.query(`
              INSERT INTO imported_product_stat_history (
                pid, date, score, score2, qty_in_baskets, qty_sold, notifies_set,
                visibility_score, health_score, sold_view_score
              )
              VALUES ${placeholders.join(',\n')}
            `, values);

            // All inserts are new records when using this approach
            recordsAdded += batch.length;
          } catch (error) {
            console.error(`Error in batch insert of product_stat_history at offset ${i}:`, error);
            errors.push({
              table: 'imported_product_stat_history',
              batchOffset: i,
              batchSize: batch.length,
              error: error.message
            });
          }
        }

        totalProcessed += rows.length;
        offset += rows.length;

        // Calculate current rate every 10 seconds or 100,000 records
        const now = Date.now();
        if (now - lastRateCheck > 10000 || totalProcessed - lastProcessed > 100000) {
          const timeElapsed = (now - lastRateCheck) / 1000; // seconds
          const recordsProcessed = totalProcessed - lastProcessed;
          const currentRate = Math.round(recordsProcessed / timeElapsed);

          console.log(`Current import rate: ${currentRate} records/second`);

          lastRateCheck = now;
          lastProcessed = totalProcessed;
        }

        // Update progress
        outputProgress({
          status: "running",
          operation: "Historical data import - Product Stat History",
          message: `Processed ${totalProcessed} of ${totalCount} records`,
          current: totalProcessed,
          total: totalCount,
          elapsed: formatElapsedTime(startTime),
          remaining: estimateRemaining(startTime, totalProcessed, totalCount),
          rate: calculateRate(startTime, totalProcessed)
        });
      } catch (error) {
        console.error('Error in batch import of product_stat_history:', error);
        errors.push({
          table: 'imported_product_stat_history',
          error: error.message,
          offset: offset,
          batchSize: batchSize
        });

        // Try to continue with next batch
        offset += batchSize;
      }
    }

    // Update sync status
    await localConnection.query(`
      INSERT INTO sync_status (table_name, last_sync_timestamp)
      VALUES ('imported_product_stat_history', NOW())
      ON CONFLICT (table_name) DO UPDATE SET
        last_sync_timestamp = NOW()
    `);

    return { recordsAdded, recordsUpdated, totalProcessed, errors };
  } catch (error) {
    console.error('Error in product stat history import:', error);
    return {
      recordsAdded,
      recordsUpdated,
      totalProcessed,
      errors: [...errors, {
        table: 'imported_product_stat_history',
        error: error.message
      }]
    };
  }
}

module.exports = importHistoricalData;
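The "fast direct inserts without conflict checking" approach in importProductStatHistory works because the target is emptied first: a full import truncates the table, and an incremental import deletes rows newer than the last sync before re-importing them. That contract, isolated as a sketch (the helper name is illustrative; table and column names are from this file):

// Sketch: prepare the target so plain INSERTs cannot conflict.
async function prepareStatHistoryTarget(localConnection, incrementalUpdate, lastSyncTime) {
  if (!incrementalUpdate) {
    // Full import: start from an empty table.
    await localConnection.query('TRUNCATE TABLE imported_product_stat_history');
  } else if (lastSyncTime) {
    // Incremental: clear the window that is about to be re-imported.
    await localConnection.query(
      'DELETE FROM imported_product_stat_history WHERE date > $1',
      [lastSyncTime]
    );
  }
}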
inventory-server/old/metrics-schema.sql (new file, 377 lines)
@@ -0,0 +1,377 @@
-- Disable foreign key checks
SET session_replication_role = 'replica';

-- Temporary tables for batch metrics processing
CREATE TABLE temp_sales_metrics (
    pid BIGINT NOT NULL,
    daily_sales_avg DECIMAL(10,3),
    weekly_sales_avg DECIMAL(10,3),
    monthly_sales_avg DECIMAL(10,3),
    total_revenue DECIMAL(10,3),
    avg_margin_percent DECIMAL(10,3),
    first_sale_date DATE,
    last_sale_date DATE,
    stddev_daily_sales DECIMAL(10,3),
    PRIMARY KEY (pid)
);

CREATE TABLE temp_purchase_metrics (
    pid BIGINT NOT NULL,
    avg_lead_time_days DECIMAL(10,2),
    last_purchase_date DATE,
    first_received_date DATE,
    last_received_date DATE,
    stddev_lead_time_days DECIMAL(10,2),
    PRIMARY KEY (pid)
);

-- New table for product metrics
CREATE TABLE product_metrics (
    pid BIGINT NOT NULL,
    last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    -- Sales velocity metrics
    daily_sales_avg DECIMAL(10,3),
    weekly_sales_avg DECIMAL(10,3),
    monthly_sales_avg DECIMAL(10,3),
    avg_quantity_per_order DECIMAL(10,3),
    number_of_orders INTEGER,
    first_sale_date DATE,
    last_sale_date DATE,
    -- Stock metrics
    days_of_inventory INTEGER,
    weeks_of_inventory INTEGER,
    reorder_point INTEGER,
    safety_stock INTEGER,
    reorder_qty INTEGER DEFAULT 0,
    overstocked_amt INTEGER DEFAULT 0,
    -- Financial metrics
    avg_margin_percent DECIMAL(10,3),
    total_revenue DECIMAL(10,3),
    inventory_value DECIMAL(10,3),
    cost_of_goods_sold DECIMAL(10,3),
    gross_profit DECIMAL(10,3),
    gmroi DECIMAL(10,3),
    -- Purchase metrics
    avg_lead_time_days DECIMAL(10,2),
    last_purchase_date DATE,
    first_received_date DATE,
    last_received_date DATE,
    -- Classification metrics
    abc_class CHAR(1),
    stock_status VARCHAR(20),
    -- Turnover metrics
    turnover_rate DECIMAL(12,3),
    -- Lead time metrics
    current_lead_time INTEGER,
    target_lead_time INTEGER,
    lead_time_status VARCHAR(20),
    -- Forecast metrics
    forecast_accuracy DECIMAL(5,2) DEFAULT NULL,
    forecast_bias DECIMAL(5,2) DEFAULT NULL,
    last_forecast_date DATE DEFAULT NULL,
    PRIMARY KEY (pid),
    FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE
);

CREATE INDEX idx_metrics_revenue ON product_metrics(total_revenue);
CREATE INDEX idx_metrics_stock_status ON product_metrics(stock_status);
CREATE INDEX idx_metrics_lead_time ON product_metrics(lead_time_status);
CREATE INDEX idx_metrics_turnover ON product_metrics(turnover_rate);
CREATE INDEX idx_metrics_last_calculated ON product_metrics(last_calculated_at);
CREATE INDEX idx_metrics_abc ON product_metrics(abc_class);
CREATE INDEX idx_metrics_sales ON product_metrics(daily_sales_avg, weekly_sales_avg, monthly_sales_avg);
CREATE INDEX idx_metrics_forecast ON product_metrics(forecast_accuracy, forecast_bias);

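An illustration of the kind of query the idx_metrics_stock_status and idx_metrics_revenue indexes above are positioned to serve — the 'reorder' status value is an assumption, since the schema only declares stock_status as VARCHAR(20) without enumerating its values:

// Sketch only: a reorder report over product_metrics.
// 'reorder' is an assumed stock_status value, not defined in this schema.
const { rows } = await connection.query(`
  SELECT pid, stock_status, reorder_point, reorder_qty, total_revenue
  FROM product_metrics
  WHERE stock_status = 'reorder'
  ORDER BY total_revenue DESC
  LIMIT 50
`);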
-- New table for time-based aggregates
CREATE TABLE product_time_aggregates (
    pid BIGINT NOT NULL,
    year INTEGER NOT NULL,
    month INTEGER NOT NULL,
    -- Sales metrics
    total_quantity_sold INTEGER DEFAULT 0,
    total_revenue DECIMAL(10,3) DEFAULT 0,
    total_cost DECIMAL(10,3) DEFAULT 0,
    order_count INTEGER DEFAULT 0,
    -- Stock changes
    stock_received INTEGER DEFAULT 0,
    stock_ordered INTEGER DEFAULT 0,
    -- Calculated fields
    avg_price DECIMAL(10,3),
    profit_margin DECIMAL(10,3),
    inventory_value DECIMAL(10,3),
    gmroi DECIMAL(10,3),
    PRIMARY KEY (pid, year, month),
    FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE
);

CREATE INDEX idx_date ON product_time_aggregates(year, month);

-- Create vendor_details table
CREATE TABLE vendor_details (
    vendor VARCHAR(100) PRIMARY KEY,
    contact_name VARCHAR(100),
    email VARCHAR(255),
    phone VARCHAR(50),
    status VARCHAR(20) DEFAULT 'active',
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

CREATE INDEX idx_vendor_details_status ON vendor_details(status);

-- New table for vendor metrics
CREATE TABLE vendor_metrics (
    vendor VARCHAR(100) NOT NULL,
    last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    -- Performance metrics
    avg_lead_time_days DECIMAL(10,3),
    on_time_delivery_rate DECIMAL(5,2),
    order_fill_rate DECIMAL(5,2),
    total_orders INTEGER DEFAULT 0,
    total_late_orders INTEGER DEFAULT 0,
    total_purchase_value DECIMAL(10,3) DEFAULT 0,
    avg_order_value DECIMAL(10,3),
    -- Product metrics
    active_products INTEGER DEFAULT 0,
    total_products INTEGER DEFAULT 0,
    -- Financial metrics
    total_revenue DECIMAL(10,3) DEFAULT 0,
    avg_margin_percent DECIMAL(5,2),
    -- Status
    status VARCHAR(20) DEFAULT 'active',
    PRIMARY KEY (vendor),
    FOREIGN KEY (vendor) REFERENCES vendor_details(vendor) ON DELETE CASCADE
);

CREATE INDEX idx_vendor_performance ON vendor_metrics(on_time_delivery_rate);
CREATE INDEX idx_vendor_status ON vendor_metrics(status);
CREATE INDEX idx_vendor_metrics_last_calculated ON vendor_metrics(last_calculated_at);
CREATE INDEX idx_vendor_metrics_orders ON vendor_metrics(total_orders, total_late_orders);

-- New table for category metrics
CREATE TABLE category_metrics (
    category_id BIGINT NOT NULL,
    last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    -- Product metrics
    product_count INTEGER DEFAULT 0,
    active_products INTEGER DEFAULT 0,
    -- Financial metrics
    total_value DECIMAL(15,3) DEFAULT 0,
    avg_margin DECIMAL(5,2),
    turnover_rate DECIMAL(12,3),
    growth_rate DECIMAL(5,2),
    -- Status
    status VARCHAR(20) DEFAULT 'active',
    PRIMARY KEY (category_id),
    FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE
);

CREATE INDEX idx_category_status ON category_metrics(status);
CREATE INDEX idx_category_growth ON category_metrics(growth_rate);
CREATE INDEX idx_metrics_last_calculated_cat ON category_metrics(last_calculated_at);
CREATE INDEX idx_category_metrics_products ON category_metrics(product_count, active_products);

-- New table for vendor time-based metrics
CREATE TABLE vendor_time_metrics (
    vendor VARCHAR(100) NOT NULL,
    year INTEGER NOT NULL,
    month INTEGER NOT NULL,
    -- Order metrics
    total_orders INTEGER DEFAULT 0,
    late_orders INTEGER DEFAULT 0,
    avg_lead_time_days DECIMAL(10,3),
    -- Financial metrics
    total_purchase_value DECIMAL(10,3) DEFAULT 0,
    total_revenue DECIMAL(10,3) DEFAULT 0,
    avg_margin_percent DECIMAL(5,2),
|
||||
PRIMARY KEY (vendor, year, month),
|
||||
FOREIGN KEY (vendor) REFERENCES vendor_details(vendor) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
CREATE INDEX idx_vendor_date ON vendor_time_metrics(year, month);
|
||||
|
||||
-- New table for category time-based metrics
|
||||
CREATE TABLE category_time_metrics (
|
||||
category_id BIGINT NOT NULL,
|
||||
year INTEGER NOT NULL,
|
||||
month INTEGER NOT NULL,
|
||||
-- Product metrics
|
||||
product_count INTEGER DEFAULT 0,
|
||||
active_products INTEGER DEFAULT 0,
|
||||
-- Financial metrics
|
||||
total_value DECIMAL(15,3) DEFAULT 0,
|
||||
total_revenue DECIMAL(15,3) DEFAULT 0,
|
||||
avg_margin DECIMAL(5,2),
|
||||
turnover_rate DECIMAL(12,3),
|
||||
PRIMARY KEY (category_id, year, month),
|
||||
FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
CREATE INDEX idx_category_date ON category_time_metrics(year, month);
|
||||
|
||||
-- New table for category-based sales metrics
|
||||
CREATE TABLE category_sales_metrics (
|
||||
category_id BIGINT NOT NULL,
|
||||
brand VARCHAR(100) NOT NULL,
|
||||
period_start DATE NOT NULL,
|
||||
period_end DATE NOT NULL,
|
||||
avg_daily_sales DECIMAL(10,3) DEFAULT 0,
|
||||
total_sold INTEGER DEFAULT 0,
|
||||
num_products INTEGER DEFAULT 0,
|
||||
avg_price DECIMAL(10,3) DEFAULT 0,
|
||||
last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
PRIMARY KEY (category_id, brand, period_start, period_end),
|
||||
FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
CREATE INDEX idx_category_brand ON category_sales_metrics(category_id, brand);
|
||||
CREATE INDEX idx_period ON category_sales_metrics(period_start, period_end);
|
||||
|
||||
-- New table for brand metrics
|
||||
CREATE TABLE brand_metrics (
|
||||
brand VARCHAR(100) NOT NULL,
|
||||
last_calculated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
-- Product metrics
|
||||
product_count INTEGER DEFAULT 0,
|
||||
active_products INTEGER DEFAULT 0,
|
||||
-- Stock metrics
|
||||
total_stock_units INTEGER DEFAULT 0,
|
||||
total_stock_cost DECIMAL(15,2) DEFAULT 0,
|
||||
total_stock_retail DECIMAL(15,2) DEFAULT 0,
|
||||
-- Sales metrics
|
||||
total_revenue DECIMAL(15,2) DEFAULT 0,
|
||||
avg_margin DECIMAL(5,2) DEFAULT 0,
|
||||
growth_rate DECIMAL(5,2) DEFAULT 0,
|
||||
PRIMARY KEY (brand)
|
||||
);
|
||||
|
||||
CREATE INDEX idx_brand_metrics_last_calculated ON brand_metrics(last_calculated_at);
|
||||
CREATE INDEX idx_brand_metrics_revenue ON brand_metrics(total_revenue);
|
||||
CREATE INDEX idx_brand_metrics_growth ON brand_metrics(growth_rate);
|
||||
|
||||
-- New table for brand time-based metrics
|
||||
CREATE TABLE brand_time_metrics (
|
||||
brand VARCHAR(100) NOT NULL,
|
||||
year INTEGER NOT NULL,
|
||||
month INTEGER NOT NULL,
|
||||
-- Product metrics
|
||||
product_count INTEGER DEFAULT 0,
|
||||
active_products INTEGER DEFAULT 0,
|
||||
-- Stock metrics
|
||||
total_stock_units INTEGER DEFAULT 0,
|
||||
total_stock_cost DECIMAL(15,2) DEFAULT 0,
|
||||
total_stock_retail DECIMAL(15,2) DEFAULT 0,
|
||||
-- Sales metrics
|
||||
total_revenue DECIMAL(15,2) DEFAULT 0,
|
||||
avg_margin DECIMAL(5,2) DEFAULT 0,
|
||||
growth_rate DECIMAL(5,2) DEFAULT 0,
|
||||
PRIMARY KEY (brand, year, month)
|
||||
);
|
||||
|
||||
CREATE INDEX idx_brand_time_date ON brand_time_metrics(year, month);
|
||||
|
||||
-- New table for sales forecasts
|
||||
CREATE TABLE sales_forecasts (
|
||||
pid BIGINT NOT NULL,
|
||||
forecast_date DATE NOT NULL,
|
||||
forecast_quantity INTEGER,
|
||||
confidence_level DECIMAL(5,2),
|
||||
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
PRIMARY KEY (pid, forecast_date),
|
||||
FOREIGN KEY (pid) REFERENCES products(pid) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
CREATE INDEX idx_forecast_date ON sales_forecasts(forecast_date);
|
||||
|
||||
-- New table for category forecasts
|
||||
CREATE TABLE category_forecasts (
|
||||
category_id BIGINT NOT NULL,
|
||||
forecast_date DATE NOT NULL,
|
||||
forecast_revenue DECIMAL(15,2),
|
||||
forecast_units INTEGER,
|
||||
confidence_level DECIMAL(5,2),
|
||||
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
PRIMARY KEY (category_id, forecast_date),
|
||||
FOREIGN KEY (category_id) REFERENCES categories(cat_id) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
CREATE INDEX idx_cat_forecast_date ON category_forecasts(forecast_date);
|
||||
|
||||
-- Create views for common calculations
|
||||
CREATE OR REPLACE VIEW inventory_health AS
|
||||
WITH stock_levels AS (
|
||||
SELECT
|
||||
p.pid,
|
||||
p.title,
|
||||
p.SKU,
|
||||
p.stock_quantity,
|
||||
p.preorder_count,
|
||||
pm.daily_sales_avg,
|
||||
pm.weekly_sales_avg,
|
||||
pm.monthly_sales_avg,
|
||||
pm.reorder_point,
|
||||
pm.safety_stock,
|
||||
pm.days_of_inventory,
|
||||
pm.weeks_of_inventory,
|
||||
pm.stock_status,
|
||||
pm.abc_class,
|
||||
pm.turnover_rate,
|
||||
pm.avg_lead_time_days,
|
||||
pm.current_lead_time,
|
||||
pm.target_lead_time,
|
||||
pm.lead_time_status,
|
||||
p.cost_price,
|
||||
p.price,
|
||||
pm.inventory_value,
|
||||
pm.gmroi
|
||||
FROM products p
|
||||
LEFT JOIN product_metrics pm ON p.pid = pm.pid
|
||||
WHERE p.managing_stock = true AND p.visible = true
|
||||
)
|
||||
SELECT
|
||||
*,
|
||||
CASE
|
||||
WHEN stock_quantity <= safety_stock THEN 'Critical'
|
||||
WHEN stock_quantity <= reorder_point THEN 'Low'
|
||||
WHEN stock_quantity > (reorder_point * 3) THEN 'Excess'
|
||||
ELSE 'Healthy'
|
||||
END as inventory_status,
|
||||
CASE
|
||||
WHEN lead_time_status = 'delayed' AND stock_status = 'low' THEN 'High'
|
||||
WHEN lead_time_status = 'delayed' OR stock_status = 'low' THEN 'Medium'
|
||||
ELSE 'Low'
|
||||
END as risk_level
|
||||
FROM stock_levels;
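Reporting code can consume the view directly. A minimal usage sketch (the filter value is illustrative, not from this repository):

-- Products most at risk of stocking out while replenishment is delayed.
SELECT pid, title, stock_quantity, days_of_inventory, inventory_status
FROM inventory_health
WHERE risk_level = 'High'
ORDER BY days_of_inventory ASC NULLS FIRST;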

-- Create view for category performance trends
CREATE OR REPLACE VIEW category_performance_trends AS
WITH monthly_trends AS (
    SELECT
        c.cat_id,
        c.name as category_name,
        ctm.year,
        ctm.month,
        ctm.product_count,
        ctm.active_products,
        ctm.total_value,
        ctm.total_revenue,
        ctm.avg_margin,
        ctm.turnover_rate,
        LAG(ctm.total_revenue) OVER (PARTITION BY c.cat_id ORDER BY ctm.year, ctm.month) as prev_month_revenue,
        LAG(ctm.turnover_rate) OVER (PARTITION BY c.cat_id ORDER BY ctm.year, ctm.month) as prev_month_turnover
    FROM categories c
    JOIN category_time_metrics ctm ON c.cat_id = ctm.category_id
)
SELECT
    *,
    CASE
        WHEN prev_month_revenue IS NULL THEN 0
        ELSE ((total_revenue - prev_month_revenue) / prev_month_revenue) * 100
    END as revenue_growth_percent,
    CASE
        WHEN prev_month_turnover IS NULL THEN 0
        ELSE ((turnover_rate - prev_month_turnover) / prev_month_turnover) * 100
    END as turnover_growth_percent
FROM monthly_trends;
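A minimal sketch of querying the trends view (the year and month values are arbitrary examples):

-- Fastest-growing categories for a given month.
SELECT category_name, year, month, revenue_growth_percent
FROM category_performance_trends
WHERE year = 2024 AND month = 6
ORDER BY revenue_growth_percent DESC
LIMIT 10;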

SET session_replication_role = 'origin';

@@ -32,12 +32,12 @@ async function calculateBrandMetrics(startTime, totalProducts, processedCount =
     }

     // Get order count that will be processed
-    const [orderCount] = await connection.query(`
+    const orderCount = await connection.query(`
       SELECT COUNT(*) as count
       FROM orders o
       WHERE o.canceled = false
     `);
-    processedOrders = orderCount[0].count;
+    processedOrders = parseInt(orderCount.rows[0].count);

     outputProgress({
       status: 'running',
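The driver change behind this hunk: mysql2's promise API resolves to a [rows, fields] tuple, while node-postgres resolves to a result object whose rows live on a .rows property, and a bigint COUNT(*) arrives as a string. A minimal sketch of the two shapes (hypothetical connection objects, not code from this repository):

// mysql2/promise: destructure the tuple; count is already a JS number.
const [rows] = await mysqlConn.query('SELECT COUNT(*) AS count FROM orders');
const mysqlCount = rows[0].count;

// node-postgres (pg): read result.rows and parse the stringified bigint.
const result = await pgPool.query('SELECT COUNT(*) AS count FROM orders');
const pgCount = parseInt(result.rows[0].count, 10);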

@@ -98,14 +98,14 @@ async function calculateBrandMetrics(startTime, totalProducts, processedCount =
           SUM(o.quantity * (o.price - COALESCE(o.discount, 0) - p.cost_price)) as period_margin,
           COUNT(DISTINCT DATE(o.date)) as period_days,
           CASE
-            WHEN o.date >= DATE_SUB(CURRENT_DATE, INTERVAL 3 MONTH) THEN 'current'
-            WHEN o.date BETWEEN DATE_SUB(CURRENT_DATE, INTERVAL 15 MONTH)
-              AND DATE_SUB(CURRENT_DATE, INTERVAL 12 MONTH) THEN 'previous'
+            WHEN o.date >= CURRENT_DATE - INTERVAL '3 months' THEN 'current'
+            WHEN o.date BETWEEN CURRENT_DATE - INTERVAL '15 months'
+              AND CURRENT_DATE - INTERVAL '12 months' THEN 'previous'
           END as period_type
         FROM filtered_products p
         JOIN orders o ON p.pid = o.pid
         WHERE o.canceled = false
-          AND o.date >= DATE_SUB(CURRENT_DATE, INTERVAL 15 MONTH)
+          AND o.date >= CURRENT_DATE - INTERVAL '15 months'
         GROUP BY p.brand, period_type
       ),
       brand_data AS (

@@ -165,15 +165,16 @@ async function calculateBrandMetrics(startTime, totalProducts, processedCount =
       LEFT JOIN sales_periods sp ON bd.brand = sp.brand
       GROUP BY bd.brand, bd.product_count, bd.active_products, bd.total_stock_units,
                bd.total_stock_cost, bd.total_stock_retail, bd.total_revenue, bd.avg_margin
-      ON DUPLICATE KEY UPDATE
-        product_count = VALUES(product_count),
-        active_products = VALUES(active_products),
-        total_stock_units = VALUES(total_stock_units),
-        total_stock_cost = VALUES(total_stock_cost),
-        total_stock_retail = VALUES(total_stock_retail),
-        total_revenue = VALUES(total_revenue),
-        avg_margin = VALUES(avg_margin),
-        growth_rate = VALUES(growth_rate),
+      ON CONFLICT (brand) DO UPDATE
+      SET
+        product_count = EXCLUDED.product_count,
+        active_products = EXCLUDED.active_products,
+        total_stock_units = EXCLUDED.total_stock_units,
+        total_stock_cost = EXCLUDED.total_stock_cost,
+        total_stock_retail = EXCLUDED.total_stock_retail,
+        total_revenue = EXCLUDED.total_revenue,
+        avg_margin = EXCLUDED.avg_margin,
+        growth_rate = EXCLUDED.growth_rate,
         last_calculated_at = CURRENT_TIMESTAMP
     `);
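This is the general upsert translation the whole migration follows. A minimal side-by-side sketch on a hypothetical table t(k PRIMARY KEY, v):

-- MySQL: any unique key conflict triggers the update; the incoming
-- row is referenced through VALUES().
INSERT INTO t (k, v) VALUES (1, 'x')
ON DUPLICATE KEY UPDATE v = VALUES(v);

-- PostgreSQL: the conflict target must name the unique columns, and
-- the incoming row is referenced as EXCLUDED.
INSERT INTO t (k, v) VALUES (1, 'x')
ON CONFLICT (k) DO UPDATE SET v = EXCLUDED.v;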

@@ -230,8 +231,8 @@ async function calculateBrandMetrics(startTime, totalProducts, processedCount =
       monthly_metrics AS (
         SELECT
           p.brand,
-          YEAR(o.date) as year,
-          MONTH(o.date) as month,
+          EXTRACT(YEAR FROM o.date::timestamp with time zone) as year,
+          EXTRACT(MONTH FROM o.date::timestamp with time zone) as month,
           COUNT(DISTINCT p.valid_pid) as product_count,
           COUNT(DISTINCT p.active_pid) as active_products,
           SUM(p.valid_stock) as total_stock_units,

@@ -255,19 +256,20 @@ async function calculateBrandMetrics(startTime, totalProducts, processedCount =
           END as avg_margin
         FROM filtered_products p
         LEFT JOIN orders o ON p.pid = o.pid AND o.canceled = false
-        WHERE o.date >= DATE_SUB(CURRENT_DATE, INTERVAL 12 MONTH)
-        GROUP BY p.brand, YEAR(o.date), MONTH(o.date)
+        WHERE o.date >= CURRENT_DATE - INTERVAL '12 months'
+        GROUP BY p.brand, EXTRACT(YEAR FROM o.date::timestamp with time zone), EXTRACT(MONTH FROM o.date::timestamp with time zone)
       )
       SELECT *
       FROM monthly_metrics
-      ON DUPLICATE KEY UPDATE
-        product_count = VALUES(product_count),
-        active_products = VALUES(active_products),
-        total_stock_units = VALUES(total_stock_units),
-        total_stock_cost = VALUES(total_stock_cost),
-        total_stock_retail = VALUES(total_stock_retail),
-        total_revenue = VALUES(total_revenue),
-        avg_margin = VALUES(avg_margin)
+      ON CONFLICT (brand, year, month) DO UPDATE
+      SET
+        product_count = EXCLUDED.product_count,
+        active_products = EXCLUDED.active_products,
+        total_stock_units = EXCLUDED.total_stock_units,
+        total_stock_cost = EXCLUDED.total_stock_cost,
+        total_stock_retail = EXCLUDED.total_stock_retail,
+        total_revenue = EXCLUDED.total_revenue,
+        avg_margin = EXCLUDED.avg_margin
     `);

     processedCount = Math.floor(totalProducts * 0.99);

@@ -294,7 +296,8 @@ async function calculateBrandMetrics(startTime, totalProducts, processedCount =
     await connection.query(`
       INSERT INTO calculate_status (module_name, last_calculation_timestamp)
       VALUES ('brand_metrics', NOW())
-      ON DUPLICATE KEY UPDATE last_calculation_timestamp = NOW()
+      ON CONFLICT (module_name) DO UPDATE
+      SET last_calculation_timestamp = NOW()
     `);

     return {

@@ -32,12 +32,12 @@ async function calculateCategoryMetrics(startTime, totalProducts, processedCount
     }

     // Get order count that will be processed
-    const [orderCount] = await connection.query(`
+    const orderCount = await connection.query(`
       SELECT COUNT(*) as count
       FROM orders o
       WHERE o.canceled = false
     `);
-    processedOrders = orderCount[0].count;
+    processedOrders = parseInt(orderCount.rows[0].count);

     outputProgress({
       status: 'running',

@@ -76,12 +76,13 @@ async function calculateCategoryMetrics(startTime, totalProducts, processedCount
       LEFT JOIN product_categories pc ON c.cat_id = pc.cat_id
       LEFT JOIN products p ON pc.pid = p.pid
       GROUP BY c.cat_id, c.status
-      ON DUPLICATE KEY UPDATE
-        product_count = VALUES(product_count),
-        active_products = VALUES(active_products),
-        total_value = VALUES(total_value),
-        status = VALUES(status),
-        last_calculated_at = VALUES(last_calculated_at)
+      ON CONFLICT (category_id) DO UPDATE
+      SET
+        product_count = EXCLUDED.product_count,
+        active_products = EXCLUDED.active_products,
+        total_value = EXCLUDED.total_value,
+        status = EXCLUDED.status,
+        last_calculated_at = EXCLUDED.last_calculated_at
     `);

     processedCount = Math.floor(totalProducts * 0.90);

@@ -127,17 +128,13 @@ async function calculateCategoryMetrics(startTime, totalProducts, processedCount
           (tc.category_id IS NULL AND tc.vendor = p.vendor) OR
           (tc.category_id IS NULL AND tc.vendor IS NULL)
         WHERE o.canceled = false
-          AND o.date >= DATE_SUB(CURRENT_DATE, INTERVAL COALESCE(tc.calculation_period_days, 30) DAY)
+          AND o.date >= CURRENT_DATE - (COALESCE(tc.calculation_period_days, 30) || ' days')::INTERVAL
         GROUP BY pc.cat_id
       )
-      UPDATE category_metrics cm
-      JOIN category_sales cs ON cm.category_id = cs.cat_id
-      LEFT JOIN turnover_config tc ON
-        (tc.category_id = cm.category_id AND tc.vendor IS NULL) OR
-        (tc.category_id IS NULL AND tc.vendor IS NULL)
+      UPDATE category_metrics
       SET
-        cm.avg_margin = COALESCE(cs.total_margin * 100.0 / NULLIF(cs.total_sales, 0), 0),
-        cm.turnover_rate = CASE
+        avg_margin = COALESCE(cs.total_margin * 100.0 / NULLIF(cs.total_sales, 0), 0),
+        turnover_rate = CASE
           WHEN cs.avg_stock > 0 AND cs.active_days > 0
           THEN LEAST(
             (cs.units_sold / cs.avg_stock) * (365.0 / cs.active_days),

@@ -145,7 +142,9 @@ async function calculateCategoryMetrics(startTime, totalProducts, processedCount
           )
           ELSE 0
         END,
-        cm.last_calculated_at = NOW()
+        last_calculated_at = NOW()
+      FROM category_sales cs
+      WHERE category_id = cs.cat_id
     `);
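MySQL's multi-table UPDATE ... JOIN has no direct PostgreSQL equivalent; as in the hunk above, the joined relation moves into a FROM clause and the join condition into WHERE, and column references on the updated table lose their alias prefix in SET. A minimal sketch on hypothetical tables a(id, v) and b(id, v):

-- MySQL
UPDATE a JOIN b ON a.id = b.id SET a.v = b.v;

-- PostgreSQL
UPDATE a SET v = b.v FROM b WHERE a.id = b.id;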

     processedCount = Math.floor(totalProducts * 0.95);

@@ -184,9 +183,9 @@ async function calculateCategoryMetrics(startTime, totalProducts, processedCount
         FROM product_categories pc
         JOIN products p ON pc.pid = p.pid
         JOIN orders o ON p.pid = o.pid
-        LEFT JOIN sales_seasonality ss ON MONTH(o.date) = ss.month
+        LEFT JOIN sales_seasonality ss ON EXTRACT(MONTH FROM o.date) = ss.month
         WHERE o.canceled = false
-          AND o.date >= DATE_SUB(CURRENT_DATE, INTERVAL 3 MONTH)
+          AND o.date >= CURRENT_DATE - INTERVAL '3 months'
         GROUP BY pc.cat_id
       ),
       previous_period AS (

@@ -198,26 +197,26 @@ async function calculateCategoryMetrics(startTime, totalProducts, processedCount
         FROM product_categories pc
         JOIN products p ON pc.pid = p.pid
         JOIN orders o ON p.pid = o.pid
-        LEFT JOIN sales_seasonality ss ON MONTH(o.date) = ss.month
+        LEFT JOIN sales_seasonality ss ON EXTRACT(MONTH FROM o.date) = ss.month
         WHERE o.canceled = false
-          AND o.date BETWEEN DATE_SUB(CURRENT_DATE, INTERVAL 15 MONTH)
-            AND DATE_SUB(CURRENT_DATE, INTERVAL 12 MONTH)
+          AND o.date BETWEEN CURRENT_DATE - INTERVAL '15 months'
+            AND CURRENT_DATE - INTERVAL '12 months'
         GROUP BY pc.cat_id
       ),
       trend_data AS (
         SELECT
           pc.cat_id,
-          MONTH(o.date) as month,
+          EXTRACT(MONTH FROM o.date) as month,
           SUM(o.quantity * (o.price - COALESCE(o.discount, 0)) /
               (1 + COALESCE(ss.seasonality_factor, 0))) as revenue,
           COUNT(DISTINCT DATE(o.date)) as days_in_month
         FROM product_categories pc
         JOIN products p ON pc.pid = p.pid
         JOIN orders o ON p.pid = o.pid
-        LEFT JOIN sales_seasonality ss ON MONTH(o.date) = ss.month
+        LEFT JOIN sales_seasonality ss ON EXTRACT(MONTH FROM o.date) = ss.month
         WHERE o.canceled = false
-          AND o.date >= DATE_SUB(CURRENT_DATE, INTERVAL 15 MONTH)
-        GROUP BY pc.cat_id, MONTH(o.date)
+          AND o.date >= CURRENT_DATE - INTERVAL '15 months'
+        GROUP BY pc.cat_id, EXTRACT(MONTH FROM o.date)
       ),
       trend_stats AS (
         SELECT

@@ -261,16 +260,42 @@ async function calculateCategoryMetrics(startTime, totalProducts, processedCount
         JOIN products p ON pc.pid = p.pid
         JOIN orders o ON p.pid = o.pid
         WHERE o.canceled = false
-          AND o.date >= DATE_SUB(CURRENT_DATE, INTERVAL 3 MONTH)
+          AND o.date >= CURRENT_DATE - INTERVAL '3 months'
         GROUP BY pc.cat_id
       ),
+      combined_metrics AS (
+        SELECT
+          COALESCE(cp.cat_id, pp.cat_id) as category_id,
+          CASE
+            WHEN pp.revenue = 0 AND COALESCE(cp.revenue, 0) > 0 THEN 100.0
+            WHEN pp.revenue = 0 OR cp.revenue IS NULL THEN 0.0
+            WHEN ta.trend_slope IS NOT NULL THEN
+              GREATEST(
+                -100.0,
+                LEAST(
+                  (ta.trend_slope / NULLIF(ta.avg_daily_revenue, 0)) * 365 * 100,
+                  999.99
+                )
+              )
+            ELSE
+              GREATEST(
+                -100.0,
+                LEAST(
+                  ((COALESCE(cp.revenue, 0) - pp.revenue) /
+                    NULLIF(ABS(pp.revenue), 0)) * 100.0,
+                  999.99
+                )
+              )
+          END as growth_rate,
+          mc.avg_margin
+        FROM current_period cp
+        FULL OUTER JOIN previous_period pp ON cp.cat_id = pp.cat_id
+        LEFT JOIN trend_analysis ta ON COALESCE(cp.cat_id, pp.cat_id) = ta.cat_id
+        LEFT JOIN margin_calc mc ON COALESCE(cp.cat_id, pp.cat_id) = mc.cat_id
+      )
       UPDATE category_metrics cm
-      LEFT JOIN current_period cp ON cm.category_id = cp.cat_id
-      LEFT JOIN previous_period pp ON cm.category_id = pp.cat_id
-      LEFT JOIN trend_analysis ta ON cm.category_id = ta.cat_id
-      LEFT JOIN margin_calc mc ON cm.category_id = mc.cat_id
       SET
-        cm.growth_rate = CASE
+        growth_rate = CASE
          WHEN pp.revenue = 0 AND COALESCE(cp.revenue, 0) > 0 THEN 100.0
          WHEN pp.revenue = 0 OR cp.revenue IS NULL THEN 0.0
          WHEN ta.trend_slope IS NOT NULL THEN

@@ -291,9 +316,13 @@ async function calculateCategoryMetrics(startTime, totalProducts, processedCount
            )
          )
        END,
-        cm.avg_margin = COALESCE(mc.avg_margin, cm.avg_margin),
-        cm.last_calculated_at = NOW()
-      WHERE cp.cat_id IS NOT NULL OR pp.cat_id IS NOT NULL
+        avg_margin = COALESCE(mc.avg_margin, cm.avg_margin),
+        last_calculated_at = NOW()
+      FROM current_period cp
+      FULL OUTER JOIN previous_period pp ON cp.cat_id = pp.cat_id
+      LEFT JOIN trend_analysis ta ON COALESCE(cp.cat_id, pp.cat_id) = ta.cat_id
+      LEFT JOIN margin_calc mc ON COALESCE(cp.cat_id, pp.cat_id) = mc.cat_id
+      WHERE cm.category_id = COALESCE(cp.cat_id, pp.cat_id)
     `);

     processedCount = Math.floor(totalProducts * 0.97);

@@ -335,8 +364,8 @@ async function calculateCategoryMetrics(startTime, totalProducts, processedCount
       )
       SELECT
         pc.cat_id,
-        YEAR(o.date) as year,
-        MONTH(o.date) as month,
+        EXTRACT(YEAR FROM o.date::timestamp with time zone) as year,
+        EXTRACT(MONTH FROM o.date::timestamp with time zone) as month,
        COUNT(DISTINCT p.pid) as product_count,
        COUNT(DISTINCT CASE WHEN p.visible = true THEN p.pid END) as active_products,
        SUM(p.stock_quantity * p.cost_price) as total_value,

@@ -364,15 +393,16 @@ async function calculateCategoryMetrics(startTime, totalProducts, processedCount
       JOIN products p ON pc.pid = p.pid
       JOIN orders o ON p.pid = o.pid
       WHERE o.canceled = false
-        AND o.date >= DATE_SUB(CURRENT_DATE, INTERVAL 12 MONTH)
-      GROUP BY pc.cat_id, YEAR(o.date), MONTH(o.date)
-      ON DUPLICATE KEY UPDATE
-        product_count = VALUES(product_count),
-        active_products = VALUES(active_products),
-        total_value = VALUES(total_value),
-        total_revenue = VALUES(total_revenue),
-        avg_margin = VALUES(avg_margin),
-        turnover_rate = VALUES(turnover_rate)
+        AND o.date >= CURRENT_DATE - INTERVAL '12 months'
+      GROUP BY pc.cat_id, EXTRACT(YEAR FROM o.date::timestamp with time zone), EXTRACT(MONTH FROM o.date::timestamp with time zone)
+      ON CONFLICT (category_id, year, month) DO UPDATE
+      SET
+        product_count = EXCLUDED.product_count,
+        active_products = EXCLUDED.active_products,
+        total_value = EXCLUDED.total_value,
+        total_revenue = EXCLUDED.total_revenue,
+        avg_margin = EXCLUDED.avg_margin,
+        turnover_rate = EXCLUDED.turnover_rate
     `);

     processedCount = Math.floor(totalProducts * 0.99);

@@ -414,20 +444,20 @@ async function calculateCategoryMetrics(startTime, totalProducts, processedCount
       )
       WITH date_ranges AS (
         SELECT
-          DATE_SUB(CURRENT_DATE, INTERVAL 30 DAY) as period_start,
+          CURRENT_DATE - INTERVAL '30 days' as period_start,
           CURRENT_DATE as period_end
         UNION ALL
         SELECT
-          DATE_SUB(CURRENT_DATE, INTERVAL 90 DAY),
-          DATE_SUB(CURRENT_DATE, INTERVAL 31 DAY)
+          CURRENT_DATE - INTERVAL '90 days',
+          CURRENT_DATE - INTERVAL '31 days'
         UNION ALL
         SELECT
-          DATE_SUB(CURRENT_DATE, INTERVAL 180 DAY),
-          DATE_SUB(CURRENT_DATE, INTERVAL 91 DAY)
+          CURRENT_DATE - INTERVAL '180 days',
+          CURRENT_DATE - INTERVAL '91 days'
        UNION ALL
        SELECT
-          DATE_SUB(CURRENT_DATE, INTERVAL 365 DAY),
-          DATE_SUB(CURRENT_DATE, INTERVAL 181 DAY)
+          CURRENT_DATE - INTERVAL '365 days',
+          CURRENT_DATE - INTERVAL '181 days'
       ),
       sales_data AS (
         SELECT

@@ -466,12 +496,13 @@ async function calculateCategoryMetrics(startTime, totalProducts, processedCount
         END as avg_price,
         NOW() as last_calculated_at
       FROM sales_data
-      ON DUPLICATE KEY UPDATE
-        avg_daily_sales = VALUES(avg_daily_sales),
-        total_sold = VALUES(total_sold),
-        num_products = VALUES(num_products),
-        avg_price = VALUES(avg_price),
-        last_calculated_at = VALUES(last_calculated_at)
+      ON CONFLICT (category_id, brand, period_start, period_end) DO UPDATE
+      SET
+        avg_daily_sales = EXCLUDED.avg_daily_sales,
+        total_sold = EXCLUDED.total_sold,
+        num_products = EXCLUDED.num_products,
+        avg_price = EXCLUDED.avg_price,
+        last_calculated_at = EXCLUDED.last_calculated_at
     `);

     processedCount = Math.floor(totalProducts * 1.0);

@@ -498,7 +529,8 @@ async function calculateCategoryMetrics(startTime, totalProducts, processedCount
     await connection.query(`
       INSERT INTO calculate_status (module_name, last_calculation_timestamp)
       VALUES ('category_metrics', NOW())
-      ON DUPLICATE KEY UPDATE last_calculation_timestamp = NOW()
+      ON CONFLICT (module_name) DO UPDATE
+      SET last_calculation_timestamp = NOW()
     `);

     return {

@@ -32,13 +32,13 @@ async function calculateFinancialMetrics(startTime, totalProducts, processedCoun
     }

     // Get order count that will be processed
-    const [orderCount] = await connection.query(`
+    const orderCount = await connection.query(`
       SELECT COUNT(*) as count
       FROM orders o
       WHERE o.canceled = false
-        AND DATE(o.date) >= DATE_SUB(CURDATE(), INTERVAL 12 MONTH)
+        AND DATE(o.date) >= CURRENT_DATE - INTERVAL '12 months'
     `);
-    processedOrders = orderCount[0].count;
+    processedOrders = parseInt(orderCount.rows[0].count);

     outputProgress({
       status: 'running',

@@ -56,38 +56,97 @@ async function calculateFinancialMetrics(startTime, totalProducts, processedCoun
       }
     });

-    // Calculate financial metrics with optimized query
+    // First, calculate beginning inventory values (12 months ago)
     await connection.query(`
+      CREATE TEMPORARY TABLE IF NOT EXISTS temp_beginning_inventory AS
+      WITH beginning_inventory_calc AS (
+        SELECT
+          p.pid,
+          p.stock_quantity as current_quantity,
+          COALESCE(SUM(o.quantity), 0) as sold_quantity,
+          COALESCE(SUM(po.received), 0) as received_quantity,
+          GREATEST(0, (p.stock_quantity + COALESCE(SUM(o.quantity), 0) - COALESCE(SUM(po.received), 0))) as beginning_quantity,
+          p.cost_price
+        FROM
+          products p
+        LEFT JOIN
+          orders o ON p.pid = o.pid
+          AND o.canceled = false
+          AND o.date >= CURRENT_DATE - INTERVAL '12 months'::interval
+        LEFT JOIN
+          purchase_orders po ON p.pid = po.pid
+          AND po.received_date IS NOT NULL
+          AND po.received_date >= CURRENT_DATE - INTERVAL '12 months'::interval
+        GROUP BY
+          p.pid, p.stock_quantity, p.cost_price
+      )
+      SELECT
+        pid,
+        beginning_quantity,
+        beginning_quantity * cost_price as beginning_value,
+        current_quantity * cost_price as current_value,
+        ((beginning_quantity * cost_price) + (current_quantity * cost_price)) / 2 as average_inventory_value
+      FROM
+        beginning_inventory_calc
+    `);
+
+    processedCount = Math.floor(totalProducts * 0.60);
+    outputProgress({
+      status: 'running',
+      operation: 'Beginning inventory values calculated, computing financial metrics',
+      current: processedCount,
+      total: totalProducts,
+      elapsed: formatElapsedTime(startTime),
+      remaining: estimateRemaining(startTime, processedCount, totalProducts),
+      rate: calculateRate(startTime, processedCount),
+      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
+      timing: {
+        start_time: new Date(startTime).toISOString(),
+        end_time: new Date().toISOString(),
+        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
+      }
+    });
+
+    // Calculate financial metrics with optimized query and standard formulas
+    await connection.query(`
       WITH product_financials AS (
         SELECT
           p.pid,
-          p.cost_price * p.stock_quantity as inventory_value,
-          SUM(o.quantity * o.price) as total_revenue,
-          SUM(o.quantity * p.cost_price) as cost_of_goods_sold,
-          SUM(o.quantity * (o.price - p.cost_price)) as gross_profit,
+          COALESCE(bi.average_inventory_value, p.cost_price * p.stock_quantity) as avg_inventory_value,
+          p.cost_price * p.stock_quantity as current_inventory_value,
+          SUM(o.quantity * (o.price - COALESCE(o.discount, 0))) as total_revenue,
+          SUM(o.quantity * COALESCE(o.costeach, 0)) as cost_of_goods_sold,
+          SUM(o.quantity * (o.price - COALESCE(o.discount, 0) - COALESCE(o.costeach, 0))) as gross_profit,
          MIN(o.date) as first_sale_date,
          MAX(o.date) as last_sale_date,
-          DATEDIFF(MAX(o.date), MIN(o.date)) + 1 as calculation_period_days,
+          EXTRACT(DAY FROM (MAX(o.date)::timestamp with time zone - MIN(o.date)::timestamp with time zone)) + 1 as calculation_period_days,
          COUNT(DISTINCT DATE(o.date)) as active_days
        FROM products p
        LEFT JOIN orders o ON p.pid = o.pid
+        LEFT JOIN temp_beginning_inventory bi ON p.pid = bi.pid
        WHERE o.canceled = false
-          AND DATE(o.date) >= DATE_SUB(CURDATE(), INTERVAL 12 MONTH)
-        GROUP BY p.pid
+          AND DATE(o.date) >= CURRENT_DATE - INTERVAL '12 months'::interval
+        GROUP BY p.pid, p.cost_price, p.stock_quantity, bi.average_inventory_value
      )
      UPDATE product_metrics pm
-      JOIN product_financials pf ON pm.pid = pf.pid
      SET
-        pm.inventory_value = COALESCE(pf.inventory_value, 0),
-        pm.total_revenue = COALESCE(pf.total_revenue, 0),
-        pm.cost_of_goods_sold = COALESCE(pf.cost_of_goods_sold, 0),
-        pm.gross_profit = COALESCE(pf.gross_profit, 0),
-        pm.gmroi = CASE
-          WHEN COALESCE(pf.inventory_value, 0) > 0 AND pf.active_days > 0 THEN
-            (COALESCE(pf.gross_profit, 0) * (365.0 / pf.active_days)) / COALESCE(pf.inventory_value, 0)
+        inventory_value = COALESCE(pf.current_inventory_value, 0)::decimal(10,3),
+        total_revenue = COALESCE(pf.total_revenue, 0)::decimal(10,3),
+        cost_of_goods_sold = COALESCE(pf.cost_of_goods_sold, 0)::decimal(10,3),
+        gross_profit = COALESCE(pf.gross_profit, 0)::decimal(10,3),
+        turnover_rate = CASE
+          WHEN COALESCE(pf.avg_inventory_value, 0) > 0 THEN
+            COALESCE(pf.cost_of_goods_sold, 0) / NULLIF(pf.avg_inventory_value, 0)
          ELSE 0
-        END,
-        pm.last_calculated_at = CURRENT_TIMESTAMP
+        END::decimal(12,3),
+        gmroi = CASE
+          WHEN COALESCE(pf.avg_inventory_value, 0) > 0 THEN
+            COALESCE(pf.gross_profit, 0) / NULLIF(pf.avg_inventory_value, 0)
+          ELSE 0
+        END::decimal(10,3),
+        last_calculated_at = CURRENT_TIMESTAMP
+      FROM product_financials pf
+      WHERE pm.pid = pf.pid
     `);
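For reference, the standard formulas this hunk adopts, written out in plain text (names mirror the query's aliases):

average_inventory_value = (beginning_value + current_value) / 2
turnover_rate           = cost_of_goods_sold / average_inventory_value
gmroi                   = gross_profit / average_inventory_value

The earlier version annualized GMROI by active selling days against the point-in-time inventory value; the rewrite bases both ratios on the 12-month average inventory computed into temp_beginning_inventory.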

     processedCount = Math.floor(totalProducts * 0.65);

@@ -114,52 +173,8 @@ async function calculateFinancialMetrics(startTime, totalProducts, processedCoun
       success
     };

-    // Update time-based aggregates with optimized query
-    await connection.query(`
-      WITH monthly_financials AS (
-        SELECT
-          p.pid,
-          YEAR(o.date) as year,
-          MONTH(o.date) as month,
-          p.cost_price * p.stock_quantity as inventory_value,
-          SUM(o.quantity * (o.price - p.cost_price)) as gross_profit,
-          COUNT(DISTINCT DATE(o.date)) as active_days,
-          MIN(o.date) as period_start,
-          MAX(o.date) as period_end
-        FROM products p
-        LEFT JOIN orders o ON p.pid = o.pid
-        WHERE o.canceled = false
-        GROUP BY p.pid, YEAR(o.date), MONTH(o.date)
-      )
-      UPDATE product_time_aggregates pta
-      JOIN monthly_financials mf ON pta.pid = mf.pid
-        AND pta.year = mf.year
-        AND pta.month = mf.month
-      SET
-        pta.inventory_value = COALESCE(mf.inventory_value, 0),
-        pta.gmroi = CASE
-          WHEN COALESCE(mf.inventory_value, 0) > 0 AND mf.active_days > 0 THEN
-            (COALESCE(mf.gross_profit, 0) * (365.0 / mf.active_days)) / COALESCE(mf.inventory_value, 0)
-          ELSE 0
-        END
-    `);
-
-    processedCount = Math.floor(totalProducts * 0.70);
-    outputProgress({
-      status: 'running',
-      operation: 'Time-based aggregates updated',
-      current: processedCount,
-      total: totalProducts,
-      elapsed: formatElapsedTime(startTime),
-      remaining: estimateRemaining(startTime, processedCount, totalProducts),
-      rate: calculateRate(startTime, processedCount),
-      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
-      timing: {
-        start_time: new Date(startTime).toISOString(),
-        end_time: new Date().toISOString(),
-        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
-      }
-    });
+    // Clean up temporary tables
+    await connection.query('DROP TABLE IF EXISTS temp_beginning_inventory');

     // If we get here, everything completed successfully
     success = true;

@@ -168,7 +183,8 @@ async function calculateFinancialMetrics(startTime, totalProducts, processedCoun
     await connection.query(`
       INSERT INTO calculate_status (module_name, last_calculation_timestamp)
       VALUES ('financial_metrics', NOW())
-      ON DUPLICATE KEY UPDATE last_calculation_timestamp = NOW()
+      ON CONFLICT (module_name) DO UPDATE
+      SET last_calculation_timestamp = NOW()
     `);

     return {

@@ -184,6 +200,12 @@ async function calculateFinancialMetrics(startTime, totalProducts, processedCoun
     throw error;
   } finally {
     if (connection) {
+      try {
+        // Make sure temporary tables are always cleaned up
+        await connection.query('DROP TABLE IF EXISTS temp_beginning_inventory');
+      } catch (err) {
+        console.error('Error cleaning up temp tables:', err);
+      }
       connection.release();
     }
   }

736
inventory-server/old/metrics/product-metrics.js
Normal file

@@ -0,0 +1,736 @@
const { outputProgress, formatElapsedTime, estimateRemaining, calculateRate, logError } = require('./utils/progress');
const { getConnection } = require('./utils/db');

// Helper function to handle NaN and undefined values
function sanitizeValue(value) {
  if (value === undefined || value === null || Number.isNaN(value)) {
    return null;
  }
  return value;
}
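A quick illustration of what sanitizeValue normalizes (hypothetical inputs, not from this file):

sanitizeValue(undefined); // null
sanitizeValue(NaN);       // null
sanitizeValue(0);         // 0 -- falsy but valid, passed through
sanitizeValue('12.5');    // '12.5'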

async function calculateProductMetrics(startTime, totalProducts, processedCount = 0, isCancelled = false) {
  let connection;
  let success = false;
  let processedOrders = 0;
  const BATCH_SIZE = 5000;

  try {
    connection = await getConnection();
    // Skip flags are inherited from the parent scope
    const SKIP_PRODUCT_BASE_METRICS = 0;
    const SKIP_PRODUCT_TIME_AGGREGATES = 0;

    // Get total product count if not provided
    if (!totalProducts) {
      const productCount = await connection.query('SELECT COUNT(*) as count FROM products');
      totalProducts = parseInt(productCount.rows[0].count);
    }

    if (isCancelled) {
      outputProgress({
        status: 'cancelled',
        operation: 'Product metrics calculation cancelled',
        current: processedCount,
        total: totalProducts,
        elapsed: formatElapsedTime(startTime),
        remaining: null,
        rate: calculateRate(startTime, processedCount),
        percentage: ((processedCount / totalProducts) * 100).toFixed(1),
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
        }
      });
      return {
        processedProducts: processedCount,
        processedOrders,
        processedPurchaseOrders: 0,
        success
      };
    }

    // First ensure all products have a metrics record
    await connection.query(`
      INSERT INTO product_metrics (pid, last_calculated_at)
      SELECT pid, NOW()
      FROM products
      ON CONFLICT (pid) DO NOTHING
    `);

    // Get threshold settings once
    const thresholds = await connection.query(`
      SELECT critical_days, reorder_days, overstock_days, low_stock_threshold
      FROM stock_thresholds
      WHERE category_id IS NULL AND vendor IS NULL
      LIMIT 1
    `);

    // Check if threshold data was returned
    if (!thresholds.rows || thresholds.rows.length === 0) {
      console.warn('No default thresholds found in the database. Using explicit type casting in the query.');
    }

    const defaultThresholds = thresholds.rows[0];

    // Get financial calculation configuration parameters
    const financialConfig = await connection.query(`
      SELECT
        order_cost,
        holding_rate,
        service_level_z_score,
        min_reorder_qty,
        default_reorder_qty,
        default_safety_stock
      FROM financial_calc_config
      WHERE id = 1
      LIMIT 1
    `);
    const finConfig = financialConfig.rows[0] || {
      order_cost: 25.00,
      holding_rate: 0.25,
      service_level_z_score: 1.96,
      min_reorder_qty: 1,
      default_reorder_qty: 5,
      default_safety_stock: 5
    };

    // Calculate base product metrics
    if (!SKIP_PRODUCT_BASE_METRICS) {
      outputProgress({
        status: 'running',
        operation: 'Starting base product metrics calculation',
        current: processedCount,
        total: totalProducts,
        elapsed: formatElapsedTime(startTime),
        remaining: estimateRemaining(startTime, processedCount, totalProducts),
        rate: calculateRate(startTime, processedCount),
        percentage: ((processedCount / totalProducts) * 100).toFixed(1),
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
        }
      });

      // Get order count that will be processed
      const orderCount = await connection.query(`
        SELECT COUNT(*) as count
        FROM orders o
        WHERE o.canceled = false
      `);
      processedOrders = parseInt(orderCount.rows[0].count);

      // Clear temporary tables
      await connection.query('DROP TABLE IF EXISTS temp_sales_metrics');
      await connection.query('DROP TABLE IF EXISTS temp_purchase_metrics');

      // Create temp_sales_metrics
      await connection.query(`
        CREATE TEMPORARY TABLE temp_sales_metrics (
          pid BIGINT NOT NULL,
          daily_sales_avg DECIMAL(10,3),
          weekly_sales_avg DECIMAL(10,3),
          monthly_sales_avg DECIMAL(10,3),
          total_revenue DECIMAL(10,3),
          avg_margin_percent DECIMAL(10,3),
          first_sale_date DATE,
          last_sale_date DATE,
          stddev_daily_sales DECIMAL(10,3),
          PRIMARY KEY (pid)
        )
      `);

      // Create temp_purchase_metrics
      await connection.query(`
        CREATE TEMPORARY TABLE temp_purchase_metrics (
          pid BIGINT NOT NULL,
          avg_lead_time_days DECIMAL(10,2),
          last_purchase_date DATE,
          first_received_date DATE,
          last_received_date DATE,
          stddev_lead_time_days DECIMAL(10,2),
          PRIMARY KEY (pid)
        )
      `);

      // Populate temp_sales_metrics with base stats and sales averages
      await connection.query(`
        INSERT INTO temp_sales_metrics
        SELECT
          p.pid,
          COALESCE(SUM(o.quantity) / NULLIF(COUNT(DISTINCT DATE(o.date)), 0), 0) as daily_sales_avg,
          COALESCE(SUM(o.quantity) / NULLIF(CEIL(COUNT(DISTINCT DATE(o.date)) / 7), 0), 0) as weekly_sales_avg,
          COALESCE(SUM(o.quantity) / NULLIF(CEIL(COUNT(DISTINCT DATE(o.date)) / 30), 0), 0) as monthly_sales_avg,
          COALESCE(SUM(o.quantity * o.price), 0) as total_revenue,
          CASE
            WHEN SUM(o.quantity * o.price) > 0
            THEN ((SUM(o.quantity * o.price) - SUM(o.quantity * p.cost_price)) / SUM(o.quantity * o.price)) * 100
            ELSE 0
          END as avg_margin_percent,
          MIN(o.date) as first_sale_date,
          MAX(o.date) as last_sale_date,
          COALESCE(STDDEV_SAMP(daily_qty.quantity), 0) as stddev_daily_sales
        FROM products p
        LEFT JOIN orders o ON p.pid = o.pid
          AND o.canceled = false
          AND o.date >= CURRENT_DATE - INTERVAL '90 days'
        LEFT JOIN (
          SELECT
            pid,
            DATE(date) as sale_date,
            SUM(quantity) as quantity
          FROM orders
          WHERE canceled = false
            AND date >= CURRENT_DATE - INTERVAL '90 days'
          GROUP BY pid, DATE(date)
        ) daily_qty ON p.pid = daily_qty.pid
        GROUP BY p.pid
      `);

      // Populate temp_purchase_metrics with timeout protection
      await Promise.race([
        connection.query(`
          INSERT INTO temp_purchase_metrics
          SELECT
            p.pid,
            AVG(
              CASE
                WHEN po.received_date IS NOT NULL AND po.date IS NOT NULL
                THEN EXTRACT(EPOCH FROM (po.received_date::timestamp with time zone - po.date::timestamp with time zone)) / 86400.0
                ELSE NULL
              END
            ) as avg_lead_time_days,
            MAX(po.date) as last_purchase_date,
            MIN(po.received_date) as first_received_date,
            MAX(po.received_date) as last_received_date,
            STDDEV_SAMP(
              CASE
                WHEN po.received_date IS NOT NULL AND po.date IS NOT NULL
                THEN EXTRACT(EPOCH FROM (po.received_date::timestamp with time zone - po.date::timestamp with time zone)) / 86400.0
                ELSE NULL
              END
            ) as stddev_lead_time_days
          FROM products p
          LEFT JOIN purchase_orders po ON p.pid = po.pid
            AND po.received_date IS NOT NULL
            AND po.date IS NOT NULL
            AND po.date >= CURRENT_DATE - INTERVAL '365 days'
          GROUP BY p.pid
        `),
        new Promise((_, reject) =>
          setTimeout(() => reject(new Error('Timeout: temp_purchase_metrics query took too long')), 60000)
        )
      ]).catch(async (err) => {
        logError(err, 'Error populating temp_purchase_metrics, continuing with empty table');
        // Create an empty fallback to continue processing
        await connection.query(`
          INSERT INTO temp_purchase_metrics
          SELECT
            p.pid,
            30.0 as avg_lead_time_days,
            NULL as last_purchase_date,
            NULL as first_received_date,
            NULL as last_received_date,
            0.0 as stddev_lead_time_days
          FROM products p
          LEFT JOIN temp_purchase_metrics tpm ON p.pid = tpm.pid
          WHERE tpm.pid IS NULL
        `);
      });

      // Process updates in batches
      let lastPid = 0;
      let batchCount = 0;
      const MAX_BATCHES = 1000; // Safety limit for number of batches to prevent infinite loops

      while (batchCount < MAX_BATCHES) {
        if (isCancelled) break;

        batchCount++;
        const batch = await connection.query(
          'SELECT pid FROM products WHERE pid > $1 ORDER BY pid LIMIT $2',
          [lastPid, BATCH_SIZE]
        );

        if (batch.rows.length === 0) break;

        // Process the entire batch in a single efficient query
        const lowStockThreshold = parseInt(defaultThresholds?.low_stock_threshold) || 5;
        const criticalDays = parseInt(defaultThresholds?.critical_days) || 7;
        const reorderDays = parseInt(defaultThresholds?.reorder_days) || 14;
        const overstockDays = parseInt(defaultThresholds?.overstock_days) || 90;
        const serviceLevel = parseFloat(finConfig?.service_level_z_score) || 1.96;
        const defaultSafetyStock = parseInt(finConfig?.default_safety_stock) || 5;
        const defaultReorderQty = parseInt(finConfig?.default_reorder_qty) || 5;
        const orderCost = parseFloat(finConfig?.order_cost) || 25.00;
        const holdingRate = parseFloat(finConfig?.holding_rate) || 0.25;
        const minReorderQty = parseInt(finConfig?.min_reorder_qty) || 1;

        await connection.query(`
          UPDATE product_metrics pm
          SET
            inventory_value = p.stock_quantity * NULLIF(p.cost_price, 0),
            daily_sales_avg = COALESCE(sm.daily_sales_avg, 0),
            weekly_sales_avg = COALESCE(sm.weekly_sales_avg, 0),
            monthly_sales_avg = COALESCE(sm.monthly_sales_avg, 0),
            total_revenue = COALESCE(sm.total_revenue, 0),
            avg_margin_percent = COALESCE(sm.avg_margin_percent, 0),
            first_sale_date = sm.first_sale_date,
            last_sale_date = sm.last_sale_date,
            avg_lead_time_days = COALESCE(lm.avg_lead_time_days, 30.0),
            days_of_inventory = CASE
              WHEN COALESCE(sm.daily_sales_avg, 0) > 0
              THEN FLOOR(p.stock_quantity / NULLIF(sm.daily_sales_avg, 0))
              ELSE NULL
            END,
            weeks_of_inventory = CASE
              WHEN COALESCE(sm.weekly_sales_avg, 0) > 0
              THEN FLOOR(p.stock_quantity / NULLIF(sm.weekly_sales_avg, 0))
              ELSE NULL
            END,
            stock_status = CASE
              WHEN p.stock_quantity <= 0 THEN 'Out of Stock'
              WHEN COALESCE(sm.daily_sales_avg, 0) = 0 AND p.stock_quantity <= ${lowStockThreshold} THEN 'Low Stock'
              WHEN COALESCE(sm.daily_sales_avg, 0) = 0 THEN 'In Stock'
              WHEN p.stock_quantity / NULLIF(sm.daily_sales_avg, 0) <= ${criticalDays} THEN 'Critical'
              WHEN p.stock_quantity / NULLIF(sm.daily_sales_avg, 0) <= ${reorderDays} THEN 'Reorder'
              WHEN p.stock_quantity / NULLIF(sm.daily_sales_avg, 0) > ${overstockDays} THEN 'Overstocked'
              ELSE 'Healthy'
            END,
            safety_stock = CASE
              WHEN COALESCE(sm.daily_sales_avg, 0) > 0 AND COALESCE(lm.avg_lead_time_days, 0) > 0 THEN
                CEIL(
                  ${serviceLevel} * SQRT(
                    GREATEST(0, COALESCE(lm.avg_lead_time_days, 0)) * POWER(COALESCE(sm.stddev_daily_sales, 0), 2) +
                    POWER(COALESCE(sm.daily_sales_avg, 0), 2) * POWER(COALESCE(lm.stddev_lead_time_days, 0), 2)
                  )
                )
              ELSE ${defaultSafetyStock}
            END,
            reorder_point = CASE
              WHEN COALESCE(sm.daily_sales_avg, 0) > 0 THEN
                CEIL(sm.daily_sales_avg * GREATEST(0, COALESCE(lm.avg_lead_time_days, 30.0))) +
                (CASE
                  WHEN COALESCE(sm.daily_sales_avg, 0) > 0 AND COALESCE(lm.avg_lead_time_days, 0) > 0 THEN
                    CEIL(
                      ${serviceLevel} * SQRT(
                        GREATEST(0, COALESCE(lm.avg_lead_time_days, 0)) * POWER(COALESCE(sm.stddev_daily_sales, 0), 2) +
                        POWER(COALESCE(sm.daily_sales_avg, 0), 2) * POWER(COALESCE(lm.stddev_lead_time_days, 0), 2)
                      )
                    )
                  ELSE ${defaultSafetyStock}
                END)
              ELSE ${lowStockThreshold}
            END,
            reorder_qty = CASE
              WHEN COALESCE(sm.daily_sales_avg, 0) > 0 AND NULLIF(p.cost_price, 0) IS NOT NULL AND NULLIF(p.cost_price, 0) > 0 THEN
                GREATEST(
                  CEIL(SQRT(
                    (2 * (sm.daily_sales_avg * 365) * ${orderCost}) /
                    NULLIF(p.cost_price * ${holdingRate}, 0)
                  )),
                  ${minReorderQty}
                )
              ELSE ${defaultReorderQty}
            END,
            overstocked_amt = CASE
              WHEN p.stock_quantity / NULLIF(sm.daily_sales_avg, 0) > ${overstockDays}
              THEN GREATEST(0, p.stock_quantity - CEIL(sm.daily_sales_avg * ${overstockDays}))
              ELSE 0
            END,
            last_calculated_at = NOW()
          FROM products p
          LEFT JOIN temp_sales_metrics sm ON p.pid = sm.pid
          LEFT JOIN temp_purchase_metrics lm ON p.pid = lm.pid
          WHERE p.pid = ANY($1::BIGINT[])
            AND pm.pid = p.pid
        `, [batch.rows.map(row => row.pid)]);
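The statistical formulas embedded in that UPDATE, written out (plain-text sketch; z is the service-level z-score, d the mean daily demand, LT the mean lead time in days, sigma_d and sigma_LT the corresponding standard deviations, D annual demand, S the order cost, and H the per-unit annual holding cost):

safety_stock  = ceil(z * sqrt(LT * sigma_d^2 + d^2 * sigma_LT^2))
reorder_point = ceil(d * LT) + safety_stock
reorder_qty   = max(ceil(sqrt(2 * D * S / H)), min_reorder_qty)   -- classic EOQ

Here D = daily_sales_avg * 365, S = order_cost, and H = cost_price * holding_rate, matching the interpolated config values.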

        lastPid = batch.rows[batch.rows.length - 1].pid;
        processedCount += batch.rows.length;

        outputProgress({
          status: 'running',
          operation: 'Processing base metrics batch',
          current: processedCount,
          total: totalProducts,
          elapsed: formatElapsedTime(startTime),
          remaining: estimateRemaining(startTime, processedCount, totalProducts),
          rate: calculateRate(startTime, processedCount),
          percentage: ((processedCount / totalProducts) * 100).toFixed(1),
          timing: {
            start_time: new Date(startTime).toISOString(),
            end_time: new Date().toISOString(),
            elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
          }
        });
      }

      // Add safety check if the loop processed MAX_BATCHES
      if (batchCount >= MAX_BATCHES) {
        logError(new Error(`Reached maximum batch count (${MAX_BATCHES}). Process may have entered an infinite loop.`), 'Batch processing safety limit reached');
      }
    }

    // Calculate forecast accuracy and bias in batches
    let forecastPid = 0;
    while (true) {
      if (isCancelled) break;

      const forecastBatch = await connection.query(
        'SELECT pid FROM products WHERE pid > $1 ORDER BY pid LIMIT $2',
        [forecastPid, BATCH_SIZE]
      );

      if (forecastBatch.rows.length === 0) break;

      const forecastPidArray = forecastBatch.rows.map(row => row.pid);

      // Inline the batch's pids as a Postgres BIGINT[] array literal
      await connection.query(`
        WITH forecast_metrics AS (
          SELECT
            sf.pid,
            AVG(CASE
              WHEN o.quantity > 0
              THEN ABS(sf.forecast_quantity - o.quantity) / o.quantity * 100
              ELSE 100
            END) as avg_forecast_error,
            AVG(CASE
              WHEN o.quantity > 0
              THEN (sf.forecast_quantity - o.quantity) / o.quantity * 100
              ELSE 0
            END) as avg_forecast_bias,
            MAX(sf.forecast_date) as last_forecast_date
          FROM sales_forecasts sf
          JOIN orders o ON sf.pid = o.pid
            AND DATE(o.date) = sf.forecast_date
          WHERE o.canceled = false
            AND sf.forecast_date >= CURRENT_DATE - INTERVAL '90 days'
            AND sf.pid = ANY('{${forecastPidArray.join(',')}}'::BIGINT[])
          GROUP BY sf.pid
        )
        UPDATE product_metrics pm
        SET
          forecast_accuracy = GREATEST(0, 100 - LEAST(fm.avg_forecast_error, 100)),
          forecast_bias = GREATEST(-100, LEAST(fm.avg_forecast_bias, 100)),
          last_forecast_date = fm.last_forecast_date,
          last_calculated_at = NOW()
        FROM forecast_metrics fm
        WHERE pm.pid = fm.pid
      `);

      forecastPid = forecastBatch.rows[forecastBatch.rows.length - 1].pid;
    }
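In effect this computes a clamped MAPE-style score per product. Written out (plain-text sketch mirroring the query, over days where a forecast matched an order):

forecast_error    = mean(|forecast - actual| / actual) * 100
forecast_accuracy = max(0, 100 - min(forecast_error, 100))
forecast_bias     = clamp(mean((forecast - actual) / actual) * 100, -100, 100)

A positive bias means the model systematically over-forecasts; a negative bias means it under-forecasts.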

    // Calculate product time aggregates
    if (!SKIP_PRODUCT_TIME_AGGREGATES) {
      outputProgress({
        status: 'running',
        operation: 'Starting product time aggregates calculation',
        current: processedCount || 0,
        total: totalProducts || 0,
        elapsed: formatElapsedTime(startTime),
        remaining: estimateRemaining(startTime, processedCount || 0, totalProducts || 0),
        rate: calculateRate(startTime, processedCount || 0),
        percentage: (((processedCount || 0) / (totalProducts || 1)) * 100).toFixed(1),
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
        }
      });

      // Note: The time-aggregates calculation has been moved to time-aggregates.js
      // This module will not duplicate that functionality
      processedCount = Math.floor(totalProducts * 0.6);
      outputProgress({
        status: 'running',
        operation: 'Product time aggregates calculation delegated to time-aggregates module',
        current: processedCount || 0,
        total: totalProducts || 0,
        elapsed: formatElapsedTime(startTime),
        remaining: estimateRemaining(startTime, processedCount || 0, totalProducts || 0),
        rate: calculateRate(startTime, processedCount || 0),
        percentage: (((processedCount || 0) / (totalProducts || 1)) * 100).toFixed(1),
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
        }
      });
    } else {
      processedCount = Math.floor(totalProducts * 0.6);
      outputProgress({
        status: 'running',
        operation: 'Skipping product time aggregates calculation',
        current: processedCount || 0,
        total: totalProducts || 0,
        elapsed: formatElapsedTime(startTime),
        remaining: estimateRemaining(startTime, processedCount || 0, totalProducts || 0),
        rate: calculateRate(startTime, processedCount || 0),
        percentage: (((processedCount || 0) / (totalProducts || 1)) * 100).toFixed(1),
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
        }
      });
    }

    // Calculate ABC classification
    outputProgress({
      status: 'running',
      operation: 'Starting ABC classification',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    if (isCancelled) return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0, // This module doesn't process POs
      success
    };

    const abcConfig = await connection.query('SELECT a_threshold, b_threshold FROM abc_classification_config WHERE id = 1');
    const abcThresholds = abcConfig.rows[0] || { a_threshold: 20, b_threshold: 50 };

    // Extract values and ensure they are valid numbers
    const aThreshold = parseFloat(abcThresholds.a_threshold) || 20;
    const bThreshold = parseFloat(abcThresholds.b_threshold) || 50;

    // First, create and populate the rankings table with an index
    await connection.query('DROP TABLE IF EXISTS temp_revenue_ranks');
    await connection.query(`
      CREATE TEMPORARY TABLE temp_revenue_ranks (
        pid BIGINT NOT NULL,
        total_revenue DECIMAL(10,3),
        rank_num INT,
        dense_rank_num INT,
        percentile DECIMAL(5,2),
        total_count INT,
        PRIMARY KEY (pid)
      )
    `);
    await connection.query('CREATE INDEX ON temp_revenue_ranks (rank_num)');
    await connection.query('CREATE INDEX ON temp_revenue_ranks (dense_rank_num)');
    await connection.query('CREATE INDEX ON temp_revenue_ranks (percentile)');

    // Calculate rankings with proper tie handling
    await connection.query(`
      INSERT INTO temp_revenue_ranks
      WITH revenue_data AS (
        SELECT
          pid,
          total_revenue,
          COUNT(*) OVER () as total_count,
          PERCENT_RANK() OVER (ORDER BY total_revenue DESC) * 100 as percentile,
          RANK() OVER (ORDER BY total_revenue DESC) as rank_num,
          DENSE_RANK() OVER (ORDER BY total_revenue DESC) as dense_rank_num
        FROM product_metrics
        WHERE total_revenue > 0
      )
      SELECT
        pid,
        total_revenue,
        rank_num,
        dense_rank_num,
        percentile,
        total_count
      FROM revenue_data
    `);
|
||||
|
||||
// Get total count for percentage calculation
|
||||
const rankingCount = await connection.query('SELECT MAX(rank_num) as total_count FROM temp_revenue_ranks');
|
||||
const totalCount = parseInt(rankingCount.rows[0].total_count) || 1;
|
||||
|
||||
// Process updates in batches
|
||||
let abcProcessedCount = 0;
|
||||
const batchSize = 5000;
|
||||
const maxPid = await connection.query('SELECT MAX(pid) as max_pid FROM products');
|
||||
const maxProductId = parseInt(maxPid.rows[0].max_pid);
|
||||
|
||||
while (abcProcessedCount < maxProductId) {
|
||||
if (isCancelled) return {
|
||||
processedProducts: processedCount,
|
||||
processedOrders,
|
||||
processedPurchaseOrders: 0,
|
||||
success
|
||||
};
|
||||
|
||||
// Get a batch of PIDs that need updating
|
||||
const pids = await connection.query(`
|
||||
SELECT pm.pid
|
||||
FROM product_metrics pm
|
||||
LEFT JOIN temp_revenue_ranks tr ON pm.pid = tr.pid
|
||||
WHERE pm.pid > $1
|
||||
AND (pm.abc_class IS NULL
|
||||
OR pm.abc_class !=
|
||||
CASE
|
||||
WHEN tr.pid IS NULL THEN 'C'
|
||||
WHEN tr.percentile <= ${aThreshold} THEN 'A'
|
||||
WHEN tr.percentile <= ${bThreshold} THEN 'B'
|
||||
ELSE 'C'
|
||||
END)
|
||||
ORDER BY pm.pid
|
||||
LIMIT $2
|
||||
`, [abcProcessedCount, batchSize]);
|
||||
|
||||
if (pids.rows.length === 0) break;
|
||||
|
||||
const pidValues = pids.rows.map(row => row.pid);
|
||||
|
||||
await connection.query(`
|
||||
UPDATE product_metrics pm
|
||||
SET abc_class =
|
||||
CASE
|
||||
WHEN tr.pid IS NULL THEN 'C'
|
||||
WHEN tr.percentile <= ${aThreshold} THEN 'A'
|
||||
WHEN tr.percentile <= ${bThreshold} THEN 'B'
|
||||
ELSE 'C'
|
||||
END,
|
||||
last_calculated_at = NOW()
|
||||
FROM (SELECT pid, percentile FROM temp_revenue_ranks) tr
|
||||
WHERE pm.pid = tr.pid AND pm.pid = ANY($1::BIGINT[])
|
||||
OR (pm.pid = ANY($1::BIGINT[]) AND tr.pid IS NULL)
|
||||
`, [pidValues]);
|
||||
|
||||
// Now update turnover rate with proper handling of zero inventory periods
|
||||
await connection.query(`
|
||||
UPDATE product_metrics pm
|
||||
SET
|
||||
turnover_rate = CASE
|
||||
WHEN sales.avg_nonzero_stock > 0 AND sales.active_days > 0
|
||||
THEN LEAST(
|
||||
(sales.total_sold / sales.avg_nonzero_stock) * (365.0 / sales.active_days),
|
||||
999.99
|
||||
)
|
||||
ELSE 0
|
||||
END,
|
||||
last_calculated_at = NOW()
|
||||
FROM (
|
||||
SELECT
|
||||
o.pid,
|
||||
SUM(o.quantity) as total_sold,
|
||||
COUNT(DISTINCT DATE(o.date)) as active_days,
|
||||
AVG(CASE
|
||||
WHEN p.stock_quantity > 0 THEN p.stock_quantity
|
||||
ELSE NULL
|
||||
END) as avg_nonzero_stock
|
||||
FROM orders o
|
||||
JOIN products p ON o.pid = p.pid
|
||||
WHERE o.canceled = false
|
||||
AND o.date >= CURRENT_DATE - INTERVAL '90 days'
|
||||
AND o.pid = ANY($1::BIGINT[])
|
||||
GROUP BY o.pid
|
||||
) sales
|
||||
WHERE pm.pid = sales.pid
|
||||
`, [pidValues]);
|
||||
|
||||
abcProcessedCount = pids.rows[pids.rows.length - 1].pid;
|
||||
|
||||
// Calculate progress proportionally to total products
|
||||
processedCount = Math.floor(totalProducts * (0.60 + (abcProcessedCount / maxProductId) * 0.2));
|
||||
|
||||
outputProgress({
|
||||
status: 'running',
|
||||
operation: 'ABC classification progress',
|
||||
current: processedCount,
|
||||
total: totalProducts,
|
||||
elapsed: formatElapsedTime(startTime),
|
||||
remaining: estimateRemaining(startTime, processedCount, totalProducts),
|
||||
rate: calculateRate(startTime, processedCount),
|
||||
percentage: ((processedCount / totalProducts) * 100).toFixed(1),
|
||||
timing: {
|
||||
start_time: new Date(startTime).toISOString(),
|
||||
end_time: new Date().toISOString(),
|
||||
elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// If we get here, everything completed successfully
|
||||
success = true;
|
||||
|
||||
// Update calculate_status
|
||||
await connection.query(`
|
||||
INSERT INTO calculate_status (module_name, last_calculation_timestamp)
|
||||
VALUES ('product_metrics', NOW())
|
||||
ON CONFLICT (module_name) DO UPDATE
|
||||
SET last_calculation_timestamp = NOW()
|
||||
`);
|
||||
|
||||
return {
|
||||
processedProducts: processedCount || 0,
|
||||
processedOrders: processedOrders || 0,
|
||||
processedPurchaseOrders: 0, // This module doesn't process POs
|
||||
success
|
||||
};
|
||||
|
||||
} catch (error) {
|
||||
success = false;
|
||||
logError(error, 'Error calculating product metrics');
|
||||
throw error;
|
||||
} finally {
|
||||
// Always clean up temporary tables, even if an error occurred
|
||||
if (connection) {
|
||||
try {
|
||||
await connection.query('DROP TABLE IF EXISTS temp_sales_metrics');
|
||||
await connection.query('DROP TABLE IF EXISTS temp_purchase_metrics');
|
||||
} catch (err) {
|
||||
console.error('Error cleaning up temporary tables:', err);
|
||||
}
|
||||
|
||||
// Make sure to release the connection
|
||||
connection.release();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
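  // Illustration (not part of this file): the ABC assignment performed by the
  // batched SQL above, restated as plain JavaScript. A product's revenue
  // percentile is compared against the configured thresholds, which default to
  // A <= 20 and B <= 50 when abc_classification_config has no row:
  //
  //   function abcClass(percentile, aThreshold = 20, bThreshold = 50) {
  //     if (percentile == null) return 'C'; // product absent from temp_revenue_ranks
  //     if (percentile <= aThreshold) return 'A';
  //     if (percentile <= bThreshold) return 'B';
  //     return 'C';
  //   }
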
  function calculateStockStatus(stock, config, daily_sales_avg, weekly_sales_avg, monthly_sales_avg) {
    if (stock <= 0) {
      return 'Out of Stock';
    }

    // Use the most appropriate sales average based on data quality
    let sales_avg = daily_sales_avg;
    if (sales_avg === 0) {
      sales_avg = weekly_sales_avg / 7;
    }
    if (sales_avg === 0) {
      sales_avg = monthly_sales_avg / 30;
    }

    if (sales_avg === 0) {
      return stock <= config.low_stock_threshold ? 'Low Stock' : 'In Stock';
    }

    const days_of_stock = stock / sales_avg;

    if (days_of_stock <= config.critical_days) {
      return 'Critical';
    } else if (days_of_stock <= config.reorder_days) {
      return 'Reorder';
    } else if (days_of_stock > config.overstock_days) {
      return 'Overstocked';
    }

    return 'Healthy';
  }

  // Note: calculateReorderQuantities function has been removed as its logic has been incorporated
  // in the main SQL query with configurable parameters

  module.exports = calculateProductMetrics;
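calculateStockStatus expects a thresholds row with low_stock_threshold, critical_days, reorder_days, and overstock_days. A minimal call sketch (the literal values below are illustrative, not taken from this commit):

  const config = { low_stock_threshold: 5, critical_days: 7, reorder_days: 14, overstock_days: 90 };
  // 3 units on hand at 1 unit/day gives 3 days of stock, within critical_days:
  calculateStockStatus(3, config, 1, 0, 0); // => 'Critical'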
@@ -32,13 +32,13 @@ async function calculateSalesForecasts(startTime, totalProducts, processedCount
     }
 
     // Get order count that will be processed
-    const [orderCount] = await connection.query(`
+    const orderCount = await connection.query(`
       SELECT COUNT(*) as count
       FROM orders o
       WHERE o.canceled = false
-        AND o.date >= DATE_SUB(CURRENT_DATE, INTERVAL 90 DAY)
+        AND o.date >= CURRENT_DATE - INTERVAL '90 days'
     `);
-    processedOrders = orderCount[0].count;
+    processedOrders = parseInt(orderCount.rows[0].count);
 
     outputProgress({
       status: 'running',
@@ -69,15 +69,15 @@ async function calculateSalesForecasts(startTime, totalProducts, processedCount
     await connection.query(`
       INSERT INTO temp_forecast_dates
       SELECT
-        DATE_ADD(CURRENT_DATE, INTERVAL n DAY) as forecast_date,
-        DAYOFWEEK(DATE_ADD(CURRENT_DATE, INTERVAL n DAY)) as day_of_week,
-        MONTH(DATE_ADD(CURRENT_DATE, INTERVAL n DAY)) as month
+        CURRENT_DATE + (n || ' days')::INTERVAL as forecast_date,
+        EXTRACT(DOW FROM CURRENT_DATE + (n || ' days')::INTERVAL) + 1 as day_of_week,
+        EXTRACT(MONTH FROM CURRENT_DATE + (n || ' days')::INTERVAL) as month
       FROM (
-        SELECT a.N + b.N * 10 as n
+        SELECT a.n + b.n * 10 as n
         FROM
-          (SELECT 0 as N UNION SELECT 1 UNION SELECT 2 UNION SELECT 3 UNION SELECT 4 UNION
+          (SELECT 0 as n UNION SELECT 1 UNION SELECT 2 UNION SELECT 3 UNION SELECT 4 UNION
            SELECT 5 UNION SELECT 6 UNION SELECT 7 UNION SELECT 8 UNION SELECT 9) a,
-          (SELECT 0 as N UNION SELECT 1 UNION SELECT 2) b
+          (SELECT 0 as n UNION SELECT 1 UNION SELECT 2) b
         ORDER BY n
         LIMIT 31
       ) numbers
@@ -109,17 +109,17 @@ async function calculateSalesForecasts(startTime, totalProducts, processedCount
 
     // Create temporary table for daily sales stats
     await connection.query(`
-      CREATE TEMPORARY TABLE IF NOT EXISTS temp_daily_sales AS
+      CREATE TEMPORARY TABLE temp_daily_sales AS
       SELECT
         o.pid,
-        DAYOFWEEK(o.date) as day_of_week,
+        EXTRACT(DOW FROM o.date) + 1 as day_of_week,
         SUM(o.quantity) as daily_quantity,
         SUM(o.price * o.quantity) as daily_revenue,
         COUNT(DISTINCT DATE(o.date)) as day_count
       FROM orders o
       WHERE o.canceled = false
-        AND o.date >= DATE_SUB(CURRENT_DATE, INTERVAL 90 DAY)
-      GROUP BY o.pid, DAYOFWEEK(o.date)
+        AND o.date >= CURRENT_DATE - INTERVAL '90 days'
+      GROUP BY o.pid, EXTRACT(DOW FROM o.date) + 1
     `);
 
     processedCount = Math.floor(totalProducts * 0.94);
@@ -148,7 +148,7 @@ async function calculateSalesForecasts(startTime, totalProducts, processedCount
 
     // Create temporary table for product stats
     await connection.query(`
-      CREATE TEMPORARY TABLE IF NOT EXISTS temp_product_stats AS
+      CREATE TEMPORARY TABLE temp_product_stats AS
       SELECT
         pid,
         AVG(daily_revenue) as overall_avg_revenue,
@@ -186,10 +186,9 @@ async function calculateSalesForecasts(startTime, totalProducts, processedCount
       INSERT INTO sales_forecasts (
         pid,
         forecast_date,
-        forecast_units,
-        forecast_revenue,
+        forecast_quantity,
         confidence_level,
-        last_calculated_at
+        created_at
       )
       WITH daily_stats AS (
         SELECT
@@ -217,35 +216,9 @@ async function calculateSalesForecasts(startTime, totalProducts, processedCount
         GREATEST(0,
           ROUND(
             ds.avg_daily_qty *
-            (1 + COALESCE(sf.seasonality_factor, 0)) *
-            CASE
-              WHEN ds.std_daily_qty / NULLIF(ds.avg_daily_qty, 0) > 1.5 THEN 0.85
-              WHEN ds.std_daily_qty / NULLIF(ds.avg_daily_qty, 0) > 1.0 THEN 0.9
-              WHEN ds.std_daily_qty / NULLIF(ds.avg_daily_qty, 0) > 0.5 THEN 0.95
-              ELSE 1.0
-            END,
-            2
+            (1 + COALESCE(sf.seasonality_factor, 0))
           )
-        ) as forecast_units,
-        GREATEST(0,
-          ROUND(
-            COALESCE(
-              CASE
-                WHEN ds.data_points >= 4 THEN ds.avg_daily_revenue
-                ELSE ps.overall_avg_revenue
-              END *
-              (1 + COALESCE(sf.seasonality_factor, 0)) *
-              CASE
-                WHEN ds.std_daily_revenue / NULLIF(ds.avg_daily_revenue, 0) > 1.5 THEN 0.85
-                WHEN ds.std_daily_revenue / NULLIF(ds.avg_daily_revenue, 0) > 1.0 THEN 0.9
-                WHEN ds.std_daily_revenue / NULLIF(ds.avg_daily_revenue, 0) > 0.5 THEN 0.95
-                ELSE 1.0
-              END,
-              0
-            ),
-            2
-          )
-        ) as forecast_revenue,
+        ) as forecast_quantity,
         CASE
           WHEN ds.total_days >= 60 AND ds.daily_variance_ratio < 0.5 THEN 90
           WHEN ds.total_days >= 60 THEN 85
@@ -255,17 +228,18 @@ async function calculateSalesForecasts(startTime, totalProducts, processedCount
           WHEN ds.total_days >= 14 THEN 65
           ELSE 60
         END as confidence_level,
-        NOW() as last_calculated_at
+        NOW() as created_at
       FROM daily_stats ds
       JOIN temp_product_stats ps ON ds.pid = ps.pid
       CROSS JOIN temp_forecast_dates fd
       LEFT JOIN sales_seasonality sf ON fd.month = sf.month
-      GROUP BY ds.pid, fd.forecast_date, ps.overall_avg_revenue, sf.seasonality_factor
-      ON DUPLICATE KEY UPDATE
-        forecast_units = VALUES(forecast_units),
-        forecast_revenue = VALUES(forecast_revenue),
-        confidence_level = VALUES(confidence_level),
-        last_calculated_at = NOW()
+      GROUP BY ds.pid, fd.forecast_date, ps.overall_avg_revenue, sf.seasonality_factor,
+        ds.avg_daily_qty, ds.std_daily_qty, ds.avg_daily_qty, ds.total_days, ds.daily_variance_ratio
+      ON CONFLICT (pid, forecast_date) DO UPDATE
+      SET
+        forecast_quantity = EXCLUDED.forecast_quantity,
+        confidence_level = EXCLUDED.confidence_level,
+        created_at = NOW()
     `);
 
     processedCount = Math.floor(totalProducts * 0.98);
@@ -294,22 +268,22 @@ async function calculateSalesForecasts(startTime, totalProducts, processedCount
 
     // Create temporary table for category stats
     await connection.query(`
-      CREATE TEMPORARY TABLE IF NOT EXISTS temp_category_sales AS
+      CREATE TEMPORARY TABLE temp_category_sales AS
      SELECT
        pc.cat_id,
-        DAYOFWEEK(o.date) as day_of_week,
+        EXTRACT(DOW FROM o.date) + 1 as day_of_week,
        SUM(o.quantity) as daily_quantity,
        SUM(o.price * o.quantity) as daily_revenue,
        COUNT(DISTINCT DATE(o.date)) as day_count
      FROM orders o
      JOIN product_categories pc ON o.pid = pc.pid
      WHERE o.canceled = false
-        AND o.date >= DATE_SUB(CURRENT_DATE, INTERVAL 90 DAY)
-      GROUP BY pc.cat_id, DAYOFWEEK(o.date)
+        AND o.date >= CURRENT_DATE - INTERVAL '90 days'
+      GROUP BY pc.cat_id, EXTRACT(DOW FROM o.date) + 1
     `);
 
     await connection.query(`
-      CREATE TEMPORARY TABLE IF NOT EXISTS temp_category_stats AS
+      CREATE TEMPORARY TABLE temp_category_stats AS
       SELECT
         cat_id,
         AVG(daily_revenue) as overall_avg_revenue,
@@ -350,14 +324,14 @@ async function calculateSalesForecasts(startTime, totalProducts, processedCount
         forecast_units,
         forecast_revenue,
         confidence_level,
-        last_calculated_at
+        created_at
       )
       SELECT
-        cs.cat_id as category_id,
+        cs.cat_id::bigint as category_id,
         fd.forecast_date,
         GREATEST(0,
-          AVG(cs.daily_quantity) *
-          (1 + COALESCE(sf.seasonality_factor, 0))
+          ROUND(AVG(cs.daily_quantity) *
+          (1 + COALESCE(sf.seasonality_factor, 0)))
         ) as forecast_units,
         GREATEST(0,
           COALESCE(
@@ -365,8 +339,7 @@ async function calculateSalesForecasts(startTime, totalProducts, processedCount
               WHEN SUM(cs.day_count) >= 4 THEN AVG(cs.daily_revenue)
               ELSE ct.overall_avg_revenue
             END *
-            (1 + COALESCE(sf.seasonality_factor, 0)) *
-            (0.95 + (RAND() * 0.1)),
+            (1 + COALESCE(sf.seasonality_factor, 0)),
            0
          )
        ) as forecast_revenue,
@@ -376,27 +349,34 @@ async function calculateSalesForecasts(startTime, totalProducts, processedCount
           WHEN ct.total_days >= 14 THEN 70
           ELSE 60
         END as confidence_level,
-        NOW() as last_calculated_at
+        NOW() as created_at
       FROM temp_category_sales cs
       JOIN temp_category_stats ct ON cs.cat_id = ct.cat_id
       CROSS JOIN temp_forecast_dates fd
       LEFT JOIN sales_seasonality sf ON fd.month = sf.month
-      GROUP BY cs.cat_id, fd.forecast_date, ct.overall_avg_revenue, ct.total_days, sf.seasonality_factor
-      ON DUPLICATE KEY UPDATE
-        forecast_units = VALUES(forecast_units),
-        forecast_revenue = VALUES(forecast_revenue),
-        confidence_level = VALUES(confidence_level),
-        last_calculated_at = NOW()
+      GROUP BY
+        cs.cat_id,
+        fd.forecast_date,
+        ct.overall_avg_revenue,
+        ct.total_days,
+        sf.seasonality_factor,
+        sf.month
+      HAVING AVG(cs.daily_quantity) > 0
+      ON CONFLICT (category_id, forecast_date) DO UPDATE
+      SET
+        forecast_units = EXCLUDED.forecast_units,
+        forecast_revenue = EXCLUDED.forecast_revenue,
+        confidence_level = EXCLUDED.confidence_level,
+        created_at = NOW()
     `);
 
     // Clean up temporary tables
     await connection.query(`
-      DROP TEMPORARY TABLE IF EXISTS temp_forecast_dates;
-      DROP TEMPORARY TABLE IF EXISTS temp_daily_sales;
-      DROP TEMPORARY TABLE IF EXISTS temp_product_stats;
-      DROP TEMPORARY TABLE IF EXISTS temp_category_sales;
-      DROP TEMPORARY TABLE IF EXISTS temp_category_stats;
+      DROP TABLE IF EXISTS temp_forecast_dates;
+      DROP TABLE IF EXISTS temp_daily_sales;
+      DROP TABLE IF EXISTS temp_product_stats;
+      DROP TABLE IF EXISTS temp_category_sales;
+      DROP TABLE IF EXISTS temp_category_stats;
     `);
 
     processedCount = Math.floor(totalProducts * 1.0);
@@ -423,7 +403,8 @@ async function calculateSalesForecasts(startTime, totalProducts, processedCount
     await connection.query(`
       INSERT INTO calculate_status (module_name, last_calculation_timestamp)
       VALUES ('sales_forecasts', NOW())
-      ON DUPLICATE KEY UPDATE last_calculation_timestamp = NOW()
+      ON CONFLICT (module_name) DO UPDATE
+      SET last_calculation_timestamp = NOW()
     `);
 
     return {
@@ -439,6 +420,18 @@ async function calculateSalesForecasts(startTime, totalProducts, processedCount
     throw error;
   } finally {
     if (connection) {
+      try {
+        // Ensure temporary tables are cleaned up
+        await connection.query(`
+          DROP TABLE IF EXISTS temp_forecast_dates;
+          DROP TABLE IF EXISTS temp_daily_sales;
+          DROP TABLE IF EXISTS temp_product_stats;
+          DROP TABLE IF EXISTS temp_category_sales;
+          DROP TABLE IF EXISTS temp_category_stats;
+        `);
+      } catch (err) {
+        console.error('Error cleaning up temporary tables:', err);
+      }
       connection.release();
     }
   }
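Every hunk in this file follows the same MySQL-to-PostgreSQL translation: DATE_SUB(CURRENT_DATE, INTERVAL 90 DAY) becomes CURRENT_DATE - INTERVAL '90 days', DAYOFWEEK(d) becomes EXTRACT(DOW FROM d) + 1 (Postgres numbers Sunday as 0 where MySQL uses 1), and ON DUPLICATE KEY UPDATE becomes ON CONFLICT ... DO UPDATE. A schematic before/after of the upsert half, using an illustrative table rather than one from this repository:

  // MySQL (before):
  await connection.query(`
    INSERT INTO t (id, val) VALUES (1, 2)
    ON DUPLICATE KEY UPDATE val = VALUES(val)
  `);

  // PostgreSQL (after): name the conflict target, read the new row via EXCLUDED
  await connection.query(`
    INSERT INTO t (id, val) VALUES (1, 2)
    ON CONFLICT (id) DO UPDATE SET val = EXCLUDED.val
  `);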
344 inventory-server/old/metrics/time-aggregates.js (Normal file)
@@ -0,0 +1,344 @@
const { outputProgress, formatElapsedTime, estimateRemaining, calculateRate, logError } = require('./utils/progress');
const { getConnection } = require('./utils/db');

async function calculateTimeAggregates(startTime, totalProducts, processedCount = 0, isCancelled = false) {
  const connection = await getConnection();
  let success = false;
  let processedOrders = 0;

  try {
    if (isCancelled) {
      outputProgress({
        status: 'cancelled',
        operation: 'Time aggregates calculation cancelled',
        current: processedCount,
        total: totalProducts,
        elapsed: formatElapsedTime(startTime),
        remaining: null,
        rate: calculateRate(startTime, processedCount),
        percentage: ((processedCount / totalProducts) * 100).toFixed(1),
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
        }
      });
      return {
        processedProducts: processedCount,
        processedOrders: 0,
        processedPurchaseOrders: 0,
        success
      };
    }

    // Get order count that will be processed
    const orderCount = await connection.query(`
      SELECT COUNT(*) as count
      FROM orders o
      WHERE o.canceled = false
    `);
    processedOrders = parseInt(orderCount.rows[0].count);

    outputProgress({
      status: 'running',
      operation: 'Starting time aggregates calculation',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    // Create a temporary table for end-of-month inventory values
    await connection.query(`
      CREATE TEMPORARY TABLE IF NOT EXISTS temp_monthly_inventory AS
      WITH months AS (
        -- Generate all year/month combinations for the last 12 months
        SELECT
          EXTRACT(YEAR FROM month_date)::INTEGER as year,
          EXTRACT(MONTH FROM month_date)::INTEGER as month,
          month_date as start_date,
          (month_date + INTERVAL '1 month'::interval - INTERVAL '1 day'::interval)::DATE as end_date
        FROM (
          SELECT generate_series(
            DATE_TRUNC('month', CURRENT_DATE - INTERVAL '12 months'::interval)::DATE,
            DATE_TRUNC('month', CURRENT_DATE)::DATE,
            INTERVAL '1 month'::interval
          ) as month_date
        ) dates
      ),
      monthly_inventory_calc AS (
        SELECT
          p.pid,
          m.year,
          m.month,
          m.end_date,
          p.stock_quantity as current_quantity,
          -- Units sold after the month end (reversed out of current stock below)
          COALESCE(SUM(
            CASE
              WHEN o.date > m.end_date THEN o.quantity
              ELSE 0
            END
          ), 0) as sold_after_end_date,
          -- Units received after the month end (reversed out of current stock below)
          COALESCE(SUM(
            CASE
              WHEN po.received_date > m.end_date THEN po.received
              ELSE 0
            END
          ), 0) as received_after_end_date,
          p.cost_price
        FROM
          products p
        CROSS JOIN
          months m
        LEFT JOIN
          orders o ON p.pid = o.pid
          AND o.canceled = false
          AND o.date > m.end_date
          AND o.date <= CURRENT_DATE
        LEFT JOIN
          purchase_orders po ON p.pid = po.pid
          AND po.received_date IS NOT NULL
          AND po.received_date > m.end_date
          AND po.received_date <= CURRENT_DATE
        GROUP BY
          p.pid, m.year, m.month, m.end_date, p.stock_quantity, p.cost_price
      )
      SELECT
        pid,
        year,
        month,
        -- End of month quantity, back-calculated from current stock:
        -- add back units sold after month end, remove units received after
        GREATEST(0, current_quantity + sold_after_end_date - received_after_end_date) as end_of_month_quantity,
        -- End of month inventory value
        GREATEST(0, current_quantity + sold_after_end_date - received_after_end_date) * cost_price as end_of_month_value,
        cost_price
      FROM
        monthly_inventory_calc
    `);

    processedCount = Math.floor(totalProducts * 0.40);
    outputProgress({
      status: 'running',
      operation: 'Monthly inventory values calculated, processing time aggregates',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    // Initial insert of time-based aggregates
    await connection.query(`
      INSERT INTO product_time_aggregates (
        pid,
        year,
        month,
        total_quantity_sold,
        total_revenue,
        total_cost,
        order_count,
        stock_received,
        stock_ordered,
        avg_price,
        profit_margin,
        inventory_value,
        gmroi
      )
      WITH monthly_sales AS (
        SELECT
          o.pid,
          EXTRACT(YEAR FROM o.date::timestamp with time zone)::INTEGER as year,
          EXTRACT(MONTH FROM o.date::timestamp with time zone)::INTEGER as month,
          SUM(o.quantity) as total_quantity_sold,
          SUM((o.price - COALESCE(o.discount, 0)) * o.quantity) as total_revenue,
          SUM(COALESCE(o.costeach, 0) * o.quantity) as total_cost,
          COUNT(DISTINCT o.order_number) as order_count,
          AVG(o.price - COALESCE(o.discount, 0)) as avg_price,
          CASE
            WHEN SUM((o.price - COALESCE(o.discount, 0)) * o.quantity) > 0
            THEN ((SUM((o.price - COALESCE(o.discount, 0)) * o.quantity) - SUM(COALESCE(o.costeach, 0) * o.quantity))
              / SUM((o.price - COALESCE(o.discount, 0)) * o.quantity)) * 100
            ELSE 0
          END as profit_margin,
          COUNT(DISTINCT DATE(o.date)) as active_days
        FROM orders o
        JOIN products p ON o.pid = p.pid
        WHERE o.canceled = false
        GROUP BY o.pid, EXTRACT(YEAR FROM o.date::timestamp with time zone), EXTRACT(MONTH FROM o.date::timestamp with time zone)
      ),
      monthly_stock AS (
        SELECT
          pid,
          EXTRACT(YEAR FROM date::timestamp with time zone)::INTEGER as year,
          EXTRACT(MONTH FROM date::timestamp with time zone)::INTEGER as month,
          SUM(received) as stock_received,
          SUM(ordered) as stock_ordered
        FROM purchase_orders
        GROUP BY pid, EXTRACT(YEAR FROM date::timestamp with time zone), EXTRACT(MONTH FROM date::timestamp with time zone)
      )
      SELECT
        COALESCE(s.pid, ms.pid, mi.pid) as pid,
        COALESCE(s.year, ms.year, mi.year) as year,
        COALESCE(s.month, ms.month, mi.month) as month,
        COALESCE(s.total_quantity_sold, 0)::INTEGER as total_quantity_sold,
        COALESCE(s.total_revenue, 0)::DECIMAL(10,3) as total_revenue,
        COALESCE(s.total_cost, 0)::DECIMAL(10,3) as total_cost,
        COALESCE(s.order_count, 0)::INTEGER as order_count,
        COALESCE(ms.stock_received, 0)::INTEGER as stock_received,
        COALESCE(ms.stock_ordered, 0)::INTEGER as stock_ordered,
        COALESCE(s.avg_price, 0)::DECIMAL(10,3) as avg_price,
        COALESCE(s.profit_margin, 0)::DECIMAL(10,3) as profit_margin,
        COALESCE(mi.end_of_month_value, 0)::DECIMAL(10,3) as inventory_value,
        CASE
          WHEN COALESCE(mi.end_of_month_value, 0) > 0
          THEN (COALESCE(s.total_revenue, 0) - COALESCE(s.total_cost, 0))
            / NULLIF(COALESCE(mi.end_of_month_value, 0), 0)
          ELSE 0
        END::DECIMAL(10,3) as gmroi
      FROM (
        SELECT * FROM monthly_sales s
        UNION ALL
        SELECT
          pid,
          year,
          month,
          0 as total_quantity_sold,
          0 as total_revenue,
          0 as total_cost,
          0 as order_count,
          NULL as avg_price,
          0 as profit_margin,
          0 as active_days
        FROM monthly_stock ms
        WHERE NOT EXISTS (
          SELECT 1 FROM monthly_sales s2
          WHERE s2.pid = ms.pid
            AND s2.year = ms.year
            AND s2.month = ms.month
        )
        UNION ALL
        SELECT
          pid,
          year,
          month,
          0 as total_quantity_sold,
          0 as total_revenue,
          0 as total_cost,
          0 as order_count,
          NULL as avg_price,
          0 as profit_margin,
          0 as active_days
        FROM temp_monthly_inventory mi
        WHERE NOT EXISTS (
          SELECT 1 FROM monthly_sales s3
          WHERE s3.pid = mi.pid
            AND s3.year = mi.year
            AND s3.month = mi.month
        )
        AND NOT EXISTS (
          SELECT 1 FROM monthly_stock ms3
          WHERE ms3.pid = mi.pid
            AND ms3.year = mi.year
            AND ms3.month = mi.month
        )
      ) s
      LEFT JOIN monthly_stock ms
        ON s.pid = ms.pid
        AND s.year = ms.year
        AND s.month = ms.month
      LEFT JOIN temp_monthly_inventory mi
        ON s.pid = mi.pid
        AND s.year = mi.year
        AND s.month = mi.month
      ON CONFLICT (pid, year, month) DO UPDATE
      SET
        total_quantity_sold = EXCLUDED.total_quantity_sold,
        total_revenue = EXCLUDED.total_revenue,
        total_cost = EXCLUDED.total_cost,
        order_count = EXCLUDED.order_count,
        stock_received = EXCLUDED.stock_received,
        stock_ordered = EXCLUDED.stock_ordered,
        avg_price = EXCLUDED.avg_price,
        profit_margin = EXCLUDED.profit_margin,
        inventory_value = EXCLUDED.inventory_value,
        gmroi = EXCLUDED.gmroi
    `);

    processedCount = Math.floor(totalProducts * 0.60);
    outputProgress({
      status: 'running',
      operation: 'Base time aggregates calculated',
      current: processedCount,
      total: totalProducts,
      elapsed: formatElapsedTime(startTime),
      remaining: estimateRemaining(startTime, processedCount, totalProducts),
      rate: calculateRate(startTime, processedCount),
      percentage: ((processedCount / totalProducts) * 100).toFixed(1),
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      }
    });

    if (isCancelled) return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0,
      success
    };

    // Clean up temporary tables
    await connection.query('DROP TABLE IF EXISTS temp_monthly_inventory');

    // If we get here, everything completed successfully
    success = true;

    // Update calculate_status
    await connection.query(`
      INSERT INTO calculate_status (module_name, last_calculation_timestamp)
      VALUES ('time_aggregates', NOW())
      ON CONFLICT (module_name) DO UPDATE
      SET last_calculation_timestamp = NOW()
    `);

    return {
      processedProducts: processedCount,
      processedOrders,
      processedPurchaseOrders: 0,
      success
    };

  } catch (error) {
    success = false;
    logError(error, 'Error calculating time aggregates');
    throw error;
  } finally {
    if (connection) {
      try {
        // Ensure temporary tables are cleaned up
        await connection.query('DROP TABLE IF EXISTS temp_monthly_inventory');
      } catch (err) {
        console.error('Error cleaning up temporary tables:', err);
      }
      connection.release();
    }
  }
}

module.exports = calculateTimeAggregates;
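The gmroi column computed above is gross margin over end-of-month inventory value. In plain-JavaScript terms (the numbers are hypothetical):

  const gmroi = (totalRevenue - totalCost) / endOfMonthValue;
  // e.g. (5000 - 3000) / 4000 = 0.5: each inventory dollar returned $0.50 of margin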
39 inventory-server/old/metrics/utils/db.js (Normal file)
@@ -0,0 +1,39 @@
const { Pool } = require('pg');
const path = require('path');
require('dotenv').config({ path: path.resolve(__dirname, '../../..', '.env') });

// Database configuration
const dbConfig = {
  host: process.env.DB_HOST,
  user: process.env.DB_USER,
  password: process.env.DB_PASSWORD,
  database: process.env.DB_NAME,
  port: process.env.DB_PORT || 5432,
  ssl: process.env.DB_SSL === 'true',
  // Add performance optimizations
  max: 10, // connection pool max size
  idleTimeoutMillis: 30000,
  connectionTimeoutMillis: 60000
};

// Create a single pool instance to be reused
const pool = new Pool(dbConfig);

// Add event handlers for pool
pool.on('error', (err, client) => {
  console.error('Unexpected error on idle client', err);
});

async function getConnection() {
  return await pool.connect();
}

async function closePool() {
  await pool.end();
}

module.exports = {
  dbConfig,
  getConnection,
  closePool
};
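A typical checkout/release cycle against this helper, following the node-postgres pool API (the query text is a placeholder):

  const { getConnection, closePool } = require('./utils/db');

  async function main() {
    const client = await getConnection();
    try {
      const res = await client.query('SELECT 1 AS ok');
      console.log(res.rows[0].ok);
    } finally {
      client.release(); // always return the client to the pool
    }
    await closePool(); // drain the pool on shutdown
  }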
@@ -33,7 +33,7 @@ async function calculateVendorMetrics(startTime, totalProducts, processedCount =
   }
 
   // Get counts of records that will be processed
-  const [[orderCount], [poCount]] = await Promise.all([
+  const [orderCountResult, poCountResult] = await Promise.all([
     connection.query(`
       SELECT COUNT(*) as count
       FROM orders o
@@ -45,8 +45,8 @@ async function calculateVendorMetrics(startTime, totalProducts, processedCount =
       WHERE po.status != 0
     `)
   ]);
-  processedOrders = orderCount.count;
-  processedPurchaseOrders = poCount.count;
+  processedOrders = parseInt(orderCountResult.rows[0].count);
+  processedPurchaseOrders = parseInt(poCountResult.rows[0].count);
 
   outputProgress({
     status: 'running',
@@ -66,7 +66,7 @@ async function calculateVendorMetrics(startTime, totalProducts, processedCount =
 
   // First ensure all vendors exist in vendor_details
   await connection.query(`
-    INSERT IGNORE INTO vendor_details (vendor, status, created_at, updated_at)
+    INSERT INTO vendor_details (vendor, status, created_at, updated_at)
     SELECT DISTINCT
       vendor,
       'active' as status,
@@ -74,6 +74,7 @@ async function calculateVendorMetrics(startTime, totalProducts, processedCount =
       NOW() as updated_at
     FROM products
     WHERE vendor IS NOT NULL
+    ON CONFLICT (vendor) DO NOTHING
   `);
 
   processedCount = Math.floor(totalProducts * 0.8);
@@ -128,7 +129,7 @@ async function calculateVendorMetrics(startTime, totalProducts, processedCount =
       FROM products p
       JOIN orders o ON p.pid = o.pid
       WHERE o.canceled = false
-        AND o.date >= DATE_SUB(CURRENT_DATE, INTERVAL 12 MONTH)
+        AND o.date >= CURRENT_DATE - INTERVAL '12 months'
       GROUP BY p.vendor
     ),
     vendor_po AS (
@@ -138,12 +139,15 @@ async function calculateVendorMetrics(startTime, totalProducts, processedCount =
         COUNT(DISTINCT po.id) as total_orders,
         AVG(CASE
           WHEN po.receiving_status = 40
-          THEN DATEDIFF(po.received_date, po.date)
+            AND po.received_date IS NOT NULL
+            AND po.date IS NOT NULL
+          THEN EXTRACT(EPOCH FROM (po.received_date::timestamp with time zone - po.date::timestamp with time zone)) / 86400.0
           ELSE NULL
         END) as avg_lead_time_days,
         SUM(po.ordered * po.po_cost_price) as total_purchase_value
       FROM products p
       JOIN purchase_orders po ON p.pid = po.pid
-      WHERE po.date >= DATE_SUB(CURRENT_DATE, INTERVAL 12 MONTH)
+      WHERE po.date >= CURRENT_DATE - INTERVAL '12 months'
       GROUP BY p.vendor
     ),
     vendor_products AS (
@@ -188,20 +192,21 @@ async function calculateVendorMetrics(startTime, totalProducts, processedCount =
     LEFT JOIN vendor_po vp ON vs.vendor = vp.vendor
     LEFT JOIN vendor_products vpr ON vs.vendor = vpr.vendor
     WHERE vs.vendor IS NOT NULL
-    ON DUPLICATE KEY UPDATE
-      total_revenue = VALUES(total_revenue),
-      total_orders = VALUES(total_orders),
-      total_late_orders = VALUES(total_late_orders),
-      avg_lead_time_days = VALUES(avg_lead_time_days),
-      on_time_delivery_rate = VALUES(on_time_delivery_rate),
-      order_fill_rate = VALUES(order_fill_rate),
-      avg_order_value = VALUES(avg_order_value),
-      active_products = VALUES(active_products),
-      total_products = VALUES(total_products),
-      total_purchase_value = VALUES(total_purchase_value),
-      avg_margin_percent = VALUES(avg_margin_percent),
-      status = VALUES(status),
-      last_calculated_at = VALUES(last_calculated_at)
+    ON CONFLICT (vendor) DO UPDATE
+    SET
+      total_revenue = EXCLUDED.total_revenue,
+      total_orders = EXCLUDED.total_orders,
+      total_late_orders = EXCLUDED.total_late_orders,
+      avg_lead_time_days = EXCLUDED.avg_lead_time_days,
+      on_time_delivery_rate = EXCLUDED.on_time_delivery_rate,
+      order_fill_rate = EXCLUDED.order_fill_rate,
+      avg_order_value = EXCLUDED.avg_order_value,
+      active_products = EXCLUDED.active_products,
+      total_products = EXCLUDED.total_products,
+      total_purchase_value = EXCLUDED.total_purchase_value,
+      avg_margin_percent = EXCLUDED.avg_margin_percent,
+      status = EXCLUDED.status,
+      last_calculated_at = EXCLUDED.last_calculated_at
   `);
 
   processedCount = Math.floor(totalProducts * 0.9);
@@ -244,23 +249,23 @@ async function calculateVendorMetrics(startTime, totalProducts, processedCount =
     WITH monthly_orders AS (
       SELECT
         p.vendor,
-        YEAR(o.date) as year,
-        MONTH(o.date) as month,
+        EXTRACT(YEAR FROM o.date::timestamp with time zone) as year,
+        EXTRACT(MONTH FROM o.date::timestamp with time zone) as month,
         COUNT(DISTINCT o.id) as total_orders,
         SUM(o.quantity * o.price) as total_revenue,
         SUM(o.quantity * (o.price - p.cost_price)) as total_margin
       FROM products p
       JOIN orders o ON p.pid = o.pid
       WHERE o.canceled = false
-        AND o.date >= DATE_SUB(CURRENT_DATE, INTERVAL 12 MONTH)
+        AND o.date >= CURRENT_DATE - INTERVAL '12 months'
         AND p.vendor IS NOT NULL
-      GROUP BY p.vendor, YEAR(o.date), MONTH(o.date)
+      GROUP BY p.vendor, EXTRACT(YEAR FROM o.date::timestamp with time zone), EXTRACT(MONTH FROM o.date::timestamp with time zone)
     ),
     monthly_po AS (
       SELECT
         p.vendor,
-        YEAR(po.date) as year,
-        MONTH(po.date) as month,
+        EXTRACT(YEAR FROM po.date::timestamp with time zone) as year,
+        EXTRACT(MONTH FROM po.date::timestamp with time zone) as month,
         COUNT(DISTINCT po.id) as total_po,
         COUNT(DISTINCT CASE
           WHEN po.receiving_status = 40 AND po.received_date > po.expected_date
@@ -268,14 +273,17 @@ async function calculateVendorMetrics(startTime, totalProducts, processedCount =
         END) as late_orders,
         AVG(CASE
           WHEN po.receiving_status = 40
-          THEN DATEDIFF(po.received_date, po.date)
+            AND po.received_date IS NOT NULL
+            AND po.date IS NOT NULL
+          THEN EXTRACT(EPOCH FROM (po.received_date::timestamp with time zone - po.date::timestamp with time zone)) / 86400.0
           ELSE NULL
         END) as avg_lead_time_days,
         SUM(po.ordered * po.po_cost_price) as total_purchase_value
       FROM products p
       JOIN purchase_orders po ON p.pid = po.pid
-      WHERE po.date >= DATE_SUB(CURRENT_DATE, INTERVAL 12 MONTH)
+      WHERE po.date >= CURRENT_DATE - INTERVAL '12 months'
         AND p.vendor IS NOT NULL
-      GROUP BY p.vendor, YEAR(po.date), MONTH(po.date)
+      GROUP BY p.vendor, EXTRACT(YEAR FROM po.date::timestamp with time zone), EXTRACT(MONTH FROM po.date::timestamp with time zone)
     )
     SELECT
       mo.vendor,
@@ -311,13 +319,14 @@ async function calculateVendorMetrics(startTime, totalProducts, processedCount =
       AND mp.year = mo.year
       AND mp.month = mo.month
     WHERE mo.vendor IS NULL
-    ON DUPLICATE KEY UPDATE
-      total_orders = VALUES(total_orders),
-      late_orders = VALUES(late_orders),
-      avg_lead_time_days = VALUES(avg_lead_time_days),
-      total_purchase_value = VALUES(total_purchase_value),
-      total_revenue = VALUES(total_revenue),
-      avg_margin_percent = VALUES(avg_margin_percent)
+    ON CONFLICT (vendor, year, month) DO UPDATE
+    SET
+      total_orders = EXCLUDED.total_orders,
+      late_orders = EXCLUDED.late_orders,
+      avg_lead_time_days = EXCLUDED.avg_lead_time_days,
+      total_purchase_value = EXCLUDED.total_purchase_value,
+      total_revenue = EXCLUDED.total_revenue,
+      avg_margin_percent = EXCLUDED.avg_margin_percent
   `);
 
   processedCount = Math.floor(totalProducts * 0.95);
@@ -344,7 +353,8 @@ async function calculateVendorMetrics(startTime, totalProducts, processedCount =
   await connection.query(`
     INSERT INTO calculate_status (module_name, last_calculation_timestamp)
     VALUES ('vendor_metrics', NOW())
-    ON DUPLICATE KEY UPDATE last_calculation_timestamp = NOW()
+    ON CONFLICT (module_name) DO UPDATE
+    SET last_calculation_timestamp = NOW()
   `);
 
   return {
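A recurring fix in these hunks: node-postgres returns 64-bit aggregates such as COUNT(*) as strings (the values arrive as int8), so the ported code parses them explicitly instead of reading the mysql2-style `result[0].count`. Sketch (table name from this schema, value illustrative):

  const res = await connection.query('SELECT COUNT(*) AS count FROM orders');
  // res.rows[0].count is a string like '10423'
  const count = parseInt(res.rows[0].count, 10);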
677 inventory-server/old/populate-initial-metrics.js (Normal file)
@@ -0,0 +1,677 @@
const path = require('path');
|
||||
const fs = require('fs');
|
||||
const os = require('os'); // For detecting CPU cores
|
||||
|
||||
// Get the base directory (the directory containing the inventory-server folder)
|
||||
const baseDir = path.resolve(__dirname, '../../..');
|
||||
|
||||
// Load environment variables from the inventory-server directory
|
||||
require('dotenv').config({ path: path.resolve(__dirname, '../..', '.env') });
|
||||
|
||||
// Configure statement timeout (30 minutes)
|
||||
const PG_STATEMENT_TIMEOUT_MS = 1800000;
|
||||
|
||||
// Add error handler for uncaught exceptions
|
||||
process.on('uncaughtException', (error) => {
|
||||
console.error('Uncaught Exception:', error);
|
||||
process.exit(1);
|
||||
});
|
||||
|
||||
// Add error handler for unhandled promise rejections
|
||||
process.on('unhandledRejection', (reason, promise) => {
|
||||
console.error('Unhandled Rejection at:', promise, 'reason:', reason);
|
||||
process.exit(1);
|
||||
});
|
||||
|
||||
// Load progress module
|
||||
const progress = require('../scripts/metrics-new/utils/progress');
|
||||
|
||||
// Store progress functions in global scope to ensure availability
|
||||
global.formatElapsedTime = progress.formatElapsedTime;
|
||||
global.estimateRemaining = progress.estimateRemaining;
|
||||
global.calculateRate = progress.calculateRate;
|
||||
global.outputProgress = progress.outputProgress;
|
||||
global.clearProgress = progress.clearProgress;
|
||||
global.getProgress = progress.getProgress;
|
||||
global.logError = progress.logError;
|
||||
|
||||
// Load database module
|
||||
const { getConnection, closePool } = require('../scripts/metrics-new/utils/db');
|
||||
|
||||
// Add cancel handler
|
||||
let isCancelled = false;
|
||||
let runningQueryPromise = null;
|
||||
|
||||
function cancelCalculation() {
|
||||
if (!isCancelled) {
|
||||
isCancelled = true;
|
||||
console.log('Calculation has been cancelled by user');
|
||||
|
||||
// Store the query promise to potentially cancel it
|
||||
const queryToCancel = runningQueryPromise;
|
||||
if (queryToCancel) {
|
||||
console.log('Attempting to cancel the running query...');
|
||||
}
|
||||
|
||||
// Force-terminate any query that's been running for more than 5 seconds
|
||||
try {
|
||||
const connection = getConnection();
|
||||
connection.then(async (conn) => {
|
||||
try {
|
||||
// Identify and terminate long-running queries from our application
|
||||
await conn.query(`
|
||||
SELECT pg_cancel_backend(pid)
|
||||
FROM pg_stat_activity
|
||||
WHERE query_start < now() - interval '5 seconds'
|
||||
AND application_name = 'populate_metrics'
|
||||
AND query NOT LIKE '%pg_cancel_backend%'
|
||||
`);
|
||||
|
||||
// Release connection
|
||||
conn.release();
|
||||
} catch (err) {
|
||||
console.error('Error during force cancellation:', err);
|
||||
conn.release();
|
||||
}
|
||||
}).catch(err => {
|
||||
console.error('Could not get connection for cancellation:', err);
|
||||
});
|
||||
} catch (err) {
|
||||
console.error('Failed to terminate running queries:', err);
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
message: 'Calculation has been cancelled'
|
||||
};
|
||||
}
|
||||
|
||||
// Handle SIGTERM signal for cancellation
|
||||
process.on('SIGTERM', cancelCalculation);
|
||||
process.on('SIGINT', cancelCalculation);
|
||||
|
||||
const calculateInitialMetrics = (client, onProgress) => {
|
||||
return client.query(`
|
||||
-- Truncate the existing metrics tables to ensure clean data
|
||||
TRUNCATE TABLE public.daily_product_snapshots;
|
||||
TRUNCATE TABLE public.product_metrics;
|
||||
|
||||
-- First let's create daily snapshots for all products with order activity
|
||||
WITH SalesData AS (
|
||||
SELECT
|
||||
p.pid,
|
||||
p.sku,
|
||||
o.date::date AS order_date,
|
||||
-- Count orders to ensure we only include products with real activity
|
||||
COUNT(o.id) as order_count,
|
||||
-- Aggregate Sales (Quantity > 0, Status not Canceled/Returned)
|
||||
COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN o.quantity ELSE 0 END), 0) AS units_sold,
|
||||
COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN o.price * o.quantity ELSE 0 END), 0.00) AS gross_revenue_unadjusted,
|
||||
COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN o.discount ELSE 0 END), 0.00) AS discounts,
|
||||
COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN COALESCE(o.costeach, p.landing_cost_price, p.cost_price) * o.quantity ELSE 0 END), 0.00) AS cogs,
|
||||
COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN p.regular_price * o.quantity ELSE 0 END), 0.00) AS gross_regular_revenue,
|
||||
|
||||
-- Aggregate Returns (Quantity < 0 or Status = Returned)
|
||||
COALESCE(SUM(CASE WHEN o.quantity < 0 OR COALESCE(o.status, 'pending') = 'returned' THEN ABS(o.quantity) ELSE 0 END), 0) AS units_returned,
|
||||
COALESCE(SUM(CASE WHEN o.quantity < 0 OR COALESCE(o.status, 'pending') = 'returned' THEN o.price * ABS(o.quantity) ELSE 0 END), 0.00) AS returns_revenue
|
||||
FROM public.products p
|
||||
LEFT JOIN public.orders o ON p.pid = o.pid
|
||||
GROUP BY p.pid, p.sku, o.date::date
|
||||
HAVING COUNT(o.id) > 0 -- Only include products with actual orders
|
||||
),
|
||||
ReceivingData AS (
|
||||
SELECT
|
||||
r.pid,
|
||||
r.received_date::date AS receiving_date,
|
||||
-- Count receiving documents to ensure we only include products with real activity
|
||||
COUNT(DISTINCT r.receiving_id) as receiving_count,
|
||||
-- Calculate received quantity for this day
|
||||
SUM(r.received_quantity) AS units_received,
|
||||
-- Calculate received cost for this day
|
||||
SUM(r.received_quantity * r.unit_cost) AS cost_received
|
||||
FROM public.receivings r
|
||||
GROUP BY r.pid, r.received_date::date
|
||||
HAVING COUNT(DISTINCT r.receiving_id) > 0 OR SUM(r.received_quantity) > 0
|
||||
),
|
||||
-- Get current stock quantities
|
||||
StockData AS (
|
||||
SELECT
|
||||
p.pid,
|
||||
p.stock_quantity,
|
||||
COALESCE(p.landing_cost_price, p.cost_price, 0.00) as effective_cost_price,
|
||||
COALESCE(p.price, 0.00) as current_price,
|
||||
COALESCE(p.regular_price, 0.00) as current_regular_price
|
||||
FROM public.products p
|
||||
),
|
||||
-- Combine sales and receiving dates to get all activity dates
|
||||
DatePidCombos AS (
|
||||
SELECT DISTINCT pid, order_date AS activity_date FROM SalesData
|
||||
UNION
|
||||
SELECT DISTINCT pid, receiving_date FROM ReceivingData
|
||||
),
|
||||
-- Insert daily snapshots for all product-date combinations
|
||||
SnapshotInsert AS (
|
||||
INSERT INTO public.daily_product_snapshots (
|
||||
snapshot_date,
|
||||
pid,
|
||||
sku,
|
||||
eod_stock_quantity,
|
||||
eod_stock_cost,
|
||||
eod_stock_retail,
|
||||
eod_stock_gross,
|
||||
stockout_flag,
|
||||
units_sold,
|
||||
units_returned,
|
||||
gross_revenue,
|
||||
discounts,
|
||||
returns_revenue,
|
||||
net_revenue,
|
||||
cogs,
|
||||
gross_regular_revenue,
|
||||
profit,
|
||||
units_received,
|
||||
cost_received,
|
||||
calculation_timestamp
|
||||
)
|
||||
SELECT
|
||||
d.activity_date AS snapshot_date,
|
||||
d.pid,
|
||||
p.sku,
|
||||
-- Use current stock as approximation, since historical stock data is not available
|
||||
s.stock_quantity AS eod_stock_quantity,
|
||||
s.stock_quantity * s.effective_cost_price AS eod_stock_cost,
|
||||
s.stock_quantity * s.current_price AS eod_stock_retail,
|
||||
s.stock_quantity * s.current_regular_price AS eod_stock_gross,
|
||||
(s.stock_quantity <= 0) AS stockout_flag,
|
||||
-- Sales metrics
|
||||
COALESCE(sd.units_sold, 0),
|
||||
COALESCE(sd.units_returned, 0),
|
||||
COALESCE(sd.gross_revenue_unadjusted, 0.00),
|
||||
COALESCE(sd.discounts, 0.00),
|
||||
COALESCE(sd.returns_revenue, 0.00),
|
||||
COALESCE(sd.gross_revenue_unadjusted, 0.00) - COALESCE(sd.discounts, 0.00) AS net_revenue,
|
||||
COALESCE(sd.cogs, 0.00),
|
||||
COALESCE(sd.gross_regular_revenue, 0.00),
|
||||
(COALESCE(sd.gross_revenue_unadjusted, 0.00) - COALESCE(sd.discounts, 0.00)) - COALESCE(sd.cogs, 0.00) AS profit,
|
||||
-- Receiving metrics
|
||||
COALESCE(rd.units_received, 0),
|
||||
COALESCE(rd.cost_received, 0.00),
|
||||
now() -- calculation timestamp
|
||||
FROM DatePidCombos d
|
||||
JOIN public.products p ON d.pid = p.pid
|
||||
LEFT JOIN SalesData sd ON d.pid = sd.pid AND d.activity_date = sd.order_date
|
||||
LEFT JOIN ReceivingData rd ON d.pid = rd.pid AND d.activity_date = rd.receiving_date
|
||||
LEFT JOIN StockData s ON d.pid = s.pid
|
||||
RETURNING pid, snapshot_date
|
||||
),
|
||||
-- Now build the aggregated product metrics from the daily snapshots
|
||||
MetricsInsert AS (
|
||||
INSERT INTO public.product_metrics (
|
||||
pid,
|
||||
sku,
|
||||
current_stock_quantity,
|
||||
current_stock_cost,
|
||||
current_stock_retail,
|
||||
current_stock_msrp,
|
||||
is_out_of_stock,
|
||||
total_units_sold,
|
||||
total_units_returned,
|
||||
return_rate,
|
||||
gross_revenue,
|
||||
total_discounts,
|
||||
total_returns,
|
||||
net_revenue,
|
||||
total_cogs,
|
||||
total_gross_revenue,
|
||||
total_profit,
|
||||
profit_margin,
|
||||
avg_daily_units,
|
||||
reorder_point,
|
||||
reorder_alert,
|
||||
days_of_supply,
|
||||
sales_velocity,
|
||||
sales_velocity_score,
|
||||
rank_by_revenue,
|
||||
rank_by_quantity,
|
||||
rank_by_profit,
|
||||
total_received_quantity,
|
||||
total_received_cost,
|
||||
last_sold_date,
|
||||
last_received_date,
|
||||
days_since_last_sale,
|
||||
days_since_last_received,
|
||||
calculation_timestamp
|
||||
)
|
||||
SELECT
|
||||
p.pid,
|
||||
p.sku,
|
||||
p.stock_quantity AS current_stock_quantity,
|
||||
p.stock_quantity * COALESCE(p.landing_cost_price, p.cost_price, 0) AS current_stock_cost,
|
||||
p.stock_quantity * COALESCE(p.price, 0) AS current_stock_retail,
|
||||
p.stock_quantity * COALESCE(p.regular_price, 0) AS current_stock_msrp,
|
||||
(p.stock_quantity <= 0) AS is_out_of_stock,
|
||||
-- Aggregate metrics
|
||||
COALESCE(SUM(ds.units_sold), 0) AS total_units_sold,
|
||||
COALESCE(SUM(ds.units_returned), 0) AS total_units_returned,
|
||||
CASE
|
||||
WHEN COALESCE(SUM(ds.units_sold), 0) > 0
|
||||
THEN COALESCE(SUM(ds.units_returned), 0)::float / NULLIF(COALESCE(SUM(ds.units_sold), 0), 0)
|
||||
ELSE 0
|
||||
END AS return_rate,
|
||||
COALESCE(SUM(ds.gross_revenue), 0) AS gross_revenue,
|
||||
COALESCE(SUM(ds.discounts), 0) AS total_discounts,
|
||||
COALESCE(SUM(ds.returns_revenue), 0) AS total_returns,
|
||||
COALESCE(SUM(ds.net_revenue), 0) AS net_revenue,
|
||||
COALESCE(SUM(ds.cogs), 0) AS total_cogs,
|
||||
COALESCE(SUM(ds.gross_regular_revenue), 0) AS total_gross_revenue,
|
||||
COALESCE(SUM(ds.profit), 0) AS total_profit,
|
||||
CASE
|
||||
WHEN COALESCE(SUM(ds.net_revenue), 0) > 0
|
||||
THEN COALESCE(SUM(ds.profit), 0) / NULLIF(COALESCE(SUM(ds.net_revenue), 0), 0)
|
||||
ELSE 0
|
||||
END AS profit_margin,
|
||||
-- Calculate average daily units
|
||||
COALESCE(AVG(ds.units_sold), 0) AS avg_daily_units,
|
||||
-- Calculate reorder point (simplified, can be enhanced with lead time and safety stock)
|
||||
CEILING(COALESCE(AVG(ds.units_sold) * 14, 0)) AS reorder_point,
|
||||
(p.stock_quantity <= CEILING(COALESCE(AVG(ds.units_sold) * 14, 0))) AS reorder_alert,
|
||||
-- Days of supply based on average daily sales
|
||||
CASE
|
||||
WHEN COALESCE(AVG(ds.units_sold), 0) > 0
|
||||
THEN p.stock_quantity / NULLIF(COALESCE(AVG(ds.units_sold), 0), 0)
|
||||
ELSE NULL
|
||||
END AS days_of_supply,
|
||||
-- Sales velocity (average units sold per day over last 30 days)
|
||||
(SELECT COALESCE(AVG(recent.units_sold), 0)
|
||||
FROM public.daily_product_snapshots recent
|
||||
WHERE recent.pid = p.pid
|
||||
AND recent.snapshot_date >= CURRENT_DATE - INTERVAL '30 days'
|
||||
) AS sales_velocity,
|
||||
-- Placeholder for sales velocity score (can be calculated based on velocity)
|
||||
0 AS sales_velocity_score,
|
||||
-- Will be updated later by ranking procedure
|
||||
0 AS rank_by_revenue,
|
||||
0 AS rank_by_quantity,
|
||||
0 AS rank_by_profit,
|
||||
-- Receiving data
|
||||
COALESCE(SUM(ds.units_received), 0) AS total_received_quantity,
|
||||
COALESCE(SUM(ds.cost_received), 0) AS total_received_cost,
|
||||
-- Date metrics
|
||||
(SELECT MAX(sd.snapshot_date)
|
||||
FROM public.daily_product_snapshots sd
|
||||
WHERE sd.pid = p.pid AND sd.units_sold > 0
|
||||
) AS last_sold_date,
|
||||
(SELECT MAX(rd.snapshot_date)
|
||||
FROM public.daily_product_snapshots rd
|
||||
WHERE rd.pid = p.pid AND rd.units_received > 0
|
||||
) AS last_received_date,
|
||||
-- Calculate days since last sale/received
|
||||
CASE
|
||||
WHEN (SELECT MAX(sd.snapshot_date)
|
||||
FROM public.daily_product_snapshots sd
|
||||
WHERE sd.pid = p.pid AND sd.units_sold > 0) IS NOT NULL
|
||||
THEN (CURRENT_DATE - (SELECT MAX(sd.snapshot_date)
|
||||
FROM public.daily_product_snapshots sd
|
||||
WHERE sd.pid = p.pid AND sd.units_sold > 0))::integer
|
||||
ELSE NULL
|
||||
END AS days_since_last_sale,
|
||||
CASE
|
||||
WHEN (SELECT MAX(rd.snapshot_date)
|
||||
FROM public.daily_product_snapshots rd
|
||||
WHERE rd.pid = p.pid AND rd.units_received > 0) IS NOT NULL
|
||||
THEN (CURRENT_DATE - (SELECT MAX(rd.snapshot_date)
|
||||
FROM public.daily_product_snapshots rd
|
||||
WHERE rd.pid = p.pid AND rd.units_received > 0))::integer
|
||||
ELSE NULL
|
||||
END AS days_since_last_received,
|
||||
now() -- calculation timestamp
|
||||
FROM public.products p
|
||||
LEFT JOIN public.daily_product_snapshots ds ON p.pid = ds.pid
|
||||
GROUP BY p.pid, p.sku, p.stock_quantity, p.landing_cost_price, p.cost_price, p.price, p.regular_price
|
||||
)
|
||||
|
||||
-- Update the calculate_status table
|
||||
INSERT INTO public.calculate_status (module_name, last_calculation_timestamp)
|
||||
VALUES
|
||||
('daily_snapshots', now()),
|
||||
('product_metrics', now())
|
||||
ON CONFLICT (module_name) DO UPDATE
|
||||
SET last_calculation_timestamp = now();
|
||||
|
||||
-- Finally, update the ranks for products
|
||||
UPDATE public.product_metrics pm SET
|
||||
rank_by_revenue = rev_ranks.rank
|
||||
FROM (
|
||||
SELECT pid, RANK() OVER (ORDER BY net_revenue DESC) AS rank
|
||||
FROM public.product_metrics
|
||||
WHERE net_revenue > 0
|
||||
) rev_ranks
|
||||
WHERE pm.pid = rev_ranks.pid;
|
||||
|
||||
UPDATE public.product_metrics pm SET
|
||||
rank_by_quantity = qty_ranks.rank
|
||||
FROM (
|
||||
SELECT pid, RANK() OVER (ORDER BY total_units_sold DESC) AS rank
|
||||
FROM public.product_metrics
|
||||
WHERE total_units_sold > 0
|
||||
) qty_ranks
|
||||
WHERE pm.pid = qty_ranks.pid;
|
||||
|
||||
UPDATE public.product_metrics pm SET
|
||||
rank_by_profit = profit_ranks.rank
|
||||
FROM (
|
||||
SELECT pid, RANK() OVER (ORDER BY total_profit DESC) AS rank
|
||||
FROM public.product_metrics
|
||||
WHERE total_profit > 0
|
||||
) profit_ranks
|
||||
WHERE pm.pid = profit_ranks.pid;
|
||||
|
||||
-- Return count of products with metrics
|
||||
SELECT COUNT(*) AS product_count FROM public.product_metrics
|
||||
`);
|
||||
};

async function populateInitialMetrics() {
  let connection;
  const startTime = Date.now();
  let calculateHistoryId;

  try {
    // Clean up any previously running calculations
    connection = await getConnection({
      // Add performance-related settings
      application_name: 'populate_metrics',
      statement_timeout: PG_STATEMENT_TIMEOUT_MS, // 30 min timeout per statement
    });

    // Ensure the calculate_status table exists and has the correct structure
    await connection.query(`
      CREATE TABLE IF NOT EXISTS calculate_status (
        module_name TEXT PRIMARY KEY,
        last_calculation_timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
      )
    `);

    await connection.query(`
      UPDATE calculate_history
      SET
        status = 'cancelled',
        end_time = NOW(),
        duration_seconds = EXTRACT(EPOCH FROM (NOW() - start_time))::INTEGER,
        error_message = 'Previous calculation was not completed properly'
      WHERE status = 'running' AND additional_info->>'type' = 'populate_initial_metrics'
    `);

    // Create history record for this calculation
    const historyResult = await connection.query(`
      INSERT INTO calculate_history (
        start_time,
        status,
        additional_info
      ) VALUES (
        NOW(),
        'running',
        jsonb_build_object(
          'type', 'populate_initial_metrics',
          'sql_file', 'populate_initial_product_metrics.sql'
        )
      ) RETURNING id
    `);
    calculateHistoryId = historyResult.rows[0].id;

    // Initialize progress
    global.outputProgress({
      status: 'running',
      operation: 'Starting initial product metrics population',
      current: 0,
      total: 100,
      elapsed: '0s',
      remaining: 'Calculating... (this may take a while)',
      rate: 0,
      percentage: '0',
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      },
      historyId: calculateHistoryId
    });

    // Prepare the database - analyze tables
    global.outputProgress({
      status: 'running',
      operation: 'Analyzing database tables for better query performance',
      current: 2,
      total: 100,
      elapsed: global.formatElapsedTime(startTime),
      remaining: 'Analyzing...',
      rate: 0,
      percentage: '2',
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      },
      historyId: calculateHistoryId
    });

    // Enable better query planning and parallel operations
    await connection.query(`
      -- Analyze tables for better query planning
      ANALYZE public.products;
      ANALYZE public.purchase_orders;
      ANALYZE public.daily_product_snapshots;
      ANALYZE public.orders;

      -- Enable parallel operations
      SET LOCAL enable_parallel_append = on;
      SET LOCAL enable_parallel_hash = on;
      SET LOCAL max_parallel_workers_per_gather = 4;

      -- Larger work memory for complex sorts/joins
      SET LOCAL work_mem = '128MB';
    `).catch(err => {
      // Non-fatal if analyze fails
      console.warn('Failed to analyze tables (non-fatal):', err.message);
    });
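
    // Note (sketch, not in the original commit): SET LOCAL only lasts until the
    // end of the enclosing transaction. node-postgres runs the multi-statement
    // string above as one implicit transaction, so those settings have already
    // reverted by the time the SQL file runs below. Session-level SET would
    // persist for this pooled connection instead, e.g.:
    //
    //   await connection.query(`SET max_parallel_workers_per_gather = 4`);
    //   await connection.query(`SET work_mem = '128MB'`);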

    // Execute the SQL query
    global.outputProgress({
      status: 'running',
      operation: 'Executing initial metrics SQL query',
      current: 5,
      total: 100,
      elapsed: global.formatElapsedTime(startTime),
      remaining: 'Calculating... (this could take several hours with 150M+ records)',
      rate: 0,
      percentage: '5',
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      },
      historyId: calculateHistoryId
    });

    // Read the SQL file
    const sqlFilePath = path.resolve(__dirname, 'populate_initial_product_metrics.sql');
    console.log('Base directory:', baseDir);
    console.log('Script directory:', __dirname);
    console.log('SQL file path:', sqlFilePath);
    console.log('Current working directory:', process.cwd());

    if (!fs.existsSync(sqlFilePath)) {
      throw new Error(`SQL file not found at ${sqlFilePath}`);
    }

    // Read and clean up the SQL (slightly more robust cleaning)
    const sqlQuery = fs.readFileSync(sqlFilePath, 'utf8')
      .replace(/\r\n/g, '\n') // Handle Windows line endings
      .replace(/\r/g, '\n')   // Handle old Mac line endings
      .trim();                // Remove leading/trailing whitespace (very important)

    // Log details again AFTER cleaning
    console.log('SQL Query length (cleaned):', sqlQuery.length);
    console.log('SQL Query structure validation:');
    console.log('- Contains DO block:', sqlQuery.includes('DO $$') || sqlQuery.includes('DO $')); // Check both dollar-quote tag styles
    console.log('- Contains BEGIN:', sqlQuery.includes('BEGIN'));
    console.log('- Contains END:', sqlQuery.includes('END $$;') || sqlQuery.includes('END $')); // Check both dollar-quote tag styles
    console.log('- First 50 chars:', JSON.stringify(sqlQuery.slice(0, 50)));
    console.log('- Last 100 chars (cleaned):', JSON.stringify(sqlQuery.slice(-100)));

    // Final check to ensure a clean SQL ending
    if (!sqlQuery.endsWith('END $$;')) {
      console.warn('WARNING: SQL does not end with "END $$;". This might cause issues.');
      console.log('Exact ending:', JSON.stringify(sqlQuery.slice(-20)));
    }

    // Execute the script
    console.log('Starting initial product metrics population...');

    // Track the query promise for potential cancellation
    runningQueryPromise = connection.query({
      text: sqlQuery,
      rowMode: 'array'
    });
    await runningQueryPromise;
    runningQueryPromise = null;
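
    // Sketch (not in the original commit): pg cannot abort an in-flight
    // statement from the connection that issued it, so a cancel path would
    // signal the backend from a second session, keyed on the application_name
    // set in getConnection() above:
    //
    //   const c = await getConnection({ application_name: 'populate_metrics_cancel' });
    //   await c.query(`
    //     SELECT pg_cancel_backend(pid)
    //     FROM pg_stat_activity
    //     WHERE application_name = 'populate_metrics'
    //       AND state = 'active'
    //       AND pid <> pg_backend_pid()
    //   `);
    //   c.release();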

    // Update progress to 100%
    global.outputProgress({
      status: 'complete',
      operation: 'Initial product metrics population complete',
      current: 100,
      total: 100,
      elapsed: global.formatElapsedTime(startTime),
      remaining: '0s',
      rate: 0,
      percentage: '100',
      timing: {
        start_time: new Date(startTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: Math.round((Date.now() - startTime) / 1000)
      },
      historyId: calculateHistoryId
    });

    // Update history with completion
    await connection.query(`
      UPDATE calculate_history
      SET
        end_time = NOW(),
        duration_seconds = $1,
        status = 'completed'
      WHERE id = $2
    `, [Math.round((Date.now() - startTime) / 1000), calculateHistoryId]);

    // Clear progress file on successful completion
    global.clearProgress();

    return {
      success: true,
      message: 'Initial product metrics population completed successfully',
      duration: Math.round((Date.now() - startTime) / 1000)
    };
  } catch (error) {
    const endTime = Date.now();
    const totalElapsedSeconds = Math.round((endTime - startTime) / 1000);

    // Enhanced error logging
    console.error('Error details:', {
      message: error.message,
      code: error.code,
      hint: error.hint,
      position: error.position,
      detail: error.detail,
      where: error.where ? error.where.substring(0, 500) + '...' : undefined, // Truncate to avoid huge logs
      severity: error.severity,
      file: error.file,
      line: error.line,
      routine: error.routine
    });

    // Update history with error
    if (connection && calculateHistoryId) {
      await connection.query(`
        UPDATE calculate_history
        SET
          end_time = NOW(),
          duration_seconds = $1,
          status = $2,
          error_message = $3
        WHERE id = $4
      `, [
        totalElapsedSeconds,
        isCancelled ? 'cancelled' : 'failed',
        error.message,
        calculateHistoryId
      ]);
    }

    if (isCancelled) {
      global.outputProgress({
        status: 'cancelled',
        operation: 'Calculation cancelled',
        current: 50,
        total: 100,
        elapsed: global.formatElapsedTime(startTime),
        remaining: null,
        rate: 0,
        percentage: '50',
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: totalElapsedSeconds
        },
        historyId: calculateHistoryId
      });
    } else {
      global.outputProgress({
        status: 'error',
        operation: 'Error during initial product metrics population',
        message: error.message,
        current: 0,
        total: 100,
        elapsed: global.formatElapsedTime(startTime),
        remaining: null,
        rate: 0,
        percentage: '0',
        timing: {
          start_time: new Date(startTime).toISOString(),
          end_time: new Date().toISOString(),
          elapsed_seconds: totalElapsedSeconds
        },
        historyId: calculateHistoryId
      });
    }

    console.error('Error during initial product metrics population:', error);
    return {
      success: false,
      error: error.message,
      duration: totalElapsedSeconds
    };
  } finally {
    if (connection) {
      connection.release();
    }
    await closePool();
  }
}

// Start population process
populateInitialMetrics()
  .then(result => {
    if (result.success) {
      console.log(`Initial product metrics population completed successfully in ${result.duration} seconds`);
      process.exit(0);
    } else {
      console.error(`Initial product metrics population failed: ${result.error}`);
      process.exit(1);
    }
  })
  .catch(err => {
    console.error('Unexpected error:', err);
    process.exit(1);
  });
428
inventory-server/old/psql-csv-import.sh
Executable file
@@ -0,0 +1,428 @@
#!/bin/bash

# Simple script to import CSV to PostgreSQL using psql
# Usage: ./psql-csv-import.sh <csv-file> <table-name> [start-batch]

# Exit on error
set -e

# Get arguments
CSV_FILE=$1
TABLE_NAME=$2
BATCH_SIZE=500000   # Process 500,000 rows at a time
START_BATCH=${3:-1} # Optional third parameter to start from a specific batch

if [ -z "$CSV_FILE" ] || [ -z "$TABLE_NAME" ]; then
  echo "Usage: ./psql-csv-import.sh <csv-file> <table-name> [start-batch]"
  exit 1
fi

# Check if file exists (only needed for batch 1)
if [ "$START_BATCH" -eq 1 ] && [ ! -f "$CSV_FILE" ]; then
  echo "Error: CSV file '$CSV_FILE' not found"
  exit 1
fi

# Load environment variables
if [ -f "../.env" ]; then
  source "../.env"
else
  echo "Warning: .env file not found, using default connection parameters"
fi

# Set default connection parameters if not from .env
DB_HOST=${DB_HOST:-localhost}
DB_PORT=${DB_PORT:-5432}
DB_NAME=${DB_NAME:-inventory_db}
DB_USER=${DB_USER:-postgres}
export PGPASSWORD=${DB_PASSWORD:-} # Export password for psql

# Common psql parameters
PSQL_OPTS="-h $DB_HOST -p $DB_PORT -U $DB_USER -d $DB_NAME"

# Function to clean up database state
cleanup_and_optimize() {
  echo "Cleaning up and optimizing database state..."

  # Analyze the target table to update statistics
  psql $PSQL_OPTS -c "ANALYZE $TABLE_NAME;"

  # Perform vacuum to reclaim space and update stats
  psql $PSQL_OPTS -c "VACUUM $TABLE_NAME;"

  # Reset connection pool
  psql $PSQL_OPTS -c "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = current_database() AND pid <> pg_backend_pid();"

  # Clean up shared memory
  psql $PSQL_OPTS -c "DISCARD ALL;"

  echo "Optimization complete."
}

# Show connection info
echo "Importing $CSV_FILE into $TABLE_NAME"
echo "Database: $DB_NAME on $DB_HOST:$DB_PORT with batch size: $BATCH_SIZE starting at batch $START_BATCH"

# Start timer
START_TIME=$(date +%s)

# Create progress tracking file
PROGRESS_FILE="/tmp/import_progress_${TABLE_NAME}.txt"
touch "$PROGRESS_FILE"
echo "Starting import at $(date), batch $START_BATCH" >> "$PROGRESS_FILE"

# If we're resuming, run cleanup first
if [ "$START_BATCH" -gt 1 ]; then
  cleanup_and_optimize
fi

# For imported_product_stat_history, use optimized approach with hardcoded column names
if [ "$TABLE_NAME" = "imported_product_stat_history" ]; then
  echo "Using optimized import for $TABLE_NAME"

  # Only drop constraints/indexes and create staging table for batch 1
  if [ "$START_BATCH" -eq 1 ]; then
    # Extract CSV header
    CSV_HEADER=$(head -n 1 "$CSV_FILE")
    echo "CSV header: $CSV_HEADER"

    # Step 1: Drop constraints and indexes
    echo "Dropping constraints and indexes..."
    psql $PSQL_OPTS -c "
    DO \$\$
    DECLARE
      constraint_name TEXT;
    BEGIN
      -- Drop primary key constraint if exists
      SELECT conname INTO constraint_name
      FROM pg_constraint
      WHERE conrelid = '$TABLE_NAME'::regclass AND contype = 'p';

      IF FOUND THEN
        EXECUTE 'ALTER TABLE $TABLE_NAME DROP CONSTRAINT IF EXISTS ' || constraint_name;
        RAISE NOTICE 'Dropped primary key constraint: %', constraint_name;
      END IF;
    END \$\$;
    "

    # Drop all indexes on the table
    psql $PSQL_OPTS -c "
    DO \$\$
    DECLARE
      index_name TEXT;
      index_record RECORD;
    BEGIN
      FOR index_record IN
        SELECT indexname
        FROM pg_indexes
        WHERE tablename = '$TABLE_NAME'
      LOOP
        EXECUTE 'DROP INDEX IF EXISTS ' || index_record.indexname;
        RAISE NOTICE 'Dropped index: %', index_record.indexname;
      END LOOP;
    END \$\$;
    "

    # Step 2: Set maintenance_work_mem and disable triggers
    echo "Setting maintenance_work_mem and disabling triggers..."
    psql $PSQL_OPTS -c "
    SET maintenance_work_mem = '1GB';
    ALTER TABLE $TABLE_NAME DISABLE TRIGGER ALL;
    "

    # Step 3: Create staging table
    echo "Creating staging table..."
    psql $PSQL_OPTS -c "
    DROP TABLE IF EXISTS staging_import;
    CREATE UNLOGGED TABLE staging_import (
      pid TEXT,
      date TEXT,
      score TEXT,
      score2 TEXT,
      qty_in_baskets TEXT,
      qty_sold TEXT,
      notifies_set TEXT,
      visibility_score TEXT,
      health_score TEXT,
      sold_view_score TEXT
    );

    -- Create an index on staging_import to improve OFFSET performance
    CREATE INDEX ON staging_import (pid);
    "

    # Step 4: Import CSV into staging table
    echo "Importing CSV into staging table..."
    psql $PSQL_OPTS -c "\copy staging_import FROM '$CSV_FILE' WITH CSV HEADER DELIMITER ','"
  else
    echo "Resuming import from batch $START_BATCH - skipping table creation and CSV import"

    # Check if staging table exists
    STAGING_EXISTS=$(psql $PSQL_OPTS -t -c "SELECT EXISTS(SELECT 1 FROM pg_tables WHERE tablename='staging_import');" | tr -d '[:space:]')

    if [ "$STAGING_EXISTS" != "t" ]; then
      echo "Error: Staging table 'staging_import' does not exist. Run without batch parameter first."
      exit 1
    fi

    # Ensure triggers are disabled
    psql $PSQL_OPTS -c "ALTER TABLE $TABLE_NAME DISABLE TRIGGER ALL;"

    # Optimize PostgreSQL for better performance
    psql $PSQL_OPTS -c "
    -- Increase work mem for this session
    SET work_mem = '256MB';
    SET maintenance_work_mem = '1GB';
    "
  fi

  # Step 5: Get total row count
  TOTAL_ROWS=$(psql $PSQL_OPTS -t -c "SELECT COUNT(*) FROM staging_import;" | tr -d '[:space:]')
  echo "Total rows to import: $TOTAL_ROWS"

  # Calculate starting point
  PROCESSED=$(( ($START_BATCH - 1) * $BATCH_SIZE ))
  if [ $PROCESSED -ge $TOTAL_ROWS ]; then
    echo "Error: Start batch $START_BATCH is beyond the available rows ($TOTAL_ROWS)"
    exit 1
  fi

  # Step 6: Process in batches with shell loop
  BATCH_NUM=$(( $START_BATCH - 1 ))

  # We'll process batches in chunks of 10 before cleaning up
  CHUNKS_SINCE_CLEANUP=0

  while [ $PROCESSED -lt $TOTAL_ROWS ]; do
    BATCH_NUM=$(( $BATCH_NUM + 1 ))
    BATCH_START=$(date +%s)
    MAX_ROWS=$(( $PROCESSED + $BATCH_SIZE ))
    if [ $MAX_ROWS -gt $TOTAL_ROWS ]; then
      MAX_ROWS=$TOTAL_ROWS
    fi

    echo "Processing batch $BATCH_NUM (rows $PROCESSED to $MAX_ROWS)..."

    # Optimize query buffer for this batch
    psql $PSQL_OPTS -c "SET work_mem = '256MB';"

    # Insert batch with type casts
    psql $PSQL_OPTS -c "
    INSERT INTO $TABLE_NAME (
      pid, date, score, score2, qty_in_baskets, qty_sold,
      notifies_set, visibility_score, health_score, sold_view_score
    )
    SELECT
      pid::bigint,
      date::date,
      score::numeric,
      score2::numeric,
      qty_in_baskets::smallint,
      qty_sold::smallint,
      notifies_set::smallint,
      visibility_score::numeric,
      health_score::varchar,
      sold_view_score::numeric
    FROM staging_import
    LIMIT $BATCH_SIZE
    OFFSET $PROCESSED;
    "

    # Update progress
    BATCH_END=$(date +%s)
    BATCH_ELAPSED=$(( $BATCH_END - $BATCH_START ))
    PROGRESS_PCT=$(echo "scale=2; $MAX_ROWS * 100 / $TOTAL_ROWS" | bc)

    echo "Batch $BATCH_NUM committed in ${BATCH_ELAPSED}s, $MAX_ROWS of $TOTAL_ROWS rows processed ($PROGRESS_PCT%)" | tee -a "$PROGRESS_FILE"

    # Increment counter
    PROCESSED=$(( $PROCESSED + $BATCH_SIZE ))
    CHUNKS_SINCE_CLEANUP=$(( $CHUNKS_SINCE_CLEANUP + 1 ))

    # Check current row count every 10 batches
    if [ $(( $BATCH_NUM % 10 )) -eq 0 ]; then
      CURRENT_COUNT=$(psql $PSQL_OPTS -t -c "SELECT COUNT(*) FROM $TABLE_NAME;" | tr -d '[:space:]')
      echo "Current row count in $TABLE_NAME: $CURRENT_COUNT" | tee -a "$PROGRESS_FILE"

      # Every 10 batches, run an intermediate cleanup
      if [ $CHUNKS_SINCE_CLEANUP -ge 10 ]; then
        echo "Running intermediate cleanup and optimization..."
        psql $PSQL_OPTS -c "VACUUM $TABLE_NAME;"
        CHUNKS_SINCE_CLEANUP=0
      fi
    fi

    # Optional - write a checkpoint file to know where to restart
    echo "$BATCH_NUM" > "/tmp/import_last_batch_${TABLE_NAME}.txt"
  done

  # Only recreate indexes if we've completed the import
  if [ $PROCESSED -ge $TOTAL_ROWS ]; then
    # Step 7: Re-enable triggers and recreate primary key
    echo "Re-enabling triggers and recreating primary key..."
    psql $PSQL_OPTS -c "
    ALTER TABLE $TABLE_NAME ENABLE TRIGGER ALL;
    ALTER TABLE $TABLE_NAME ADD PRIMARY KEY (pid, date);
    "

    # Step 8: Clean up and get final count
    echo "Cleaning up and getting final count..."
    psql $PSQL_OPTS -c "
    DROP TABLE staging_import;
    VACUUM ANALYZE $TABLE_NAME;
    SELECT COUNT(*) AS \"Total rows in $TABLE_NAME\" FROM $TABLE_NAME;
    "
  else
    echo "Import interrupted at batch $BATCH_NUM. To resume, run:"
    echo "./psql-csv-import.sh $CSV_FILE $TABLE_NAME $BATCH_NUM"
  fi

else
  # Generic approach for other tables
  if [ "$START_BATCH" -eq 1 ]; then
    # Extract CSV header
    CSV_HEADER=$(head -n 1 "$CSV_FILE")
    echo "CSV header: $CSV_HEADER"

    # Extract CSV header and format it for SQL
    CSV_COLUMNS=$(echo "$CSV_HEADER" | tr ',' '\n' | sed 's/^/"/;s/$/"/' | tr '\n' ',' | sed 's/,$//')
    TEMP_COLUMNS=$(echo "$CSV_HEADER" | tr ',' '\n' | sed 's/$/ TEXT/' | tr '\n' ',' | sed 's/,$//')

    echo "Importing columns: $CSV_COLUMNS"

    # Step 1: Set maintenance_work_mem and disable triggers
    echo "Setting maintenance_work_mem and disabling triggers..."
    psql $PSQL_OPTS -c "
    SET maintenance_work_mem = '1GB';
    ALTER TABLE $TABLE_NAME DISABLE TRIGGER ALL;
    "

    # Step 2: Create temp table
    echo "Creating temporary table..."
    psql $PSQL_OPTS -c "
    DROP TABLE IF EXISTS temp_import;
    CREATE UNLOGGED TABLE temp_import ($TEMP_COLUMNS);

    -- Create an index on temp_import to improve OFFSET performance
    CREATE INDEX ON temp_import ((1)); -- Index on first column
    "

    # Step 3: Import CSV into temp table
    echo "Importing CSV into temporary table..."
    psql $PSQL_OPTS -c "\copy temp_import FROM '$CSV_FILE' WITH CSV HEADER DELIMITER ','"
  else
    echo "Resuming import from batch $START_BATCH - skipping table creation and CSV import"

    # Check if temp table exists
    TEMP_EXISTS=$(psql $PSQL_OPTS -t -c "SELECT EXISTS(SELECT 1 FROM pg_tables WHERE tablename='temp_import');" | tr -d '[:space:]')

    if [ "$TEMP_EXISTS" != "t" ]; then
      echo "Error: Temporary table 'temp_import' does not exist. Run without batch parameter first."
      exit 1
    fi

    # Ensure triggers are disabled
    psql $PSQL_OPTS -c "ALTER TABLE $TABLE_NAME DISABLE TRIGGER ALL;"

    # Optimize PostgreSQL for better performance
    psql $PSQL_OPTS -c "
    -- Increase work mem for this session
    SET work_mem = '256MB';
    SET maintenance_work_mem = '1GB';
    "

    # Hard-code columns since we know them
    CSV_COLUMNS='"pid","date","score","score2","qty_in_baskets","qty_sold","notifies_set","visibility_score","health_score","sold_view_score"'

    echo "Using standard columns: $CSV_COLUMNS"
  fi

  # Step 4: Get total row count
  TOTAL_ROWS=$(psql $PSQL_OPTS -t -c "SELECT COUNT(*) FROM temp_import;" | tr -d '[:space:]')
  echo "Total rows to import: $TOTAL_ROWS"

  # Calculate starting point
  PROCESSED=$(( ($START_BATCH - 1) * $BATCH_SIZE ))
  if [ $PROCESSED -ge $TOTAL_ROWS ]; then
    echo "Error: Start batch $START_BATCH is beyond the available rows ($TOTAL_ROWS)"
    exit 1
  fi

  # Step 5: Process in batches with shell loop
  BATCH_NUM=$(( $START_BATCH - 1 ))

  # We'll process batches in chunks of 10 before cleaning up
  CHUNKS_SINCE_CLEANUP=0

  while [ $PROCESSED -lt $TOTAL_ROWS ]; do
    BATCH_NUM=$(( $BATCH_NUM + 1 ))
    BATCH_START=$(date +%s)
    MAX_ROWS=$(( $PROCESSED + $BATCH_SIZE ))
    if [ $MAX_ROWS -gt $TOTAL_ROWS ]; then
      MAX_ROWS=$TOTAL_ROWS
    fi

    echo "Processing batch $BATCH_NUM (rows $PROCESSED to $MAX_ROWS)..."

    # Optimize query buffer for this batch
    psql $PSQL_OPTS -c "SET work_mem = '256MB';"

    # Insert batch
    psql $PSQL_OPTS -c "
    INSERT INTO $TABLE_NAME ($CSV_COLUMNS)
    SELECT $CSV_COLUMNS
    FROM temp_import
    LIMIT $BATCH_SIZE
    OFFSET $PROCESSED;
    "

    # Update progress
    BATCH_END=$(date +%s)
    BATCH_ELAPSED=$(( $BATCH_END - $BATCH_START ))
    PROGRESS_PCT=$(echo "scale=2; $MAX_ROWS * 100 / $TOTAL_ROWS" | bc)

    echo "Batch $BATCH_NUM committed in ${BATCH_ELAPSED}s, $MAX_ROWS of $TOTAL_ROWS rows processed ($PROGRESS_PCT%)" | tee -a "$PROGRESS_FILE"

    # Increment counter
    PROCESSED=$(( $PROCESSED + $BATCH_SIZE ))
    CHUNKS_SINCE_CLEANUP=$(( $CHUNKS_SINCE_CLEANUP + 1 ))

    # Check current row count every 10 batches
    if [ $(( $BATCH_NUM % 10 )) -eq 0 ]; then
      CURRENT_COUNT=$(psql $PSQL_OPTS -t -c "SELECT COUNT(*) FROM $TABLE_NAME;" | tr -d '[:space:]')
      echo "Current row count in $TABLE_NAME: $CURRENT_COUNT" | tee -a "$PROGRESS_FILE"

      # Every 10 batches, run an intermediate cleanup
      if [ $CHUNKS_SINCE_CLEANUP -ge 10 ]; then
        echo "Running intermediate cleanup and optimization..."
        psql $PSQL_OPTS -c "VACUUM $TABLE_NAME;"
        CHUNKS_SINCE_CLEANUP=0
      fi
    fi

    # Optional - write a checkpoint file to know where to restart
    echo "$BATCH_NUM" > "/tmp/import_last_batch_${TABLE_NAME}.txt"
  done

  # Only clean up if we've completed the import
  if [ $PROCESSED -ge $TOTAL_ROWS ]; then
    # Step 6: Re-enable triggers and clean up
    echo "Re-enabling triggers and cleaning up..."
    psql $PSQL_OPTS -c "
    ALTER TABLE $TABLE_NAME ENABLE TRIGGER ALL;
    DROP TABLE temp_import;
    VACUUM ANALYZE $TABLE_NAME;
    SELECT COUNT(*) AS \"Total rows in $TABLE_NAME\" FROM $TABLE_NAME;
    "
  else
    echo "Import interrupted at batch $BATCH_NUM. To resume, run:"
    echo "./psql-csv-import.sh $CSV_FILE $TABLE_NAME $BATCH_NUM"
  fi
fi

# Calculate elapsed time
END_TIME=$(date +%s)
ELAPSED=$((END_TIME - START_TIME))

echo "Import completed successfully in ${ELAPSED}s ($(($ELAPSED / 60)) minutes)"
echo "Progress log saved to $PROGRESS_FILE"
@@ -1,4 +1,4 @@
const mysql = require('mysql2/promise');
const { Client } = require('pg');
const path = require('path');
const fs = require('fs');
require('dotenv').config({ path: path.resolve(__dirname, '../.env') });
@@ -8,7 +8,7 @@ const dbConfig = {
  user: process.env.DB_USER,
  password: process.env.DB_PASSWORD,
  database: process.env.DB_NAME,
  multipleStatements: true
  port: process.env.DB_PORT || 5432
};

function outputProgress(data) {
@@ -34,11 +34,24 @@ const METRICS_TABLES = [
  'sales_forecasts',
  'temp_purchase_metrics',
  'temp_sales_metrics',
  'vendor_metrics', //before vendor_details for foreign key
  'vendor_time_metrics', //before vendor_details for foreign key
  'vendor_metrics',
  'vendor_time_metrics',
  'vendor_details'
];

// Tables to always protect from being dropped
const PROTECTED_TABLES = [
  'users',
  'permissions',
  'user_permissions',
  'calculate_history',
  'import_history',
  'ai_prompts',
  'ai_validation_performance',
  'templates',
  'reusable_images'
];

// Split SQL into individual statements
function splitSQLStatements(sql) {
  sql = sql.replace(/\r\n/g, '\n');
@@ -90,31 +103,35 @@ function splitSQLStatements(sql) {
}

async function resetMetrics() {
  let connection;
  let client;
  try {
    outputProgress({
      operation: 'Starting metrics reset',
      message: 'Connecting to database...'
    });

    connection = await mysql.createConnection(dbConfig);
    await connection.beginTransaction();
    client = new Client(dbConfig);
    await client.connect();

    // Explicitly begin a transaction
    await client.query('BEGIN');

    // First verify current state
    const [initialTables] = await connection.query(`
      SELECT TABLE_NAME as name
      FROM information_schema.tables
      WHERE TABLE_SCHEMA = DATABASE()
      AND TABLE_NAME IN (?)
    `, [METRICS_TABLES]);
    const initialTables = await client.query(`
      SELECT tablename as name
      FROM pg_tables
      WHERE schemaname = 'public'
      AND tablename = ANY($1)
      AND tablename NOT IN (SELECT unnest($2::text[]))
    `, [METRICS_TABLES, PROTECTED_TABLES]);

    outputProgress({
      operation: 'Initial state',
      message: `Found ${initialTables.length} existing metrics tables: ${initialTables.map(t => t.name).join(', ')}`
      message: `Found ${initialTables.rows.length} existing metrics tables: ${initialTables.rows.map(t => t.name).join(', ')}`
    });

    // Disable foreign key checks at the start
    await connection.query('SET FOREIGN_KEY_CHECKS = 0');
    await client.query('SET session_replication_role = \'replica\'');

    // Drop all metrics tables in reverse order to handle dependencies
    outputProgress({
@@ -123,18 +140,28 @@ async function resetMetrics() {
    });

    for (const table of [...METRICS_TABLES].reverse()) {
      // Skip protected tables
      if (PROTECTED_TABLES.includes(table)) {
        outputProgress({
          operation: 'Protected table',
          message: `Skipping protected table: ${table}`
        });
        continue;
      }

      try {
        await connection.query(`DROP TABLE IF EXISTS ${table}`);
        // Use NOWAIT to avoid hanging if there's a lock
        await client.query(`DROP TABLE IF EXISTS "${table}" CASCADE`);

        // Verify the table was actually dropped
        const [checkDrop] = await connection.query(`
        const checkDrop = await client.query(`
          SELECT COUNT(*) as count
          FROM information_schema.tables
          WHERE TABLE_SCHEMA = DATABASE()
          AND TABLE_NAME = ?
          FROM pg_tables
          WHERE schemaname = 'public'
          AND tablename = $1
        `, [table]);

        if (checkDrop[0].count > 0) {
        if (parseInt(checkDrop.rows[0].count) > 0) {
          throw new Error(`Failed to drop table ${table} - table still exists`);
        }

@@ -142,28 +169,43 @@ async function resetMetrics() {
          operation: 'Table dropped',
          message: `Successfully dropped table: ${table}`
        });

        // Commit after each table drop to ensure locks are released
        await client.query('COMMIT');
        // Start a new transaction for the next table
        await client.query('BEGIN');
        // Re-disable foreign key constraints for the new transaction
        await client.query('SET session_replication_role = \'replica\'');
      } catch (err) {
        outputProgress({
          status: 'error',
          operation: 'Drop table error',
          message: `Error dropping table ${table}: ${err.message}`
        });
        throw err;
        await client.query('ROLLBACK');
        // Re-start transaction for next table
        await client.query('BEGIN');
        await client.query('SET session_replication_role = \'replica\'');
      }
    }

    // Verify all tables were dropped
    const [afterDrop] = await connection.query(`
      SELECT TABLE_NAME as name
      FROM information_schema.tables
      WHERE TABLE_SCHEMA = DATABASE()
      AND TABLE_NAME IN (?)
    const afterDrop = await client.query(`
      SELECT tablename as name
      FROM pg_tables
      WHERE schemaname = 'public'
      AND tablename = ANY($1)
    `, [METRICS_TABLES]);

    if (afterDrop.length > 0) {
      throw new Error(`Failed to drop all tables. Remaining tables: ${afterDrop.map(t => t.name).join(', ')}`);
    if (afterDrop.rows.length > 0) {
      throw new Error(`Failed to drop all tables. Remaining tables: ${afterDrop.rows.map(t => t.name).join(', ')}`);
    }

    // Make sure we have a fresh transaction here
    await client.query('COMMIT');
    await client.query('BEGIN');
    await client.query('SET session_replication_role = \'replica\'');

    // Read metrics schema
    outputProgress({
      operation: 'Reading schema',
@@ -187,39 +229,26 @@ async function resetMetrics() {
    for (let i = 0; i < statements.length; i++) {
      const stmt = statements[i];
      try {
        await connection.query(stmt);

        // Check for warnings
        const [warnings] = await connection.query('SHOW WARNINGS');
        if (warnings && warnings.length > 0) {
          outputProgress({
            status: 'warning',
            operation: 'SQL Warning',
            message: {
              statement: i + 1,
              warnings: warnings
            }
          });
        }
        const result = await client.query(stmt);

        // If this is a CREATE TABLE statement, verify the table was created
        if (stmt.trim().toLowerCase().startsWith('create table')) {
          const tableName = stmt.match(/create\s+table\s+(?:if\s+not\s+exists\s+)?`?(\w+)`?/i)?.[1];
          const tableName = stmt.match(/create\s+table\s+(?:if\s+not\s+exists\s+)?["]?(\w+)["]?/i)?.[1];
          if (tableName) {
            const [checkCreate] = await connection.query(`
              SELECT TABLE_NAME as name, CREATE_TIME as created
              FROM information_schema.tables
              WHERE TABLE_SCHEMA = DATABASE()
              AND TABLE_NAME = ?
            const checkCreate = await client.query(`
              SELECT tablename as name
              FROM pg_tables
              WHERE schemaname = 'public'
              AND tablename = $1
            `, [tableName]);

            if (checkCreate.length === 0) {
            if (checkCreate.rows.length === 0) {
              throw new Error(`Failed to create table ${tableName} - table does not exist after CREATE statement`);
            }

            outputProgress({
              operation: 'Table created',
              message: `Successfully created table: ${tableName} at ${checkCreate[0].created}`
              message: `Successfully created table: ${tableName}`
            });
          }
        }
@@ -229,27 +258,40 @@ async function resetMetrics() {
          message: {
            statement: i + 1,
            total: statements.length,
            preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : '')
            preview: stmt.substring(0, 100) + (stmt.length > 100 ? '...' : ''),
            rowCount: result.rowCount
          }
        });

        // Commit every 10 statements to avoid long-running transactions
        if (i > 0 && i % 10 === 0) {
          await client.query('COMMIT');
          await client.query('BEGIN');
          await client.query('SET session_replication_role = \'replica\'');
        }
      } catch (sqlError) {
        outputProgress({
          status: 'error',
          operation: 'SQL Error',
          message: {
            error: sqlError.message,
            sqlState: sqlError.sqlState,
            errno: sqlError.errno,
            statement: stmt,
            statementNumber: i + 1
          }
        });
        await client.query('ROLLBACK');
        throw sqlError;
      }
    }

    // Final commit for any pending statements
    await client.query('COMMIT');

    // Start new transaction for final checks
    await client.query('BEGIN');

    // Re-enable foreign key checks after all tables are created
    await connection.query('SET FOREIGN_KEY_CHECKS = 1');
    await client.query('SET session_replication_role = \'origin\'');

    // Verify metrics tables were created
    outputProgress({
@@ -257,37 +299,38 @@ async function resetMetrics() {
      message: 'Checking all metrics tables were created...'
    });

    const [metricsTablesResult] = await connection.query(`
      SELECT
        TABLE_NAME as name,
        TABLE_ROWS as \`rows\`,
        CREATE_TIME as created
      FROM information_schema.tables
      WHERE TABLE_SCHEMA = DATABASE()
      AND TABLE_NAME IN (?)
    const metricsTablesResult = await client.query(`
      SELECT tablename as name
      FROM pg_tables
      WHERE schemaname = 'public'
      AND tablename = ANY($1)
    `, [METRICS_TABLES]);

    outputProgress({
      operation: 'Tables found',
      message: `Found ${metricsTablesResult.length} tables: ${metricsTablesResult.map(t =>
        `${t.name} (created: ${t.created})`
      ).join(', ')}`
      message: `Found ${metricsTablesResult.rows.length} tables: ${metricsTablesResult.rows.map(t => t.name).join(', ')}`
    });

    const existingMetricsTables = metricsTablesResult.map(t => t.name);
    const existingMetricsTables = metricsTablesResult.rows.map(t => t.name);
    const missingMetricsTables = METRICS_TABLES.filter(t => !existingMetricsTables.includes(t));

    if (missingMetricsTables.length > 0) {
      // Do one final check of the actual tables
      const [finalCheck] = await connection.query('SHOW TABLES');
      const finalCheck = await client.query(`
        SELECT tablename as name
        FROM pg_tables
        WHERE schemaname = 'public'
      `);
      outputProgress({
        operation: 'Final table check',
        message: `All database tables: ${finalCheck.map(t => Object.values(t)[0]).join(', ')}`
        message: `All database tables: ${finalCheck.rows.map(t => t.name).join(', ')}`
      });
      await client.query('ROLLBACK');
      throw new Error(`Failed to create metrics tables: ${missingMetricsTables.join(', ')}`);
    }

    await connection.commit();
    // Commit final transaction
    await client.query('COMMIT');

    outputProgress({
      status: 'complete',
@@ -302,17 +345,21 @@ async function resetMetrics() {
      stack: error.stack
    });

    if (connection) {
      await connection.rollback();
    if (client) {
      try {
        await client.query('ROLLBACK');
      } catch (rollbackError) {
        console.error('Error during rollback:', rollbackError);
      }
      // Make sure to re-enable foreign key checks even if there's an error
      await connection.query('SET FOREIGN_KEY_CHECKS = 1').catch(() => {});
      await client.query('SET session_replication_role = \'origin\'').catch(() => {});
    }
    throw error;
  } finally {
    if (connection) {
    if (client) {
      // One final attempt to ensure foreign key checks are enabled
      await connection.query('SET FOREIGN_KEY_CHECKS = 1').catch(() => {});
      await connection.end();
      await client.query('SET session_replication_role = \'origin\'').catch(() => {});
      await client.end();
    }
  }
}
337
inventory-server/old/update-order-costs.js
Normal file
@@ -0,0 +1,337 @@
/**
 * This script updates the costeach values for existing orders from the original MySQL database
 * without needing to run the full import process.
 */
const dotenv = require("dotenv");
const path = require("path");
const fs = require("fs");
const { setupConnections, closeConnections } = require('../scripts/import/utils');
const { outputProgress, formatElapsedTime } = require('./metrics/utils/progress');

dotenv.config({ path: path.join(__dirname, "../.env") });

// SSH configuration
const sshConfig = {
  ssh: {
    host: process.env.PROD_SSH_HOST,
    port: process.env.PROD_SSH_PORT || 22,
    username: process.env.PROD_SSH_USER,
    privateKey: process.env.PROD_SSH_KEY_PATH
      ? fs.readFileSync(process.env.PROD_SSH_KEY_PATH)
      : undefined,
    compress: true, // Enable SSH compression
  },
  prodDbConfig: {
    // MySQL config for production
    host: process.env.PROD_DB_HOST || "localhost",
    user: process.env.PROD_DB_USER,
    password: process.env.PROD_DB_PASSWORD,
    database: process.env.PROD_DB_NAME,
    port: process.env.PROD_DB_PORT || 3306,
    timezone: 'Z',
  },
  localDbConfig: {
    // PostgreSQL config for local
    host: process.env.DB_HOST,
    user: process.env.DB_USER,
    password: process.env.DB_PASSWORD,
    database: process.env.DB_NAME,
    port: process.env.DB_PORT || 5432,
    ssl: process.env.DB_SSL === 'true',
    connectionTimeoutMillis: 60000,
    idleTimeoutMillis: 30000,
    max: 10 // connection pool max size
  }
};

async function updateOrderCosts() {
  const startTime = Date.now();
  let connections;
  let updatedCount = 0;
  let errorCount = 0;

  try {
    outputProgress({
      status: "running",
      operation: "Order costs update",
      message: "Initializing SSH tunnel..."
    });

    connections = await setupConnections(sshConfig);
    const { prodConnection, localConnection } = connections;

    // 1. Get all orders from local database that need cost updates
    outputProgress({
      status: "running",
      operation: "Order costs update",
      message: "Getting orders from local database..."
    });

    const [orders] = await localConnection.query(`
      SELECT DISTINCT order_number, pid
      FROM orders
      WHERE costeach = 0 OR costeach IS NULL
      ORDER BY order_number
    `);

    if (!orders || !orders.rows || orders.rows.length === 0) {
      console.log("No orders found that need cost updates");
      return { updatedCount: 0, errorCount: 0 };
    }

    const totalOrders = orders.rows.length;
    console.log(`Found ${totalOrders} orders that need cost updates`);

    // Process in batches of 500 orders
    const BATCH_SIZE = 500;
    for (let i = 0; i < orders.rows.length; i += BATCH_SIZE) {
      try {
        // Start transaction for this batch
        await localConnection.beginTransaction();

        const batch = orders.rows.slice(i, i + BATCH_SIZE);

        const orderNumbers = [...new Set(batch.map(o => o.order_number))];

        // 2. Fetch costs from production database for these orders
        outputProgress({
          status: "running",
          operation: "Order costs update",
          message: `Fetching costs for orders ${i + 1} to ${Math.min(i + BATCH_SIZE, totalOrders)} of ${totalOrders}`,
          current: i,
          total: totalOrders,
          elapsed: formatElapsedTime((Date.now() - startTime) / 1000)
        });

        const [costs] = await prodConnection.query(`
          SELECT
            oc.orderid as order_number,
            oc.pid,
            oc.costeach
          FROM order_costs oc
          INNER JOIN (
            SELECT
              orderid,
              pid,
              MAX(id) as max_id
            FROM order_costs
            WHERE orderid IN (?)
            AND pending = 0
            GROUP BY orderid, pid
          ) latest ON oc.orderid = latest.orderid AND oc.pid = latest.pid AND oc.id = latest.max_id
        `, [orderNumbers]);

        // Create a map of costs for easy lookup
        const costMap = {};
        if (costs && costs.length) {
          costs.forEach(c => {
            costMap[`${c.order_number}-${c.pid}`] = c.costeach || 0;
          });
        }

        // 3. Update costs in local database by batches
        // Using a more efficient update approach with a temporary table

        // Create a temporary table for each batch
        await localConnection.query(`
          DROP TABLE IF EXISTS temp_order_costs;
          CREATE TEMP TABLE temp_order_costs (
            order_number VARCHAR(50) NOT NULL,
            pid BIGINT NOT NULL,
            costeach DECIMAL(10,3) NOT NULL,
            PRIMARY KEY (order_number, pid)
          );
        `);

        // Insert cost data into the temporary table
        const costEntries = [];
        for (const order of batch) {
          const key = `${order.order_number}-${order.pid}`;
          if (key in costMap) {
            costEntries.push({
              order_number: order.order_number,
              pid: order.pid,
              costeach: costMap[key]
            });
          }
        }

        // Insert in sub-batches of 50
        const DB_BATCH_SIZE = 50;
        for (let j = 0; j < costEntries.length; j += DB_BATCH_SIZE) {
          const subBatch = costEntries.slice(j, j + DB_BATCH_SIZE);
          if (subBatch.length === 0) continue;

          const placeholders = subBatch.map((_, idx) =>
            `($${idx * 3 + 1}, $${idx * 3 + 2}, $${idx * 3 + 3})`
          ).join(',');

          const values = subBatch.flatMap(item => [
            item.order_number,
            item.pid,
            item.costeach
          ]);

          await localConnection.query(`
            INSERT INTO temp_order_costs (order_number, pid, costeach)
            VALUES ${placeholders}
          `, values);
        }

        // Perform bulk update from the temporary table
        const [updateResult] = await localConnection.query(`
          UPDATE orders o
          SET costeach = t.costeach
          FROM temp_order_costs t
          WHERE o.order_number = t.order_number AND o.pid = t.pid
          RETURNING o.id
        `);

        const batchUpdated = updateResult.rowCount || 0;
        updatedCount += batchUpdated;

        // Commit transaction for this batch
        await localConnection.commit();

        outputProgress({
          status: "running",
          operation: "Order costs update",
          message: `Updated ${updatedCount} orders with costs from production (batch: ${batchUpdated})`,
          current: i + batch.length,
          total: totalOrders,
          elapsed: formatElapsedTime((Date.now() - startTime) / 1000)
        });
      } catch (error) {
        // If a batch fails, roll back that batch's transaction and continue
        try {
          await localConnection.rollback();
        } catch (rollbackError) {
          console.error("Error during batch rollback:", rollbackError);
        }

        console.error(`Error processing batch ${i}-${i + BATCH_SIZE}:`, error);
        errorCount++;
      }
    }

    // 4. For orders with no matching costs, set a default based on price
    outputProgress({
      status: "running",
      operation: "Order costs update",
      message: "Setting default costs for remaining orders..."
    });

    // Process remaining updates in smaller batches
    const DEFAULT_BATCH_SIZE = 10000;
    let totalDefaultUpdated = 0;

    try {
      // Start with a count query to determine how many records need the default update
      const [countResult] = await localConnection.query(`
        SELECT COUNT(*) as count FROM orders
        WHERE (costeach = 0 OR costeach IS NULL)
      `);

      const totalToUpdate = parseInt(countResult.rows[0]?.count || 0);

      if (totalToUpdate > 0) {
        console.log(`Applying default cost to ${totalToUpdate} orders`);

        // Apply the default in batches with separate transactions
        for (let i = 0; i < totalToUpdate; i += DEFAULT_BATCH_SIZE) {
          try {
            await localConnection.beginTransaction();

            const [defaultUpdates] = await localConnection.query(`
              WITH orders_to_update AS (
                SELECT id FROM orders
                WHERE (costeach = 0 OR costeach IS NULL)
                LIMIT ${DEFAULT_BATCH_SIZE}
              )
              UPDATE orders o
              SET costeach = price * 0.5
              FROM orders_to_update otu
              WHERE o.id = otu.id
              RETURNING o.id
            `);

            const batchDefaultUpdated = defaultUpdates.rowCount || 0;
            totalDefaultUpdated += batchDefaultUpdated;

            await localConnection.commit();

            outputProgress({
              status: "running",
              operation: "Order costs update",
              message: `Applied default costs to ${totalDefaultUpdated} of ${totalToUpdate} orders`,
              current: totalDefaultUpdated,
              total: totalToUpdate,
              elapsed: formatElapsedTime((Date.now() - startTime) / 1000)
            });
          } catch (error) {
            try {
              await localConnection.rollback();
            } catch (rollbackError) {
              console.error("Error during default update rollback:", rollbackError);
            }

            console.error(`Error applying default costs batch ${i}-${i + DEFAULT_BATCH_SIZE}:`, error);
            errorCount++;
          }
        }
      }
    } catch (error) {
      console.error("Error counting or updating remaining orders:", error);
      errorCount++;
    }

    updatedCount += totalDefaultUpdated;

    const endTime = Date.now();
    const totalSeconds = (endTime - startTime) / 1000;

    outputProgress({
      status: "complete",
      operation: "Order costs update",
      message: `Updated ${updatedCount} orders (${totalDefaultUpdated} with default values) in ${formatElapsedTime(totalSeconds)}`,
      elapsed: formatElapsedTime(totalSeconds)
    });

    return {
      status: "complete",
      updatedCount,
      errorCount
    };
  } catch (error) {
    console.error("Error during order costs update:", error);

    return {
      status: "error",
      error: error.message,
      updatedCount,
      errorCount
    };
  } finally {
    if (connections) {
      await closeConnections(connections).catch(err => {
        console.error("Error closing connections:", err);
      });
    }
  }
}

// Run the script only if this is the main module
if (require.main === module) {
  updateOrderCosts().then((results) => {
    console.log('Cost update completed:', results);
    // Force exit after a small delay to ensure all logs are written
    setTimeout(() => process.exit(0), 500);
  }).catch((error) => {
    console.error("Unhandled error:", error);
    // Force exit with error code after a small delay
    setTimeout(() => process.exit(1), 500);
  });
}

// Export the function for use in other scripts
module.exports = updateOrderCosts;
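
Because the script self-executes only under require.main and also exports updateOrderCosts, it can be reused programmatically; a minimal caller sketch (hypothetical, not part of this change):

const updateOrderCosts = require('./update-order-costs');

updateOrderCosts().then(({ status, updatedCount, errorCount }) => {
  console.log(`order cost update ${status}: ${updatedCount} updated, ${errorCount} batch errors`);
});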
977
inventory-server/package-lock.json
generated
File diff suppressed because it is too large
@@ -18,12 +18,19 @@
  "author": "",
  "license": "ISC",
  "dependencies": {
    "@types/diff": "^7.0.1",
    "axios": "^1.8.1",
    "bcrypt": "^5.1.1",
    "commander": "^13.1.0",
    "cors": "^2.8.5",
    "csv-parse": "^5.6.0",
    "diff": "^7.0.0",
    "dotenv": "^16.4.7",
    "express": "^4.18.2",
    "multer": "^1.4.5-lts.1",
    "mysql2": "^3.12.0",
    "openai": "^4.85.3",
    "pg": "^8.14.1",
    "pm2": "^5.3.0",
    "ssh2": "^1.16.0",
    "uuid": "^9.0.1"
908
inventory-server/scripts/calculate-metrics-new.js
Normal file
@@ -0,0 +1,908 @@
// run-all-updates.js
const path = require('path');
const fs = require('fs');
const { Pool } = require('pg'); // Assuming you use 'pg'

// --- Configuration ---
// Toggle these constants to enable/disable specific steps for testing
const RUN_DAILY_SNAPSHOTS = true;
const RUN_PRODUCT_METRICS = true;
const RUN_PERIODIC_METRICS = true;
const RUN_BRAND_METRICS = true;
const RUN_VENDOR_METRICS = true;
const RUN_CATEGORY_METRICS = true;

// Maximum execution time for the entire sequence (e.g., 90 minutes)
const MAX_EXECUTION_TIME_TOTAL = 90 * 60 * 1000;
// Maximum execution time per individual SQL step (e.g., 30 minutes)
const MAX_EXECUTION_TIME_PER_STEP = 30 * 60 * 1000;
// Query cancellation timeout
const CANCEL_QUERY_AFTER_SECONDS = 5;
// --- End Configuration ---

// Change working directory to script directory
process.chdir(path.dirname(__filename));

// Log script path for debugging
console.log('Script running from:', __dirname);

// Try to load environment variables from multiple locations
const envPaths = [
  path.resolve(__dirname, '../..', '.env'), // Two levels up (inventory/.env)
  path.resolve(__dirname, '..', '.env'),    // One level up (inventory-server/.env)
  path.resolve(__dirname, '.env'),          // Same directory
  '/var/www/html/inventory/.env'            // Server absolute path
];

let envLoaded = false;
for (const envPath of envPaths) {
  if (fs.existsSync(envPath)) {
    console.log(`Loading environment from: ${envPath}`);
    require('dotenv').config({ path: envPath });
    envLoaded = true;
    break;
  }
}

if (!envLoaded) {
  console.warn('WARNING: Could not find .env file in any of the expected locations.');
  console.warn('Checked paths:', envPaths);
}

// --- Database Setup ---
// Make sure we have the required DB credentials
if (!process.env.DB_HOST && !process.env.DATABASE_URL) {
  console.error('WARNING: Neither DB_HOST nor DATABASE_URL environment variables found');
}

// Only validate individual parameters if not using connection string
if (!process.env.DATABASE_URL) {
  if (!process.env.DB_USER) console.error('WARNING: DB_USER environment variable is missing');
  if (!process.env.DB_NAME) console.error('WARNING: DB_NAME environment variable is missing');

  // Password must be a string for PostgreSQL SCRAM authentication
  if (!process.env.DB_PASSWORD || typeof process.env.DB_PASSWORD !== 'string') {
    console.error('WARNING: DB_PASSWORD environment variable is missing or not a string');
  }
}

// Configure database connection to match individual scripts
let dbConfig;

// Check if a DATABASE_URL exists (common in production environments)
if (process.env.DATABASE_URL && typeof process.env.DATABASE_URL === 'string') {
  console.log('Using DATABASE_URL for connection');
  dbConfig = {
    connectionString: process.env.DATABASE_URL,
    ssl: process.env.DB_SSL === 'true' ? { rejectUnauthorized: false } : false,
    // Add performance optimizations
    max: 10, // connection pool max size
    idleTimeoutMillis: 30000,
    connectionTimeoutMillis: 60000,
    // Set timeouts for long-running queries
    statement_timeout: 1800000, // 30 minutes
    query_timeout: 1800000      // 30 minutes
  };
} else {
  // Use individual connection parameters
  dbConfig = {
    host: process.env.DB_HOST,
    user: process.env.DB_USER,
    password: process.env.DB_PASSWORD,
    database: process.env.DB_NAME,
    port: process.env.DB_PORT || 5432,
    ssl: process.env.DB_SSL === 'true',
    // Add performance optimizations
    max: 10, // connection pool max size
    idleTimeoutMillis: 30000,
    connectionTimeoutMillis: 60000,
    // Set timeouts for long-running queries
    statement_timeout: 1800000, // 30 minutes
    query_timeout: 1800000      // 30 minutes
  };
}
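
// Sketch (not in the original commit): the connectionString branch above
// expects a standard libpq-style URL, e.g. (hypothetical credentials):
//
//   DATABASE_URL=postgres://app_user:secret@db-host:5432/inventory_db
//
// with DB_SSL=true additionally enabling ssl: { rejectUnauthorized: false }
// for self-signed certificate chains.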
|
||||
|
||||
// Try to load from utils DB module as a last resort
|
||||
try {
|
||||
if (!process.env.DB_HOST && !process.env.DATABASE_URL) {
|
||||
console.log('Attempting to load DB config from individual script modules...');
|
||||
const dbModule = require('./metrics-new/utils/db');
|
||||
if (dbModule && dbModule.dbConfig) {
|
||||
console.log('Found DB config in individual script module');
|
||||
dbConfig = {
|
||||
...dbModule.dbConfig,
|
||||
// Add performance optimizations if not present
|
||||
max: dbModule.dbConfig.max || 10,
|
||||
idleTimeoutMillis: dbModule.dbConfig.idleTimeoutMillis || 30000,
|
||||
connectionTimeoutMillis: dbModule.dbConfig.connectionTimeoutMillis || 60000,
|
||||
statement_timeout: 1800000,
|
||||
query_timeout: 1800000
|
||||
};
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
console.warn('Could not load DB config from individual script modules:', err.message);
|
||||
}
|
||||
|
||||
// Debug log connection info (without password)
|
||||
console.log('DB Connection Info:', {
|
||||
connectionString: dbConfig.connectionString ? 'PROVIDED' : undefined,
|
||||
host: dbConfig.host,
|
||||
user: dbConfig.user,
|
||||
database: dbConfig.database,
|
||||
port: dbConfig.port,
|
||||
ssl: dbConfig.ssl ? 'ENABLED' : 'DISABLED',
|
||||
password: (dbConfig.password || dbConfig.connectionString) ? '****' : 'MISSING' // Only show if credentials exist
|
||||
});
|
||||
|
||||
const pool = new Pool(dbConfig);
|
||||
|
||||
const getConnection = () => {
|
||||
return pool.connect();
|
||||
};
|
||||
|
||||
const closePool = () => {
|
||||
console.log("Closing database connection pool.");
|
||||
return pool.end();
|
||||
};

// --- Progress Utilities ---
// Using functions directly instead of globals
const progressUtils = require('./metrics-new/utils/progress'); // Assuming utils/progress.js exports these

// --- State & Cancellation ---
let isCancelled = false;
let currentStep = ''; // Track which step is running for cancellation message
let overallStartTime = null;
let mainTimeoutHandle = null;
let stepTimeoutHandle = null;
let combinedHistoryId = null; // ID for the combined history record

async function cancelCalculation(reason = 'cancelled by user') {
  if (isCancelled) return; // Prevent multiple cancellations
  isCancelled = true;
  console.log(`Calculation ${reason}. Attempting to cancel active step: ${currentStep}`);

  // Clear timeouts
  if (mainTimeoutHandle) clearTimeout(mainTimeoutHandle);
  if (stepTimeoutHandle) clearTimeout(stepTimeoutHandle);

  // Attempt to cancel the long-running query in Postgres
  let conn = null;
  try {
    console.log(`Attempting to cancel queries running longer than ${CANCEL_QUERY_AFTER_SECONDS} seconds...`);
    conn = await getConnection();
    const result = await conn.query(`
      SELECT pg_cancel_backend(pid)
      FROM pg_stat_activity
      WHERE query_start < now() - interval '${CANCEL_QUERY_AFTER_SECONDS} seconds'
        AND application_name = 'node-metrics-calculator' -- Match specific app name
        AND state = 'active' -- Only cancel active queries
        AND query NOT LIKE '%pg_cancel_backend%'
        AND pid <> pg_backend_pid(); -- Don't cancel self
    `);
    console.log(`Sent ${result.rowCount} cancellation signal(s).`);

    // Update the combined history record to show cancellation
    if (combinedHistoryId) {
      const totalDuration = Math.round((Date.now() - overallStartTime) / 1000);
      await conn.query(`
        UPDATE calculate_history
        SET
          status = 'cancelled'::calculation_status,
          end_time = NOW(),
          duration_seconds = $1::integer,
          error_message = $2::text
        WHERE id = $3::integer;
      `, [totalDuration, `Calculation ${reason} during step: ${currentStep}`, combinedHistoryId]);
      console.log(`Updated combined history record ${combinedHistoryId} with cancellation status`);
    }

    conn.release();
  } catch (err) {
    console.error('Error during database query cancellation:', err.message);
    if (conn) {
      try { conn.release(); } catch (e) { console.error("Error releasing cancellation connection", e); }
    }
    // Proceed with script termination attempt even if DB cancel fails
  } finally {
    // Update progress to show cancellation
    progressUtils.outputProgress({
      status: 'cancelled',
      operation: `Calculation ${reason} during step: ${currentStep}`,
      current: 0, // Reset progress indicators
      total: 100,
      elapsed: overallStartTime ? progressUtils.formatElapsedTime(overallStartTime) : 'N/A',
      remaining: null,
      rate: 0,
      percentage: '0', // Or keep last known percentage?
      timing: {
        start_time: overallStartTime ? new Date(overallStartTime).toISOString() : 'N/A',
        end_time: new Date().toISOString(),
        elapsed_seconds: overallStartTime ? Math.round((Date.now() - overallStartTime) / 1000) : 0
      }
    });
  }

  // Note: We don't force exit here anymore. We let the main function's error
  // handling catch the cancellation error thrown by executeSqlStep or the timeout.
  return {
    success: true, // Indicates cancellation was initiated
    message: `Calculation ${reason}`
  };
}
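
// For reference, a sketch of how to inspect what the cancellation above would
// target (assumes the same application_name; pg_cancel_backend interrupts the
// running query but keeps the session, unlike pg_terminate_backend):
//
//   SELECT pid, state, now() - query_start AS runtime, left(query, 60) AS query
//   FROM pg_stat_activity
//   WHERE application_name = 'node-metrics-calculator'
//   ORDER BY runtime DESC;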

// Handle SIGINT (Ctrl+C) and SIGTERM (kill) signals
process.on('SIGINT', () => {
  console.log('\nReceived SIGINT (Ctrl+C).');
  cancelCalculation('cancelled by user (SIGINT)');
  // Give cancellation a moment to propagate before force-exiting if needed
  setTimeout(() => process.exit(1), 2000);
});
process.on('SIGTERM', () => {
  console.log('Received SIGTERM.');
  cancelCalculation('cancelled by system (SIGTERM)');
  // Give cancellation a moment to propagate before force-exiting if needed
  setTimeout(() => process.exit(1), 2000);
});

// Add error handlers for uncaught exceptions/rejections
process.on('uncaughtException', (error) => {
  console.error('Uncaught Exception:', error);
  // Attempt graceful shutdown/logging if possible, then exit
  cancelCalculation('failed due to uncaught exception').finally(() => {
    closePool().finally(() => process.exit(1));
  });
});

process.on('unhandledRejection', (reason, promise) => {
  console.error('Unhandled Rejection at:', promise, 'reason:', reason);
  // Attempt graceful shutdown/logging if possible, then exit
  cancelCalculation('failed due to unhandled rejection').finally(() => {
    closePool().finally(() => process.exit(1));
  });
});

// --- Core Logic ---

/**
 * Ensures all products have entries in the settings_product table.
 * This is important after importing new products.
 */
async function syncSettingsProductTable() {
  let conn = null;
  try {
    currentStep = 'Syncing settings_product table';
    progressUtils.outputProgress({
      operation: 'Syncing product settings',
      message: 'Ensuring all products have settings entries'
    });

    conn = await getConnection();

    // Get counts before sync
    const beforeCounts = await conn.query(`
      SELECT
        (SELECT COUNT(*) FROM products) AS products_count,
        (SELECT COUNT(*) FROM settings_product) AS settings_count
    `);

    const productsCount = parseInt(beforeCounts.rows[0].products_count);
    const settingsCount = parseInt(beforeCounts.rows[0].settings_count);

    progressUtils.outputProgress({
      operation: 'Settings product sync',
      message: `Found ${productsCount} products and ${settingsCount} settings entries`
    });

    // Insert missing product settings
    const result = await conn.query(`
      INSERT INTO settings_product (
        pid,
        lead_time_days,
        days_of_stock,
        safety_stock,
        forecast_method,
        exclude_from_forecast
      )
      SELECT
        p.pid,
        CAST(NULL AS INTEGER),
        CAST(NULL AS INTEGER),
        COALESCE((SELECT setting_value::int FROM settings_global WHERE setting_key = 'default_safety_stock_units'), 0),
        CAST(NULL AS VARCHAR),
        FALSE
      FROM
        public.products p
      WHERE
        NOT EXISTS (
          SELECT 1 FROM settings_product sp WHERE sp.pid = p.pid
        )
      ON CONFLICT (pid) DO NOTHING
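      -- NOT EXISTS already filters out existing rows; ON CONFLICT additionally
      -- guards the race where another writer inserts the same pid between the
      -- check and the insert.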
    `);

    // Get counts after sync
    const afterCounts = await conn.query(`
      SELECT COUNT(*) AS settings_count FROM settings_product
    `);

    const newSettingsCount = parseInt(afterCounts.rows[0].settings_count);
    const addedCount = newSettingsCount - settingsCount;

    progressUtils.outputProgress({
      operation: 'Settings product sync',
      message: `Added ${addedCount} new settings entries. Now have ${newSettingsCount} total entries.`,
      status: 'complete'
    });

    conn.release();
    return addedCount;
  } catch (err) {
    progressUtils.outputProgress({
      status: 'error',
      operation: 'Settings product sync failed',
      error: err.message
    });
    if (conn) conn.release();
    throw err;
  }
}

/**
 * Executes a single SQL calculation step.
 * @param {object} config - Configuration for the step.
 * @param {string} config.name - User-friendly name of the step.
 * @param {string} config.sqlFile - Path to the SQL file.
 * @param {string} config.historyType - Type identifier for calculate_history.
 * @param {string} config.statusModule - Module name for calculate_status.
 * @param {object} progress - Progress utility functions.
 * @returns {Promise<{success: boolean, message: string, duration: number, rowsAffected: number}>}
 */
async function executeSqlStep(config, progress) {
  if (isCancelled) throw new Error(`Calculation skipped step ${config.name} due to prior cancellation.`);

  currentStep = config.name; // Update global state
  console.log(`\n--- Starting Step: ${config.name} ---`);
  const stepStartTime = Date.now();
  let connection = null;
  let rowsAffected = 0; // Track rows affected by this step

  // Set a timeout for this specific step
  if (stepTimeoutHandle) clearTimeout(stepTimeoutHandle); // Clear previous step's timeout
  stepTimeoutHandle = setTimeout(() => {
    // A timer callback can't throw into the awaiting code, so log the timeout
    // and initiate cancellation instead; cancelCalculation attempts to stop the
    // running query, and the main catch block handles cleanup.
    console.error(`Step "${config.name}" timed out after ${MAX_EXECUTION_TIME_PER_STEP / 1000} seconds.`);
    cancelCalculation(`timed out during step: ${config.name}`);
  }, MAX_EXECUTION_TIME_PER_STEP);

  try {
    // 1. Read SQL File
    const sqlFilePath = path.resolve(__dirname, config.sqlFile);
    if (!fs.existsSync(sqlFilePath)) {
      throw new Error(`SQL file not found: ${sqlFilePath}`);
    }
    const sqlQuery = fs.readFileSync(sqlFilePath, 'utf8');
    console.log(`Read SQL file: ${config.sqlFile}`);

    // Check for potential parameter references that might cause issues
    const parameterMatches = sqlQuery.match(/\$\d+(?!::)/g);
    if (parameterMatches && parameterMatches.length > 0) {
      console.warn(`WARNING: Found ${parameterMatches.length} untyped parameters in SQL: ${parameterMatches.slice(0, 5).join(', ')}${parameterMatches.length > 5 ? '...' : ''}`);
      console.warn('These might cause "could not determine data type of parameter" errors.');
    }
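    // Illustration of the check above (hypothetical input): a bare placeholder
    // is flagged, a type-annotated one is not:
    //   'SELECT $1, $2::int'.match(/\$\d+(?!::)/g)  // => ['$1']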

    // 2. Get Database Connection
    connection = await getConnection();
    console.log("Database connection acquired.");

    // 3. Ensure calculate_status table exists
    await connection.query(`
      CREATE TABLE IF NOT EXISTS calculate_status (
        module_name TEXT PRIMARY KEY,
        last_calculation_timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
      );
    `);

    // 4. Initial Progress Update
    progress.outputProgress({
      status: 'running',
      operation: `Starting: ${config.name}`,
      current: 0, total: 100,
      elapsed: progress.formatElapsedTime(stepStartTime),
      remaining: 'Calculating...', rate: 0, percentage: '0',
      timing: {
        start_time: new Date(stepStartTime).toISOString(),
        step_start_ms: stepStartTime
      }
    });

    // 5. Execute the Main SQL Query
    progress.outputProgress({
      status: 'running',
      operation: `Executing SQL: ${config.name}`,
      current: 25, total: 100,
      elapsed: progress.formatElapsedTime(stepStartTime),
      remaining: 'Executing query...', rate: 0, percentage: '25',
      timing: {
        start_time: new Date(stepStartTime).toISOString(),
        step_start_ms: stepStartTime
      }
    });
    console.log(`Executing SQL for ${config.name}...`);

    try {
      // Try executing exactly as individual scripts do
      const result = await connection.query(sqlQuery);
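      // node-postgres returns a single result object for a one-statement query,
      // but an array of result objects when the SQL text contains multiple
      // statements; hence the two shapes handled below.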

      // Try to extract row count from result
      if (result && result.rowCount !== undefined) {
        rowsAffected = result.rowCount;
      } else if (Array.isArray(result) && result[0] && result[0].rowCount !== undefined) {
        rowsAffected = result[0].rowCount;
      }

      // Check if the query returned a result set with row count info
      if (result && result.rows && result.rows.length > 0 && result.rows[0].rows_processed) {
        rowsAffected = parseInt(result.rows[0].rows_processed) || rowsAffected;
        console.log(`SQL returned metrics: ${JSON.stringify(result.rows[0])}`);
      } else if (Array.isArray(result) && result[0] && result[0].rows && result[0].rows[0] && result[0].rows[0].rows_processed) {
        rowsAffected = parseInt(result[0].rows[0].rows_processed) || rowsAffected;
        console.log(`SQL returned metrics: ${JSON.stringify(result[0].rows[0])}`);
      }

      console.log(`SQL affected ${rowsAffected} rows`);
    } catch (sqlError) {
      if (sqlError.message.includes('could not determine data type of parameter')) {
        console.log('Simple query failed with parameter type error, trying alternative method...');
        try {
          // Execute with explicit text mode to avoid parameter confusion
          await connection.query({
            text: sqlQuery,
            rowMode: 'text'
          });
        } catch (altError) {
          console.error('Alternative execution method also failed:', altError.message);
          throw altError; // Re-throw the alternative error
        }
      } else {
        console.error('SQL Execution Error:', sqlError.message);
        if (sqlError.position) {
          // If the error has a position, show the relevant part of the SQL query
          const position = parseInt(sqlError.position, 10);
          const startPos = Math.max(0, position - 100);
          const endPos = Math.min(sqlQuery.length, position + 100);
          console.error('SQL Error Context:');
          console.error('...' + sqlQuery.substring(startPos, position) + ' [ERROR HERE] ' + sqlQuery.substring(position, endPos) + '...');
        }
        throw sqlError; // Re-throw to be caught by the main try/catch
      }
    }

    // Check for cancellation immediately after the query finishes
    if (isCancelled) throw new Error(`Calculation cancelled during SQL execution for ${config.name}`);

    console.log(`SQL execution finished for ${config.name}.`);

    // 6. Update the status table only
    await connection.query(`
      INSERT INTO calculate_status (module_name, last_calculation_timestamp)
      VALUES ($1::text, NOW())
      ON CONFLICT (module_name) DO UPDATE
        SET last_calculation_timestamp = EXCLUDED.last_calculation_timestamp;
    `, [config.statusModule]);

    const stepDuration = Math.round((Date.now() - stepStartTime) / 1000);

    // 7. Final Progress Update for Step
    progress.outputProgress({
      status: 'complete',
      operation: `Completed: ${config.name}`,
      current: 100, total: 100,
      elapsed: progress.formatElapsedTime(stepStartTime),
      remaining: '0s', rate: 0, percentage: '100',
      timing: {
        start_time: new Date(stepStartTime).toISOString(),
        end_time: new Date().toISOString(),
        elapsed_seconds: stepDuration
      }
    });
    console.log(`--- Finished Step: ${config.name} (Duration: ${progress.formatElapsedTime(stepStartTime)}) ---`);

    return {
      success: true,
      message: `${config.name} completed successfully`,
      duration: stepDuration,
      rowsAffected: rowsAffected
    };

  } catch (error) {
    clearTimeout(stepTimeoutHandle); // Clear timeout on error
    const errorEndTime = Date.now();
    const errorDuration = Math.round((errorEndTime - stepStartTime) / 1000);
    const finalStatus = isCancelled ? 'cancelled' : 'failed';
    const errorMessage = error.message || 'Unknown error';

    console.error(`--- ERROR in Step: ${config.name} ---`);
    console.error(error); // Log the full error
    console.error(`------------------------------------`);

    // Update the progress file with the error/cancellation
    progress.outputProgress({
      status: finalStatus,
      operation: `Error in ${config.name}: ${errorMessage.split('\n')[0]}`, // Show first line of error
      current: 50, total: 100, // Indicate partial completion
      elapsed: progress.formatElapsedTime(stepStartTime),
      remaining: null, rate: 0, percentage: '50',
      timing: {
        start_time: new Date(stepStartTime).toISOString(),
        end_time: new Date(errorEndTime).toISOString(),
        elapsed_seconds: errorDuration
      }
    });

    // Rethrow the error to be caught by the main runAllCalculations function
    throw error; // Add context if needed: new Error(`Step ${config.name} failed: ${errorMessage}`)

  } finally {
    clearTimeout(stepTimeoutHandle); // Ensure timeout is cleared
    currentStep = ''; // Reset current step
    if (connection) {
      try {
        connection.release(); // release() is synchronous in node-postgres
        console.log("Database connection released.");
      } catch (releaseError) {
        console.error("Error releasing database connection:", releaseError);
      }
    }
  }
}

/**
 * Main function to run all calculation steps sequentially.
 */
async function runAllCalculations() {
  overallStartTime = Date.now();
  isCancelled = false; // Reset cancellation flag at start

  // Overall timeout for the entire script
  mainTimeoutHandle = setTimeout(() => {
    console.error(`--- OVERALL TIMEOUT REACHED (${MAX_EXECUTION_TIME_TOTAL / 1000}s) ---`);
    cancelCalculation(`overall timeout reached`);
    // The process should exit via the unhandled rejection/exception handlers
    // or the SIGTERM/SIGINT handlers after the cancellation attempt.
  }, MAX_EXECUTION_TIME_TOTAL);

  const steps = [
    {
      run: RUN_DAILY_SNAPSHOTS,
      name: 'Daily Snapshots Update',
      sqlFile: 'metrics-new/update_daily_snapshots.sql',
      historyType: 'daily_snapshots',
      statusModule: 'daily_snapshots'
    },
    {
      run: RUN_PRODUCT_METRICS,
      name: 'Product Metrics Update',
      sqlFile: 'metrics-new/update_product_metrics.sql', // ASSUMING the initial population is now part of a regular update
      historyType: 'product_metrics',
      statusModule: 'product_metrics'
    },
    {
      run: RUN_PERIODIC_METRICS,
      name: 'Periodic Metrics Update',
      sqlFile: 'metrics-new/update_periodic_metrics.sql',
      historyType: 'periodic_metrics',
      statusModule: 'periodic_metrics'
    },
    {
      run: RUN_BRAND_METRICS,
      name: 'Brand Metrics Update',
      sqlFile: 'metrics-new/calculate_brand_metrics.sql',
      historyType: 'brand_metrics',
      statusModule: 'brand_metrics'
    },
    {
      run: RUN_VENDOR_METRICS,
      name: 'Vendor Metrics Update',
      sqlFile: 'metrics-new/calculate_vendor_metrics.sql',
      historyType: 'vendor_metrics',
      statusModule: 'vendor_metrics'
    },
    {
      run: RUN_CATEGORY_METRICS,
      name: 'Category Metrics Update',
      sqlFile: 'metrics-new/calculate_category_metrics.sql',
      historyType: 'category_metrics',
      statusModule: 'category_metrics'
    }
  ];

  // Build a list of steps we will actually run
  const stepsToRun = steps.filter(step => step.run);
  const stepNames = stepsToRun.map(step => step.name);
  const sqlFiles = stepsToRun.map(step => step.sqlFile);

  let overallSuccess = true;
  let connection = null;

  try {
    // Create a single history record before starting all calculations
    try {
      connection = await getConnection();

      // Ensure calculate_history table exists (basic structure)
      await connection.query(`
        CREATE TABLE IF NOT EXISTS calculate_history (
          id SERIAL PRIMARY KEY,
          start_time TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
          end_time TIMESTAMP WITH TIME ZONE,
          duration_seconds INTEGER,
          status TEXT, -- Will be altered to enum if needed below
          error_message TEXT,
          additional_info JSONB
        );
      `);

      // Ensure the calculation_status enum type exists if needed
      await connection.query(`
        DO $$
        BEGIN
          IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'calculation_status') THEN
            CREATE TYPE calculation_status AS ENUM ('running', 'completed', 'failed', 'cancelled');

            -- If needed, alter the existing table to use the enum
            ALTER TABLE calculate_history
              ALTER COLUMN status TYPE calculation_status
              USING status::calculation_status;
          END IF;
        END
        $$;
      `);
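      // Note: the ALTER above runs only the one time the enum type is created;
      // a status column left as plain TEXT by the CREATE TABLE above is
      // converted to the enum at that point.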

      // Mark any previous running combined calculations as cancelled
      await connection.query(`
        UPDATE calculate_history
        SET
          status = 'cancelled'::calculation_status,
          end_time = NOW(),
          duration_seconds = EXTRACT(EPOCH FROM (NOW() - start_time))::INTEGER,
          error_message = 'Previous calculation was not completed properly or was superseded.'
        WHERE status = 'running'::calculation_status AND additional_info->>'type' = 'combined_metrics';
      `);

      // Create a single history record for this run
      const historyResult = await connection.query(`
        INSERT INTO calculate_history (status, additional_info)
        VALUES ('running'::calculation_status, jsonb_build_object(
          'type', 'combined_metrics',
          'steps', $1::jsonb,
          'sql_files', $2::jsonb
        ))
        RETURNING id;
      `, [JSON.stringify(stepNames), JSON.stringify(sqlFiles)]);

      combinedHistoryId = historyResult.rows[0].id;
      console.log(`Created combined history record ID: ${combinedHistoryId}`);

      // Get initial counts for tracking
      const productCount = await connection.query('SELECT COUNT(*) as count FROM products');
      const totalProducts = parseInt(productCount.rows[0].count);

      // Update history with initial counts
      await connection.query(`
        UPDATE calculate_history
        SET additional_info = additional_info || jsonb_build_object('total_products', $1::integer)
        WHERE id = $2
      `, [totalProducts, combinedHistoryId]);

      connection.release();
    } catch (historyError) {
      console.error('Error creating combined history record:', historyError);
      if (connection) connection.release();
      // Continue without history tracking if it fails
    }

    // First, sync the settings_product table to ensure all products have entries
    progressUtils.outputProgress({
      operation: 'Starting metrics calculation',
      message: 'Preparing product settings...'
    });

    try {
      const addedCount = await syncSettingsProductTable();

      progressUtils.outputProgress({
        operation: 'Preparation complete',
        message: `Added ${addedCount} missing product settings entries`,
        status: 'complete'
      });
    } catch (syncError) {
      console.error('Warning: Failed to sync product settings, continuing with metrics calculations:', syncError);
      // Don't fail the entire process if settings sync fails
    }

    // Track completed steps
    const completedSteps = [];
    const stepTimings = {};
    const stepRowCounts = {};
    let currentStepIndex = 0;

    // Now run the calculation steps
    for (const step of stepsToRun) {
      if (isCancelled) {
        console.log(`Skipping step "${step.name}" due to cancellation.`);
        overallSuccess = false; // Mark as not fully successful if steps are skipped due to cancel
        continue; // Skip to next step
      }

      currentStepIndex++;

      // Update overall progress
      progressUtils.outputProgress({
        status: 'running',
        operation: 'Running calculations',
        message: `Step ${currentStepIndex} of ${stepsToRun.length}: ${step.name}`,
        current: currentStepIndex - 1,
        total: stepsToRun.length,
        elapsed: progressUtils.formatElapsedTime(overallStartTime),
        remaining: progressUtils.estimateRemaining(overallStartTime, currentStepIndex - 1, stepsToRun.length),
        percentage: Math.round(((currentStepIndex - 1) / stepsToRun.length) * 100).toString(),
        timing: {
          overall_start_time: new Date(overallStartTime).toISOString(),
          current_step: step.name,
          completed_steps: completedSteps.length
        }
      });

      // Pass the progress utilities to the step executor
      const result = await executeSqlStep(step, progressUtils);

      if (result.success) {
        completedSteps.push({
          name: step.name,
          duration: result.duration,
          status: 'completed',
          rowsAffected: result.rowsAffected
        });
        stepTimings[step.name] = result.duration;
        stepRowCounts[step.name] = result.rowsAffected;
      }
    }

    // If we finished naturally (no errors thrown out)
    clearTimeout(mainTimeoutHandle); // Clear the main timeout

    // Update the combined history record on successful completion
    if (combinedHistoryId) {
      try {
        connection = await getConnection();
        const totalDuration = Math.round((Date.now() - overallStartTime) / 1000);

        // Get final processed counts
        const processedCounts = await connection.query(`
          SELECT
            (SELECT COUNT(*) FROM product_metrics WHERE last_calculated >= $1) as processed_products
        `, [new Date(overallStartTime)]);

        await connection.query(`
          UPDATE calculate_history
          SET
            end_time = NOW(),
            duration_seconds = $1::integer,
            status = $2::calculation_status,
            additional_info = additional_info || jsonb_build_object(
              'processed_products', $3::integer,
              'completed_steps', $4::jsonb,
              'step_timings', $5::jsonb,
              'step_row_counts', $6::jsonb
            )
          WHERE id = $7::integer;
        `, [
          totalDuration,
          isCancelled ? 'cancelled' : 'completed',
          processedCounts.rows[0].processed_products,
          JSON.stringify(completedSteps),
          JSON.stringify(stepTimings),
          JSON.stringify(stepRowCounts),
          combinedHistoryId
        ]);

        connection.release();
      } catch (historyError) {
        console.error('Error updating combined history record on completion:', historyError);
        if (connection) connection.release();
      }
    }

    if (isCancelled) {
      console.log("\n--- Calculation finished with cancellation ---");
      overallSuccess = false;
    } else {
      console.log("\n--- All enabled calculations finished successfully ---");

      // Send final completion progress
      progressUtils.outputProgress({
        status: 'complete',
        operation: 'All calculations completed',
        message: `Successfully completed ${completedSteps.length} of ${stepsToRun.length} steps`,
        current: stepsToRun.length,
        total: stepsToRun.length,
        elapsed: progressUtils.formatElapsedTime(overallStartTime),
        remaining: '0s',
        percentage: '100',
        timing: {
          overall_start_time: new Date(overallStartTime).toISOString(),
          overall_end_time: new Date().toISOString(),
          total_duration_seconds: Math.round((Date.now() - overallStartTime) / 1000),
          step_timings: stepTimings,
          completed_steps: completedSteps.length
        }
      });

      progressUtils.clearProgress(); // Clear progress only on full success
    }

  } catch (error) {
    clearTimeout(mainTimeoutHandle); // Clear the main timeout
    console.error("\n--- SCRIPT EXECUTION FAILED ---");
    // Error details were already logged by executeSqlStep or the global handlers
    overallSuccess = false;

    // Update the combined history record on error
    if (combinedHistoryId) {
      try {
        connection = await getConnection();
        const totalDuration = Math.round((Date.now() - overallStartTime) / 1000);

        await connection.query(`
          UPDATE calculate_history
          SET
            end_time = NOW(),
            duration_seconds = $1::integer,
            status = $2::calculation_status,
            error_message = $3::text
          WHERE id = $4::integer;
        `, [
          totalDuration,
          isCancelled ? 'cancelled' : 'failed',
          error.message.substring(0, 1000),
          combinedHistoryId
        ]);

        connection.release();
      } catch (historyError) {
        console.error('Error updating combined history record on error:', historyError);
        if (connection) connection.release();
      }
    }
  } finally {
    await closePool();
    console.log(`Total execution time: ${progressUtils.formatElapsedTime(overallStartTime)}`);
    process.exit(overallSuccess ? 0 : 1);
  }
}

// --- Script Execution ---
if (require.main === module) {
  runAllCalculations();
} else {
  // Export functions if needed as a module (e.g., for testing or an API)
  module.exports = {
    runAllCalculations,
    cancelCalculation,
    syncSettingsProductTable,
    // Expose individual steps if useful, wrapping them slightly.
    // (The sqlFile paths carry the metrics-new/ prefix to match the steps array
    // above, since executeSqlStep resolves them relative to __dirname.)
    runDailySnapshots: () => executeSqlStep({ name: 'Daily Snapshots Update', sqlFile: 'metrics-new/update_daily_snapshots.sql', historyType: 'daily_snapshots', statusModule: 'daily_snapshots' }, progressUtils),
    runProductMetrics: () => executeSqlStep({ name: 'Product Metrics Update', sqlFile: 'metrics-new/update_product_metrics.sql', historyType: 'product_metrics', statusModule: 'product_metrics' }, progressUtils),
    runPeriodicMetrics: () => executeSqlStep({ name: 'Periodic Metrics Update', sqlFile: 'metrics-new/update_periodic_metrics.sql', historyType: 'periodic_metrics', statusModule: 'periodic_metrics' }, progressUtils),
    runBrandMetrics: () => executeSqlStep({ name: 'Brand Metrics Update', sqlFile: 'metrics-new/calculate_brand_metrics.sql', historyType: 'brand_metrics', statusModule: 'brand_metrics' }, progressUtils),
    runVendorMetrics: () => executeSqlStep({ name: 'Vendor Metrics Update', sqlFile: 'metrics-new/calculate_vendor_metrics.sql', historyType: 'vendor_metrics', statusModule: 'vendor_metrics' }, progressUtils),
    runCategoryMetrics: () => executeSqlStep({ name: 'Category Metrics Update', sqlFile: 'metrics-new/calculate_category_metrics.sql', historyType: 'category_metrics', statusModule: 'category_metrics' }, progressUtils),
    getProgress: progressUtils.getProgress
  };
}
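
// A hypothetical usage sketch for the module form above (names as exported):
//   const metrics = require('./calculate-metrics-new');
//   metrics.runBrandMetrics()
//     .then((r) => console.log(r.message, `${r.duration}s`, r.rowsAffected))
//     .catch((err) => console.error(err));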

@@ -14,7 +14,15 @@ function outputProgress(data) {
function runScript(scriptPath) {
  return new Promise((resolve, reject) => {
    const child = spawn('node', [scriptPath], {
      stdio: ['inherit', 'pipe', 'pipe']
      stdio: ['inherit', 'pipe', 'pipe'],
      env: {
        ...process.env,
        PGHOST: process.env.DB_HOST,
        PGUSER: process.env.DB_USER,
        PGPASSWORD: process.env.DB_PASSWORD,
        PGDATABASE: process.env.DB_NAME,
        PGPORT: process.env.DB_PORT || '5432'
      }
    });

    let output = '';
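
// The PG* variables above are the standard libpq names, which node-postgres
// also reads; a child process inheriting them can connect with no explicit
// config (a sketch):
//   const { Pool } = require('pg');
//   const pool = new Pool(); // reads PGHOST, PGUSER, PGPASSWORD, PGDATABASE, PGPORT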

@@ -80,7 +88,7 @@ async function fullReset() {
    operation: 'Starting metrics calculation',
    message: 'Step 3/3: Calculating metrics...'
  });
  await runScript(path.join(__dirname, 'calculate-metrics.js'));
  await runScript(path.join(__dirname, 'calculate-metrics-new.js'));

  // Final completion message
  outputProgress({

@@ -68,7 +68,7 @@ async function fullUpdate() {
    operation: 'Starting metrics calculation',
    message: 'Step 2/2: Calculating metrics...'
  });
  await runScript(path.join(__dirname, 'calculate-metrics.js'));
  await runScript(path.join(__dirname, 'calculate-metrics-new.js'));
  outputProgress({
    status: 'complete',
    operation: 'Metrics step complete',

@@ -1,6 +1,6 @@
const dotenv = require("dotenv");
const path = require("path");
const { outputProgress, formatElapsedTime } = require('./metrics/utils/progress');
const { outputProgress, formatElapsedTime } = require('./metrics-new/utils/progress');
const { setupConnections, closeConnections } = require('./import/utils');
const importCategories = require('./import/categories');
const { importProducts } = require('./import/products');
@@ -19,7 +19,6 @@ const IMPORT_PURCHASE_ORDERS = true;
const INCREMENTAL_UPDATE = process.env.INCREMENTAL_UPDATE !== 'false'; // Default to true unless explicitly set to false

// SSH configuration
// In import-from-prod.js
const sshConfig = {
  ssh: {
    host: process.env.PROD_SSH_HOST,
@@ -31,29 +30,25 @@ const sshConfig = {
    compress: true, // Enable SSH compression
  },
  prodDbConfig: {
    // MySQL config for production
    host: process.env.PROD_DB_HOST || "localhost",
    user: process.env.PROD_DB_USER,
    password: process.env.PROD_DB_PASSWORD,
    database: process.env.PROD_DB_NAME,
    port: process.env.PROD_DB_PORT || 3306,
    timezone: 'Z',
    timezone: '-05:00', // Production DB always stores times in EST (UTC-5) regardless of DST
  },
  localDbConfig: {
    // PostgreSQL config for local
    host: process.env.DB_HOST,
    user: process.env.DB_USER,
    password: process.env.DB_PASSWORD,
    database: process.env.DB_NAME,
    multipleStatements: true,
    waitForConnections: true,
    connectionLimit: 10,
    queueLimit: 0,
    namedPlaceholders: true,
    connectTimeout: 60000,
    enableKeepAlive: true,
    keepAliveInitialDelay: 10000,
    compress: true,
    timezone: 'Z',
    stringifyObjects: false,
    port: process.env.DB_PORT || 5432,
    ssl: process.env.DB_SSL === 'true',
    connectionTimeoutMillis: 60000,
    idleTimeoutMillis: 30000,
    max: 10 // connection pool max size
  }
};

@@ -108,43 +103,44 @@ async function main() {
      SET
        status = 'cancelled',
        end_time = NOW(),
        duration_seconds = TIMESTAMPDIFF(SECOND, start_time, NOW()),
        duration_seconds = EXTRACT(EPOCH FROM (NOW() - start_time))::INTEGER,
        error_message = 'Previous import was not completed properly'
      WHERE status = 'running'
    `);

    // Initialize sync_status table if it doesn't exist
    // (an inline INDEX clause is MySQL syntax; PostgreSQL needs a separate CREATE INDEX)
    await localConnection.query(`
      CREATE TABLE IF NOT EXISTS sync_status (
        table_name VARCHAR(50) PRIMARY KEY,
        last_sync_timestamp TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
        last_sync_id BIGINT
      );
      CREATE INDEX IF NOT EXISTS idx_last_sync ON sync_status (last_sync_timestamp);
    `);

    // Create import history record for the overall session
    const [historyResult] = await localConnection.query(`
      INSERT INTO import_history (
        table_name,
        start_time,
        is_incremental,
        status,
        additional_info
      ) VALUES (
        'all_tables',
        NOW(),
        ?,
        'running',
        JSON_OBJECT(
          'categories_enabled', ?,
          'products_enabled', ?,
          'orders_enabled', ?,
          'purchase_orders_enabled', ?
        )
      )
    `, [INCREMENTAL_UPDATE, IMPORT_CATEGORIES, IMPORT_PRODUCTS, IMPORT_ORDERS, IMPORT_PURCHASE_ORDERS]);
    importHistoryId = historyResult.insertId;
    try {
      const [historyResult] = await localConnection.query(`
        INSERT INTO import_history (
          table_name,
          start_time,
          is_incremental,
          status,
          additional_info
        ) VALUES (
          'all_tables',
          NOW(),
          $1::boolean,
          'running',
          jsonb_build_object(
            'categories_enabled', $2::boolean,
            'products_enabled', $3::boolean,
            'orders_enabled', $4::boolean,
            'purchase_orders_enabled', $5::boolean
          )
        ) RETURNING id
      `, [INCREMENTAL_UPDATE, IMPORT_CATEGORIES, IMPORT_PRODUCTS, IMPORT_ORDERS, IMPORT_PURCHASE_ORDERS]);
      importHistoryId = historyResult.rows[0].id;
    } catch (error) {
      console.error("Error creating import history record:", error);
      outputProgress({
        status: "error",
        operation: "Import process",
        message: "Failed to create import history record",
        error: error.message
      });
      throw error;
    }

    const results = {
      categories: null,
@@ -155,42 +151,77 @@ async function main() {

    let totalRecordsAdded = 0;
    let totalRecordsUpdated = 0;
    let totalRecordsDeleted = 0; // Add tracking for deleted records
    let totalRecordsSkipped = 0; // Track skipped/filtered records
    const stepTimings = {};

    // Run each import based on constants
    if (IMPORT_CATEGORIES) {
      const stepStart = Date.now();
      results.categories = await importCategories(prodConnection, localConnection);
      stepTimings.categories = Math.round((Date.now() - stepStart) / 1000);

      if (isImportCancelled) throw new Error("Import cancelled");
      completedSteps++;
      console.log('Categories import result:', results.categories);
      totalRecordsAdded += results.categories?.recordsAdded || 0;
      totalRecordsUpdated += results.categories?.recordsUpdated || 0;
      totalRecordsAdded += parseInt(results.categories?.recordsAdded || 0);
      totalRecordsUpdated += parseInt(results.categories?.recordsUpdated || 0);
    }

    if (IMPORT_PRODUCTS) {
      const stepStart = Date.now();
      results.products = await importProducts(prodConnection, localConnection, INCREMENTAL_UPDATE);
      stepTimings.products = Math.round((Date.now() - stepStart) / 1000);

      if (isImportCancelled) throw new Error("Import cancelled");
      completedSteps++;
      console.log('Products import result:', results.products);
      totalRecordsAdded += results.products?.recordsAdded || 0;
      totalRecordsUpdated += results.products?.recordsUpdated || 0;
      totalRecordsAdded += parseInt(results.products?.recordsAdded || 0);
      totalRecordsUpdated += parseInt(results.products?.recordsUpdated || 0);
      totalRecordsSkipped += parseInt(results.products?.skippedUnchanged || 0);
    }

    if (IMPORT_ORDERS) {
      const stepStart = Date.now();
      results.orders = await importOrders(prodConnection, localConnection, INCREMENTAL_UPDATE);
      stepTimings.orders = Math.round((Date.now() - stepStart) / 1000);

      if (isImportCancelled) throw new Error("Import cancelled");
      completedSteps++;
      console.log('Orders import result:', results.orders);
      totalRecordsAdded += results.orders?.recordsAdded || 0;
      totalRecordsUpdated += results.orders?.recordsUpdated || 0;
      totalRecordsAdded += parseInt(results.orders?.recordsAdded || 0);
      totalRecordsUpdated += parseInt(results.orders?.recordsUpdated || 0);
      totalRecordsSkipped += parseInt(results.orders?.totalSkipped || 0);
    }

    if (IMPORT_PURCHASE_ORDERS) {
      results.purchaseOrders = await importPurchaseOrders(prodConnection, localConnection, INCREMENTAL_UPDATE);
      if (isImportCancelled) throw new Error("Import cancelled");
      completedSteps++;
      console.log('Purchase orders import result:', results.purchaseOrders);
      totalRecordsAdded += results.purchaseOrders?.recordsAdded || 0;
      totalRecordsUpdated += results.purchaseOrders?.recordsUpdated || 0;
      try {
        const stepStart = Date.now();
        results.purchaseOrders = await importPurchaseOrders(prodConnection, localConnection, INCREMENTAL_UPDATE);
        stepTimings.purchaseOrders = Math.round((Date.now() - stepStart) / 1000);

        if (isImportCancelled) throw new Error("Import cancelled");
        completedSteps++;
        console.log('Purchase orders import result:', results.purchaseOrders);

        // Handle potential error status
        if (results.purchaseOrders?.status === 'error') {
          console.error('Purchase orders import had an error:', results.purchaseOrders.error);
        } else {
          totalRecordsAdded += parseInt(results.purchaseOrders?.recordsAdded || 0);
          totalRecordsUpdated += parseInt(results.purchaseOrders?.recordsUpdated || 0);
          totalRecordsDeleted += parseInt(results.purchaseOrders?.recordsDeleted || 0);
        }
      } catch (error) {
        console.error('Error during purchase orders import:', error);
        // Continue with other imports, don't fail the whole process
        results.purchaseOrders = {
          status: 'error',
          error: error.message,
          recordsAdded: 0,
          recordsUpdated: 0
        };
      }
    }

    const endTime = Date.now();

@@ -201,25 +232,28 @@ async function main() {
      UPDATE import_history
      SET
        end_time = NOW(),
        duration_seconds = ?,
        records_added = ?,
        records_updated = ?,
        duration_seconds = $1,
        records_added = $2,
        records_updated = $3,
        status = 'completed',
        additional_info = JSON_OBJECT(
          'categories_enabled', ?,
          'products_enabled', ?,
          'orders_enabled', ?,
          'purchase_orders_enabled', ?,
          'categories_result', CAST(? AS JSON),
          'products_result', CAST(? AS JSON),
          'orders_result', CAST(? AS JSON),
          'purchase_orders_result', CAST(? AS JSON)
        additional_info = jsonb_build_object(
          'categories_enabled', $4::boolean,
          'products_enabled', $5::boolean,
          'orders_enabled', $6::boolean,
          'purchase_orders_enabled', $7::boolean,
          'categories_result', COALESCE($8::jsonb, 'null'::jsonb),
          'products_result', COALESCE($9::jsonb, 'null'::jsonb),
          'orders_result', COALESCE($10::jsonb, 'null'::jsonb),
          'purchase_orders_result', COALESCE($11::jsonb, 'null'::jsonb),
          'total_deleted', $12::integer,
          'total_skipped', $13::integer,
          'step_timings', $14::jsonb
        )
      WHERE id = ?
      WHERE id = $15
    `, [
      totalElapsedSeconds,
      totalRecordsAdded,
      totalRecordsUpdated,
      parseInt(totalRecordsAdded),
      parseInt(totalRecordsUpdated),
      IMPORT_CATEGORIES,
      IMPORT_PRODUCTS,
      IMPORT_ORDERS,
@@ -228,6 +262,9 @@ async function main() {
      JSON.stringify(results.products),
      JSON.stringify(results.orders),
      JSON.stringify(results.purchaseOrders),
      totalRecordsDeleted,
      totalRecordsSkipped,
      JSON.stringify(stepTimings),
      importHistoryId
    ]);

@@ -259,10 +296,10 @@ async function main() {
        UPDATE import_history
        SET
          end_time = NOW(),
          duration_seconds = ?,
          status = ?,
          error_message = ?
        WHERE id = ?
          duration_seconds = $1,
          status = $2,
          error_message = $3
        WHERE id = $4
      `, [totalElapsedSeconds, error.message === "Import cancelled" ? 'cancelled' : 'failed', error.message, importHistoryId]);
    }

@@ -288,16 +325,23 @@ async function main() {
    throw error;
  } finally {
    if (connections) {
      await closeConnections(connections);
      await closeConnections(connections).catch(err => {
        console.error("Error closing connections:", err);
      });
    }
  }
}

// Run the import only if this is the main module
if (require.main === module) {
  main().catch((error) => {
  main().then((results) => {
    console.log('Import completed successfully:', results);
    // Force exit after a small delay to ensure all logs are written
    setTimeout(() => process.exit(0), 500);
  }).catch((error) => {
    console.error("Unhandled error in main process:", error);
    process.exit(1);
    // Force exit with error code after a small delay
    setTimeout(() => process.exit(1), 500);
  });
}

@@ -1,4 +1,4 @@
const { outputProgress, formatElapsedTime } = require('../metrics/utils/progress');
const { outputProgress, formatElapsedTime } = require('../metrics-new/utils/progress');

async function importCategories(prodConnection, localConnection) {
  outputProgress({
@@ -9,170 +9,198 @@ async function importCategories(prodConnection, localConnection) {
  const startTime = Date.now();
  const typeOrder = [10, 20, 11, 21, 12, 13];
  let totalInserted = 0;
  let totalUpdated = 0;
  let skippedCategories = [];

  try {
    // Process each type in order with its own query
    // Start a single transaction for the entire import
    await localConnection.query('BEGIN');

    // Temporarily disable the trigger that's causing problems
    await localConnection.query('ALTER TABLE categories DISABLE TRIGGER update_categories_updated_at');

    // Process each type in order with its own savepoint
    for (const type of typeOrder) {
      const [categories] = await prodConnection.query(
        `
        SELECT
          pc.cat_id,
          pc.name,
          pc.type,
          CASE
            WHEN pc.type IN (10, 20) THEN NULL -- Top level categories should have no parent
            WHEN pc.master_cat_id IS NULL THEN NULL
            ELSE pc.master_cat_id
          END as parent_id,
          pc.combined_name as description
        FROM product_categories pc
        WHERE pc.type = ?
        ORDER BY pc.cat_id
        `,
        [type]
      );
      try {
        // Create a savepoint for this type
        await localConnection.query(`SAVEPOINT category_type_${type}`);

        if (categories.length === 0) continue;

        console.log(`\nProcessing ${categories.length} type ${type} categories`);
        if (type === 10) {
          console.log("Type 10 categories:", JSON.stringify(categories, null, 2));
        }

        // For types that can have parents (11, 21, 12, 13), verify parent existence
        let categoriesToInsert = categories;
        if (![10, 20].includes(type)) {
          // Get all parent IDs
          const parentIds = [
            ...new Set(
              categories.map((c) => c.parent_id).filter((id) => id !== null)
            ),
          ];

          // Check which parents exist
          const [existingParents] = await localConnection.query(
            "SELECT cat_id FROM categories WHERE cat_id IN (?)",
            [parentIds]
          );
          const existingParentIds = new Set(existingParents.map((p) => p.cat_id));

          // Filter categories and track skipped ones
          categoriesToInsert = categories.filter(
            (cat) =>
              cat.parent_id === null || existingParentIds.has(cat.parent_id)
          );
          const invalidCategories = categories.filter(
            (cat) =>
              cat.parent_id !== null && !existingParentIds.has(cat.parent_id)
        // Production query remains MySQL compatible
        const [categories] = await prodConnection.query(
          `
          SELECT
            pc.cat_id,
            pc.name,
            pc.type,
            CASE
              WHEN pc.type IN (10, 20) THEN NULL -- Top level categories should have no parent
              WHEN pc.master_cat_id IS NULL THEN NULL
              ELSE pc.master_cat_id
            END as parent_id,
            pc.combined_name as description
          FROM product_categories pc
          WHERE pc.type = ?
          ORDER BY pc.cat_id
          `,
          [type]
        );

        if (invalidCategories.length > 0) {
          const skippedInfo = invalidCategories.map((c) => ({
            id: c.cat_id,
            name: c.name,
            type: c.type,
            missing_parent: c.parent_id,
          }));
          skippedCategories.push(...skippedInfo);

          console.log(
            "\nSkipping categories with missing parents:",
            invalidCategories
              .map(
                (c) =>
                  `${c.cat_id} - ${c.name} (missing parent: ${c.parent_id})`
              )
              .join("\n")
          );
        }

        if (categoriesToInsert.length === 0) {
          console.log(
            `No valid categories of type ${type} to insert - all had missing parents`
          );
        if (categories.length === 0) {
          await localConnection.query(`RELEASE SAVEPOINT category_type_${type}`);
          continue;
        }

        console.log(`Processing ${categories.length} type ${type} categories`);

        // For types that can have parents (11, 21, 12, 13), we'll proceed directly
        // No need to check for parent existence since we process in hierarchical order
        let categoriesToInsert = categories;

        if (categoriesToInsert.length === 0) {
          console.log(`No valid categories of type ${type} to insert`);
          await localConnection.query(`RELEASE SAVEPOINT category_type_${type}`);
          continue;
        }

        // PostgreSQL upsert query with parameterized values
        const values = categoriesToInsert.flatMap((cat) => [
          cat.cat_id,
          cat.name,
          cat.type,
          cat.parent_id,
          cat.description,
          'active',
          new Date(),
          new Date()
        ]);

        const placeholders = categoriesToInsert
          .map((_, i) => `($${i * 8 + 1}, $${i * 8 + 2}, $${i * 8 + 3}, $${i * 8 + 4}, $${i * 8 + 5}, $${i * 8 + 6}, $${i * 8 + 7}, $${i * 8 + 8})`)
          .join(',');
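        // e.g. two category rows yield "($1,...,$8),($9,...,$16)", pairing with the flat values array above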

        // Insert categories with ON CONFLICT clause for PostgreSQL
        const query = `
          WITH inserted_categories AS (
            INSERT INTO categories (
              cat_id, name, type, parent_id, description, status, created_at, updated_at
            )
            VALUES ${placeholders}
            ON CONFLICT (cat_id) DO UPDATE SET
              name = EXCLUDED.name,
              type = EXCLUDED.type,
              parent_id = EXCLUDED.parent_id,
              description = EXCLUDED.description,
              status = EXCLUDED.status,
              updated_at = EXCLUDED.updated_at
            WHERE -- Only update if at least one field has changed
              categories.name IS DISTINCT FROM EXCLUDED.name OR
              categories.type IS DISTINCT FROM EXCLUDED.type OR
              categories.parent_id IS DISTINCT FROM EXCLUDED.parent_id OR
              categories.description IS DISTINCT FROM EXCLUDED.description OR
              categories.status IS DISTINCT FROM EXCLUDED.status
            RETURNING
              cat_id,
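              -- xmax = 0 on a returned row means no prior row version existed,
              -- i.e. a fresh insert; nonzero xmax means ON CONFLICT updated an
              -- existing row (this relies on PostgreSQL's MVCC system columns)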
              CASE
                WHEN xmax = 0 THEN true
                ELSE false
              END as is_insert
          )
          SELECT
            COUNT(*) as total,
            COUNT(*) FILTER (WHERE is_insert) as inserted,
            COUNT(*) FILTER (WHERE NOT is_insert) as updated
          FROM inserted_categories`;

        const result = await localConnection.query(query, values);

        // Get the first result since query returns an array
        const queryResult = Array.isArray(result) ? result[0] : result;

        if (!queryResult || !queryResult.rows || !queryResult.rows[0]) {
          console.error('Query failed to return results');
          throw new Error('Query did not return expected results');
        }

        const total = parseInt(queryResult.rows[0].total) || 0;
        const inserted = parseInt(queryResult.rows[0].inserted) || 0;
        const updated = parseInt(queryResult.rows[0].updated) || 0;

        console.log(`Total: ${total}, Inserted: ${inserted}, Updated: ${updated}`);

        totalInserted += inserted;
        totalUpdated += updated;

        // Release the savepoint for this type
        await localConnection.query(`RELEASE SAVEPOINT category_type_${type}`);

        outputProgress({
          status: "running",
          operation: "Categories import",
          message: `Imported ${inserted} (updated ${updated}) categories of type ${type}`,
          current: totalInserted + totalUpdated,
          total: categories.length,
          elapsed: formatElapsedTime(startTime),
        });
      } catch (error) {
        // Rollback to the savepoint for this type
        await localConnection.query(`ROLLBACK TO SAVEPOINT category_type_${type}`);
        throw error;
      }

      console.log(
        `Inserting ${categoriesToInsert.length} type ${type} categories`
      );

      const placeholders = categoriesToInsert
        .map(() => "(?, ?, ?, ?, ?, ?, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP)")
        .join(",");

      const values = categoriesToInsert.flatMap((cat) => [
        cat.cat_id,
        cat.name,
        cat.type,
        cat.parent_id,
        cat.description,
        "active",
      ]);

      // Insert categories and create relationships in one query to avoid race conditions
      await localConnection.query(
        `
        INSERT INTO categories (cat_id, name, type, parent_id, description, status, created_at, updated_at)
        VALUES ${placeholders}
        ON DUPLICATE KEY UPDATE
          name = VALUES(name),
          type = VALUES(type),
          parent_id = VALUES(parent_id),
          description = VALUES(description),
          status = VALUES(status),
          updated_at = CURRENT_TIMESTAMP
        `,
        values
      );

      totalInserted += categoriesToInsert.length;
      outputProgress({
        status: "running",
        operation: "Categories import",
        current: totalInserted,
        total: totalInserted,
        elapsed: formatElapsedTime((Date.now() - startTime) / 1000),
      });
    }

    // After all imports, if we skipped any categories, throw an error
    if (skippedCategories.length > 0) {
      const error = new Error(
        "Categories import completed with errors - some categories were skipped due to missing parents"
      );
      error.skippedCategories = skippedCategories;
      throw error;
    }
    // Commit the entire transaction - we'll do this even if we have skipped categories
    await localConnection.query('COMMIT');

    // Update sync status
    await localConnection.query(`
      INSERT INTO sync_status (table_name, last_sync_timestamp)
      VALUES ('categories', NOW())
      ON CONFLICT (table_name) DO UPDATE SET
        last_sync_timestamp = NOW()
    `);

    // Re-enable the trigger
    await localConnection.query('ALTER TABLE categories ENABLE TRIGGER update_categories_updated_at');

    outputProgress({
      status: "complete",
      operation: "Categories import completed",
      current: totalInserted,
      total: totalInserted,
      duration: formatElapsedTime((Date.now() - startTime) / 1000),
      current: totalInserted + totalUpdated,
      total: totalInserted + totalUpdated,
      duration: formatElapsedTime(startTime),
      warnings: skippedCategories.length > 0 ? {
        message: "Some categories were skipped due to missing parents",
        skippedCategories
      } : undefined
    });

    return {
      status: "complete",
      totalImported: totalInserted
      recordsAdded: totalInserted,
      recordsUpdated: totalUpdated,
      totalRecords: totalInserted + totalUpdated,
      warnings: skippedCategories.length > 0 ? {
        message: "Some categories were skipped due to missing parents",
        skippedCategories
      } : undefined
    };
  } catch (error) {
    console.error("Error importing categories:", error);
    if (error.skippedCategories) {
      console.error(
        "Skipped categories:",
        JSON.stringify(error.skippedCategories, null, 2)
      );

    // Only rollback if we haven't committed yet
    try {
      await localConnection.query('ROLLBACK');

      // Make sure we re-enable the trigger even if there was an error
      await localConnection.query('ALTER TABLE categories ENABLE TRIGGER update_categories_updated_at');
    } catch (rollbackError) {
      console.error("Error during rollback:", rollbackError);
    }

    outputProgress({
      status: "error",
      operation: "Categories import failed",
      error: error.message,
      skippedCategories: error.skippedCategories
      error: error.message
    });

    throw error;

(3 file diffs suppressed because they are too large)
@@ -1,5 +1,6 @@
const mysql = require("mysql2/promise");
const { Client } = require("ssh2");
const { Pool } = require('pg');
const dotenv = require("dotenv");
const path = require("path");

@@ -41,23 +42,90 @@ async function setupSshTunnel(sshConfig) {
async function setupConnections(sshConfig) {
  const tunnel = await setupSshTunnel(sshConfig);

  // Setup MySQL connection for production
  const prodConnection = await mysql.createConnection({
    ...sshConfig.prodDbConfig,
    stream: tunnel.stream,
  });

  const localConnection = await mysql.createPool({
    ...sshConfig.localDbConfig,
    waitForConnections: true,
    connectionLimit: 10,
    queueLimit: 0
  });
  // Setup PostgreSQL connection pool for local
  const localPool = new Pool(sshConfig.localDbConfig);

  return {
    ssh: tunnel.ssh,
    prodConnection,
    localConnection
  // Test the PostgreSQL connection
  try {
    const client = await localPool.connect();
    await client.query('SELECT NOW()');
    client.release();
    console.log('PostgreSQL connection successful');
  } catch (err) {
    console.error('PostgreSQL connection error:', err);
    throw err;
  }

  // Create a wrapper for the PostgreSQL pool to match MySQL interface
  const localConnection = {
    _client: null,
    _transactionActive: false,

    query: async (text, params) => {
      // If we're not in a transaction, use the pool directly
      if (!localConnection._transactionActive) {
        const client = await localPool.connect();
        try {
          const result = await client.query(text, params);
          return [result];
        } finally {
          client.release();
        }
      }

      // If we're in a transaction, use the dedicated client
      if (!localConnection._client) {
        throw new Error('No active transaction client');
      }
      const result = await localConnection._client.query(text, params);
      return [result];
    },

    beginTransaction: async () => {
      if (localConnection._transactionActive) {
        throw new Error('Transaction already active');
      }
      localConnection._client = await localPool.connect();
      await localConnection._client.query('BEGIN');
      localConnection._transactionActive = true;
    },

    commit: async () => {
      if (!localConnection._transactionActive) {
        throw new Error('No active transaction to commit');
      }
      await localConnection._client.query('COMMIT');
      localConnection._client.release();
      localConnection._client = null;
      localConnection._transactionActive = false;
    },

    rollback: async () => {
      if (!localConnection._transactionActive) {
        throw new Error('No active transaction to rollback');
      }
      await localConnection._client.query('ROLLBACK');
      localConnection._client.release();
      localConnection._client = null;
      localConnection._transactionActive = false;
    },

    end: async () => {
      if (localConnection._client) {
        localConnection._client.release();
        localConnection._client = null;
      }
      await localPool.end();
    }
  };

  return { prodConnection, localConnection, tunnel };
}
|
||||
|
||||
// Helper function to close connections
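For reference, a guarded import through this wrapper reduces to one plain Postgres transaction on a single dedicated pooled client. A minimal sketch of the statement sequence (the INSERT shape and the DISABLE TRIGGER step are assumptions inferred from the import script; only the ENABLE TRIGGER call appears verbatim above):

-- BEGIN;                                                               -- beginTransaction()
-- ALTER TABLE categories DISABLE TRIGGER update_categories_updated_at; -- assumed counterpart of the re-enable above
-- ... batched INSERT ... ON CONFLICT DO UPDATE statements via query() ...
-- ALTER TABLE categories ENABLE TRIGGER update_categories_updated_at;
-- COMMIT;                                                              -- commit(); ROLLBACK on failure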
@@ -0,0 +1,444 @@
-- Description: Performs the first population OR full recalculation of the product_metrics table based on
--              historically backfilled daily_product_snapshots and current product/PO data.
--              Calculates all metrics considering the full available history up to 'yesterday'.
--              Run ONCE after backfill_historical_snapshots_final.sql completes successfully.
-- Dependencies: Core import tables (products, purchase_orders, receivings), daily_product_snapshots (historically populated),
--               configuration tables (settings_*), product_metrics table must exist.
-- Frequency: Run ONCE.
DO $$
DECLARE
    _module_name VARCHAR := 'product_metrics_population'; -- Generic name
    _start_time TIMESTAMPTZ := clock_timestamp();
    -- Calculate metrics AS OF the end of the last fully completed day
    _calculation_date DATE := CURRENT_DATE - INTERVAL '1 day';
BEGIN
    RAISE NOTICE 'Running % module. Calculating AS OF: %. Start Time: %', _module_name, _calculation_date, _start_time;

    -- Optional: Consider TRUNCATE if you want a completely fresh start,
    -- otherwise ON CONFLICT will update existing rows if this is rerun.
    -- TRUNCATE TABLE public.product_metrics;
    RAISE NOTICE 'Populating product_metrics table. This may take some time...';

    -- CTEs to gather necessary information AS OF _calculation_date
    WITH CurrentInfo AS (
        -- Fetches current product details, including costs/prices used for forecasting & fallbacks
        SELECT
            p.pid, p.sku, p.title, p.brand, p.vendor, COALESCE(p.image_175, p.image) as image_url,
            p.visible as is_visible, p.replenishable,
            COALESCE(p.price, 0.00) as current_price, COALESCE(p.regular_price, 0.00) as current_regular_price,
            COALESCE(p.cost_price, 0.00) as current_cost_price,
            COALESCE(p.landing_cost_price, p.cost_price, 0.00) as current_effective_cost, -- Use landing if available, else cost
            p.stock_quantity as current_stock, -- Use actual current stock for forecast base
            p.created_at, p.first_received, p.date_last_sold,
            p.moq,
            p.uom,
            p.total_sold as historical_total_sold -- Add historical total_sold from products table
        FROM public.products p
    ),
    OnOrderInfo AS (
        -- Calculates current on-order quantities and costs
        SELECT
            pid,
            SUM(ordered) AS on_order_qty,
            SUM(ordered * po_cost_price) AS on_order_cost,
            MIN(expected_date) AS earliest_expected_date
        FROM public.purchase_orders
        -- Use the most common statuses representing active, unfulfilled POs
        WHERE status IN ('created', 'ordered', 'preordered', 'electronically_sent', 'electronically_ready_send', 'receiving_started')
          AND status NOT IN ('canceled', 'done')
        GROUP BY pid
    ),
    HistoricalDates AS (
        -- Determines key historical dates from orders and receivings
        SELECT
            p.pid,
            MIN(o.date)::date AS date_first_sold,
            MAX(o.date)::date AS max_order_date, -- Used as fallback for date_last_sold
            MIN(r.received_date)::date AS date_first_received_calc,
            MAX(r.received_date)::date AS date_last_received_calc
        FROM public.products p
        LEFT JOIN public.orders o ON p.pid = o.pid AND o.quantity > 0 AND o.status NOT IN ('canceled', 'returned')
        LEFT JOIN public.receivings r ON p.pid = r.pid
        GROUP BY p.pid
    ),
    SnapshotAggregates AS (
        -- Aggregates metrics from historical snapshots up to the _calculation_date
        SELECT
            pid,
            -- Rolling periods relative to _calculation_date
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '6 days' AND _calculation_date THEN units_sold ELSE 0 END) AS sales_7d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '6 days' AND _calculation_date THEN net_revenue ELSE 0 END) AS revenue_7d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '13 days' AND _calculation_date THEN units_sold ELSE 0 END) AS sales_14d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '13 days' AND _calculation_date THEN net_revenue ELSE 0 END) AS revenue_14d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN units_sold ELSE 0 END) AS sales_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN net_revenue ELSE 0 END) AS revenue_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN cogs ELSE 0 END) AS cogs_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN profit ELSE 0 END) AS profit_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN units_returned ELSE 0 END) AS returns_units_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN returns_revenue ELSE 0 END) AS returns_revenue_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN discounts ELSE 0 END) AS discounts_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN gross_revenue ELSE 0 END) AS gross_revenue_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN gross_regular_revenue ELSE 0 END) AS gross_regular_revenue_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date AND stockout_flag THEN 1 ELSE 0 END) AS stockout_days_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '364 days' AND _calculation_date THEN units_sold ELSE 0 END) AS sales_365d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '364 days' AND _calculation_date THEN net_revenue ELSE 0 END) AS revenue_365d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN units_received ELSE 0 END) AS received_qty_30d,
            SUM(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN cost_received ELSE 0 END) AS received_cost_30d,

            -- Averages over the last 30 days ending _calculation_date
            AVG(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN eod_stock_quantity END) AS avg_stock_units_30d,
            AVG(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN eod_stock_cost END) AS avg_stock_cost_30d,
            AVG(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN eod_stock_retail END) AS avg_stock_retail_30d,
            AVG(CASE WHEN snapshot_date BETWEEN _calculation_date - INTERVAL '29 days' AND _calculation_date THEN eod_stock_gross END) AS avg_stock_gross_30d,

            -- Lifetime (Using historical total from products table)
            (SELECT total_sold FROM public.products WHERE public.products.pid = daily_product_snapshots.pid) AS lifetime_sales,
            COALESCE(
                -- Option 1: Use 30-day average price if available
                CASE WHEN SUM(CASE WHEN snapshot_date >= _calculation_date - INTERVAL '29 days' AND snapshot_date <= _calculation_date THEN units_sold ELSE 0 END) > 0 THEN
                    (SELECT total_sold FROM public.products WHERE public.products.pid = daily_product_snapshots.pid) * (
                        SUM(CASE WHEN snapshot_date >= _calculation_date - INTERVAL '29 days' AND snapshot_date <= _calculation_date THEN net_revenue ELSE 0 END) /
                        NULLIF(SUM(CASE WHEN snapshot_date >= _calculation_date - INTERVAL '29 days' AND snapshot_date <= _calculation_date THEN units_sold ELSE 0 END), 0)
                    )
                ELSE NULL END,
                -- Option 2: Try 365-day average price if available
                CASE WHEN SUM(CASE WHEN snapshot_date >= _calculation_date - INTERVAL '364 days' AND snapshot_date <= _calculation_date THEN units_sold ELSE 0 END) > 0 THEN
                    (SELECT total_sold FROM public.products WHERE public.products.pid = daily_product_snapshots.pid) * (
                        SUM(CASE WHEN snapshot_date >= _calculation_date - INTERVAL '364 days' AND snapshot_date <= _calculation_date THEN net_revenue ELSE 0 END) /
                        NULLIF(SUM(CASE WHEN snapshot_date >= _calculation_date - INTERVAL '364 days' AND snapshot_date <= _calculation_date THEN units_sold ELSE 0 END), 0)
                    )
                ELSE NULL END,
                -- Option 3: Use current price from products table
                (SELECT total_sold * price FROM public.products WHERE public.products.pid = daily_product_snapshots.pid),
                -- Option 4: Use regular price if current price might be zero
                (SELECT total_sold * regular_price FROM public.products WHERE public.products.pid = daily_product_snapshots.pid),
                -- Final fallback: Use accumulated revenue (less accurate for old products)
                SUM(net_revenue)
            ) AS lifetime_revenue,

            -- Yesterday (Sales for the specific _calculation_date)
            SUM(CASE WHEN snapshot_date = _calculation_date THEN units_sold ELSE 0 END) as yesterday_sales

        FROM public.daily_product_snapshots
        WHERE snapshot_date <= _calculation_date -- Ensure we only use data up to the calculation point
        GROUP BY pid
    ),
    FirstPeriodMetrics AS (
        -- Calculates sales/revenue for first X days after first sale date
        -- Uses HistoricalDates CTE to get the first sale date
        SELECT
            pid, date_first_sold,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '6 days' THEN units_sold ELSE 0 END) AS first_7_days_sales,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '6 days' THEN net_revenue ELSE 0 END) AS first_7_days_revenue,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '29 days' THEN units_sold ELSE 0 END) AS first_30_days_sales,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '29 days' THEN net_revenue ELSE 0 END) AS first_30_days_revenue,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '59 days' THEN units_sold ELSE 0 END) AS first_60_days_sales,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '59 days' THEN net_revenue ELSE 0 END) AS first_60_days_revenue,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '89 days' THEN units_sold ELSE 0 END) AS first_90_days_sales,
            SUM(CASE WHEN snapshot_date BETWEEN date_first_sold AND date_first_sold + INTERVAL '89 days' THEN net_revenue ELSE 0 END) AS first_90_days_revenue
        FROM public.daily_product_snapshots ds
        JOIN HistoricalDates hd USING(pid)
        WHERE date_first_sold IS NOT NULL
          AND snapshot_date >= date_first_sold -- Only consider snapshots after first sale
          AND snapshot_date <= _calculation_date -- Only up to the overall calculation date
        GROUP BY pid, date_first_sold
    ),
    Settings AS (
        -- Fetches effective configuration settings (Product > Vendor > Global)
        SELECT
            p.pid,
            COALESCE(sp.lead_time_days, sv.default_lead_time_days, (SELECT setting_value FROM settings_global WHERE setting_key = 'default_lead_time_days')::int, 14) AS effective_lead_time,
            COALESCE(sp.days_of_stock, sv.default_days_of_stock, (SELECT setting_value FROM settings_global WHERE setting_key = 'default_days_of_stock')::int, 30) AS effective_days_of_stock,
            COALESCE(sp.safety_stock, (SELECT setting_value::int FROM settings_global WHERE setting_key = 'default_safety_stock_units'), 0) AS effective_safety_stock,
            COALESCE(sp.exclude_from_forecast, FALSE) AS exclude_forecast
        FROM public.products p
        LEFT JOIN public.settings_product sp ON p.pid = sp.pid
        LEFT JOIN public.settings_vendor sv ON p.vendor = sv.vendor
    ),
    AvgLeadTime AS (
        -- Calculate Average Lead Time by joining purchase_orders with receivings
        SELECT
            po.pid,
            AVG(GREATEST(1,
                CASE
                    WHEN r.received_date IS NOT NULL AND po.date IS NOT NULL
                    THEN (r.received_date::date - po.date::date)
                    ELSE 1
                END
            ))::int AS avg_lead_time_days_calc
        FROM public.purchase_orders po
        JOIN public.receivings r ON r.pid = po.pid
        WHERE po.status = 'done' -- Completed POs
          AND r.received_date IS NOT NULL
          AND po.date IS NOT NULL
          AND r.received_date >= po.date
        GROUP BY po.pid
    ),
    RankedForABC AS (
        -- Ranks products based on the configured ABC metric (using historical data)
        SELECT
            p.pid,
            CASE COALESCE((SELECT setting_value FROM settings_global WHERE setting_key = 'abc_calculation_basis'), 'revenue_30d')
                WHEN 'sales_30d' THEN COALESCE(sa.sales_30d, 0)
                WHEN 'lifetime_revenue' THEN COALESCE(sa.lifetime_revenue, 0)::numeric
                ELSE COALESCE(sa.revenue_30d, 0) -- Default to revenue_30d
            END AS metric_value
        FROM public.products p -- Use products as the base
        JOIN SnapshotAggregates sa ON p.pid = sa.pid
        WHERE p.replenishable = TRUE -- Only rank replenishable products
          AND (CASE COALESCE((SELECT setting_value FROM settings_global WHERE setting_key = 'abc_calculation_basis'), 'revenue_30d')
                WHEN 'sales_30d' THEN COALESCE(sa.sales_30d, 0)
                WHEN 'lifetime_revenue' THEN COALESCE(sa.lifetime_revenue, 0)::numeric
                ELSE COALESCE(sa.revenue_30d, 0)
            END) > 0 -- Only include products with non-zero contribution
    ),
    CumulativeABC AS (
        -- Calculates cumulative metric values for ABC ranking
        SELECT
            pid, metric_value,
            SUM(metric_value) OVER (ORDER BY metric_value DESC NULLS LAST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as cumulative_metric,
            SUM(metric_value) OVER () as total_metric
        FROM RankedForABC
    ),
    FinalABC AS (
        -- Assigns A, B, or C class based on thresholds
        SELECT
            pid,
            CASE
                WHEN cumulative_metric / NULLIF(total_metric, 0) <= COALESCE((SELECT setting_value::numeric FROM settings_global WHERE setting_key = 'abc_revenue_threshold_a'), 0.8) THEN 'A'::char(1)
                WHEN cumulative_metric / NULLIF(total_metric, 0) <= COALESCE((SELECT setting_value::numeric FROM settings_global WHERE setting_key = 'abc_revenue_threshold_b'), 0.95) THEN 'B'::char(1)
                ELSE 'C'::char(1)
            END AS abc_class_calc
        FROM CumulativeABC
    )
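    -- ABC recap (defaults shown are the fallbacks when settings_global has no
    -- override): with products ranked by metric_value descending,
    --   class = 'A' while cumulative share of the total is <= 0.80,
    --           'B' while cumulative share is <= 0.95,
    --           'C' for the remaining tail; replenishable products that were not
    --   ranked (zero contribution) default to 'C' in the SELECT below.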
    -- Final INSERT/UPDATE statement using all the prepared CTEs
    INSERT INTO public.product_metrics (
        pid, last_calculated, sku, title, brand, vendor, image_url, is_visible, is_replenishable,
        current_price, current_regular_price, current_cost_price, current_landing_cost_price,
        current_stock, current_stock_cost, current_stock_retail, current_stock_gross,
        on_order_qty, on_order_cost, on_order_retail, earliest_expected_date,
        date_created, date_first_received, date_last_received, date_first_sold, date_last_sold, age_days,
        sales_7d, revenue_7d, sales_14d, revenue_14d, sales_30d, revenue_30d, cogs_30d, profit_30d,
        returns_units_30d, returns_revenue_30d, discounts_30d, gross_revenue_30d, gross_regular_revenue_30d,
        stockout_days_30d, sales_365d, revenue_365d,
        avg_stock_units_30d, avg_stock_cost_30d, avg_stock_retail_30d, avg_stock_gross_30d,
        received_qty_30d, received_cost_30d,
        lifetime_sales, lifetime_revenue,
        first_7_days_sales, first_7_days_revenue, first_30_days_sales, first_30_days_revenue,
        first_60_days_sales, first_60_days_revenue, first_90_days_sales, first_90_days_revenue,
        asp_30d, acp_30d, avg_ros_30d, avg_sales_per_day_30d,
        margin_30d, markup_30d, gmroi_30d, stockturn_30d, return_rate_30d, discount_rate_30d,
        stockout_rate_30d, markdown_30d, markdown_rate_30d, sell_through_30d,
        avg_lead_time_days, abc_class,
        sales_velocity_daily, config_lead_time, config_days_of_stock, config_safety_stock,
        planning_period_days, lead_time_forecast_units, days_of_stock_forecast_units,
        planning_period_forecast_units, lead_time_closing_stock, days_of_stock_closing_stock,
        replenishment_needed_raw, replenishment_units, replenishment_cost, replenishment_retail, replenishment_profit,
        to_order_units, forecast_lost_sales_units, forecast_lost_revenue,
        stock_cover_in_days, po_cover_in_days, sells_out_in_days, replenish_date,
        overstocked_units, overstocked_cost, overstocked_retail, is_old_stock,
        yesterday_sales
    )
    SELECT
        -- Select columns in order, joining all CTEs by pid
        ci.pid, _start_time, ci.sku, ci.title, ci.brand, ci.vendor, ci.image_url, ci.is_visible, ci.replenishable,
        ci.current_price, ci.current_regular_price, ci.current_cost_price, ci.current_effective_cost,
        ci.current_stock, (ci.current_stock * COALESCE(ci.current_effective_cost, 0.00))::numeric(12,2), (ci.current_stock * COALESCE(ci.current_price, 0.00))::numeric(12,2), (ci.current_stock * COALESCE(ci.current_regular_price, 0.00))::numeric(12,2),
        COALESCE(ooi.on_order_qty, 0), COALESCE(ooi.on_order_cost, 0.00)::numeric(12,2), (COALESCE(ooi.on_order_qty, 0) * COALESCE(ci.current_price, 0.00))::numeric(12,2), ooi.earliest_expected_date,

        -- Fix type issue with date calculation - properly cast timestamps to dates before arithmetic
        ci.created_at::date,
        COALESCE(ci.first_received::date, hd.date_first_received_calc),
        hd.date_last_received_calc,
        hd.date_first_sold,
        COALESCE(ci.date_last_sold, hd.max_order_date),
        -- Fix timestamp + integer error by ensuring we work only with dates
        CASE
            WHEN LEAST(ci.created_at::date, COALESCE(hd.date_first_sold, ci.created_at::date)) IS NOT NULL
            THEN (_calculation_date::date - LEAST(ci.created_at::date, COALESCE(hd.date_first_sold, ci.created_at::date)))::int
            ELSE NULL
        END,

        COALESCE(sa.sales_7d, 0), COALESCE(sa.revenue_7d, 0), COALESCE(sa.sales_14d, 0), COALESCE(sa.revenue_14d, 0), COALESCE(sa.sales_30d, 0), COALESCE(sa.revenue_30d, 0), COALESCE(sa.cogs_30d, 0), COALESCE(sa.profit_30d, 0),
        COALESCE(sa.returns_units_30d, 0), COALESCE(sa.returns_revenue_30d, 0), COALESCE(sa.discounts_30d, 0), COALESCE(sa.gross_revenue_30d, 0), COALESCE(sa.gross_regular_revenue_30d, 0),
        COALESCE(sa.stockout_days_30d, 0), COALESCE(sa.sales_365d, 0), COALESCE(sa.revenue_365d, 0),
        sa.avg_stock_units_30d, sa.avg_stock_cost_30d, sa.avg_stock_retail_30d, sa.avg_stock_gross_30d, -- Averages can be NULL if no data
        COALESCE(sa.received_qty_30d, 0), COALESCE(sa.received_cost_30d, 0),
        COALESCE(sa.lifetime_sales, 0), COALESCE(sa.lifetime_revenue, 0),
        fpm.first_7_days_sales, fpm.first_7_days_revenue, fpm.first_30_days_sales, fpm.first_30_days_revenue,
        fpm.first_60_days_sales, fpm.first_60_days_revenue, fpm.first_90_days_sales, fpm.first_90_days_revenue,

        -- Calculated KPIs (using COALESCE on inputs where appropriate)
        sa.revenue_30d / NULLIF(sa.sales_30d, 0) AS asp_30d,
        sa.cogs_30d / NULLIF(sa.sales_30d, 0) AS acp_30d,
        sa.profit_30d / NULLIF(sa.sales_30d, 0) AS avg_ros_30d,
        COALESCE(sa.sales_30d, 0) / 30.0 AS avg_sales_per_day_30d,

        -- Fix for percentages - cast to numeric with appropriate precision
        ((sa.profit_30d / NULLIF(sa.revenue_30d, 0)) * 100)::numeric(8,2) AS margin_30d,
        ((sa.profit_30d / NULLIF(sa.cogs_30d, 0)) * 100)::numeric(8,2) AS markup_30d,
        sa.profit_30d / NULLIF(sa.avg_stock_cost_30d, 0) AS gmroi_30d,
        sa.sales_30d / NULLIF(sa.avg_stock_units_30d, 0) AS stockturn_30d,
        ((sa.returns_units_30d / NULLIF(COALESCE(sa.sales_30d, 0) + COALESCE(sa.returns_units_30d, 0), 0)) * 100)::numeric(8,2) AS return_rate_30d,
        ((sa.discounts_30d / NULLIF(sa.gross_revenue_30d, 0)) * 100)::numeric(8,2) AS discount_rate_30d,
        ((COALESCE(sa.stockout_days_30d, 0) / 30.0) * 100)::numeric(8,2) AS stockout_rate_30d,
        GREATEST(0, sa.gross_regular_revenue_30d - sa.gross_revenue_30d) AS markdown_30d, -- Ensure markdown isn't negative
        ((GREATEST(0, sa.gross_regular_revenue_30d - sa.gross_revenue_30d) / NULLIF(sa.gross_regular_revenue_30d, 0)) * 100)::numeric(8,2) AS markdown_rate_30d,
        -- Sell Through Rate: Sales / (Stock at end of period + Sales), one definition used as a proxy for Sales / Beginning Stock
        ((sa.sales_30d / NULLIF(
            (SELECT eod_stock_quantity FROM daily_product_snapshots WHERE snapshot_date = _calculation_date AND pid = ci.pid LIMIT 1) + COALESCE(sa.sales_30d, 0)
        , 0)) * 100)::numeric(8,2) AS sell_through_30d,
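        -- KPI recap for the 30-day window above:
        --   margin%  = profit / net revenue * 100      markup%   = profit / COGS * 100
        --   GMROI    = profit / avg stock cost         stockturn = units sold / avg stock units
        --   sell-through% = sales / (closing stock + sales) * 100, standing in for
        --   sales / opening stock since opening stock isn't snapshotted directly.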
        -- Use calculated periodic metrics
        alt.avg_lead_time_days_calc,
        CASE
            WHEN ci.replenishable = FALSE THEN NULL -- Non-replenishable products don't get a class
            ELSE COALESCE(fa.abc_class_calc, 'C') -- Default unranked (non-contributing) replenishable products to C
        END,

        -- Forecasting intermediate values (based on historical aggregates ending _calculation_date)
        (COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) AS sales_velocity_daily, -- Ensure divisor > 0
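        -- Velocity recap: sales_velocity_daily = sales_30d / max(1, 30 - stockout_days_30d),
        -- i.e. demand per *in-stock* day, so days the product was unavailable do not
        -- depress the forecast. Every forecast column below reuses this expression inline.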
        s.effective_lead_time AS config_lead_time, s.effective_days_of_stock AS config_days_of_stock, s.effective_safety_stock AS config_safety_stock,
        (s.effective_lead_time + s.effective_days_of_stock) AS planning_period_days,
        -- Calculate raw forecast need components (using safe velocity)
        (COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time AS lead_time_forecast_units,
        (COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock AS days_of_stock_forecast_units,
        -- Planning period forecast units (sum of lead time and DOS units)
        CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
            + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock) AS planning_period_forecast_units,
        -- Closing stock calculations (using raw forecast components for accuracy before rounding)
        (ci.current_stock + COALESCE(ooi.on_order_qty, 0) - ((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)) AS lead_time_closing_stock,
        ((ci.current_stock + COALESCE(ooi.on_order_qty, 0) - ((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)))
            - ((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock) AS days_of_stock_closing_stock,
        -- Raw replenishment needed
        (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time) -- Use rounded forecast units
            + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
            + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0) AS replenishment_needed_raw,

        -- Final Forecasting Metrics
        -- Replenishment Units (calculated need, before MOQ)
        CEILING(GREATEST(0,
            (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
                + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
            + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
        ))::int AS replenishment_units,
        -- Replenishment Cost/Retail/Profit (based on replenishment_units)
        (CEILING(GREATEST(0,
            (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
                + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
            + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
        ))::int) * COALESCE(ci.current_effective_cost, 0.00)::numeric(12,2) AS replenishment_cost,
        (CEILING(GREATEST(0,
            (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
                + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
            + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
        ))::int) * COALESCE(ci.current_price, 0.00)::numeric(12,2) AS replenishment_retail,
        (CEILING(GREATEST(0,
            (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
                + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
            + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
        ))::int) * (COALESCE(ci.current_price, 0.00) - COALESCE(ci.current_effective_cost, 0.00))::numeric(12,2) AS replenishment_profit,

        -- *** FIX: To Order Units (Apply MOQ rounding) ***
        CASE
            WHEN COALESCE(ci.moq, 0) <= 1 THEN -- Treat no/invalid MOQ or MOQ=1 as no rounding needed
                CEILING(GREATEST(0,
                    (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
                        + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
                    + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
                ))::int
            ELSE -- Apply MOQ rounding: Round UP to nearest multiple of MOQ
                (CEILING(GREATEST(0,
                    (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
                        + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
                    + s.effective_safety_stock - ci.current_stock - COALESCE(ooi.on_order_qty, 0)
                ) / NULLIF(ci.moq::numeric, 0)) * COALESCE(ci.moq, 1))::int
        END AS to_order_units,
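        -- Replenishment recap: need = ceil(velocity * lead_time) + ceil(velocity * days_of_stock)
        --                             + safety_stock - current_stock - on_order.
        -- replenishment_units clamps that need at zero; to_order_units additionally
        -- rounds it up to the nearest multiple of the MOQ when MOQ > 1.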
        -- Forecast Lost Sales (Units occurring during lead time if current+on_order is insufficient)
        CEILING(GREATEST(0,
            ((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time) -- Demand during lead time
            - (ci.current_stock + COALESCE(ooi.on_order_qty, 0)) -- Supply available before order arrives
        ))::int AS forecast_lost_sales_units,
        -- Forecast Lost Revenue
        (CEILING(GREATEST(0,
            ((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
            - (ci.current_stock + COALESCE(ooi.on_order_qty, 0))
        ))::int) * COALESCE(ci.current_price, 0.00)::numeric(12,2) AS forecast_lost_revenue,

        -- Stock Cover etc (using safe velocity)
        ci.current_stock / NULLIF((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)), 0) AS stock_cover_in_days,
        COALESCE(ooi.on_order_qty, 0) / NULLIF((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)), 0) AS po_cover_in_days,
        (ci.current_stock + COALESCE(ooi.on_order_qty, 0)) / NULLIF((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)), 0) AS sells_out_in_days,
        -- Replenish Date (Project forward from 'today', which is _calculation_date + 1 day)
        CASE
            WHEN (COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) > 0 -- Check for positive velocity
            THEN
                _calculation_date + INTERVAL '1 day' -- Today
                + FLOOR(GREATEST(0, ci.current_stock - s.effective_safety_stock) -- Stock above safety
                    / (COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) -- divided by velocity
                )::integer * INTERVAL '1 day' -- Gives date safety stock is hit
                - s.effective_lead_time * INTERVAL '1 day' -- Subtract lead time
            ELSE NULL -- Cannot calculate if no sales velocity
        END AS replenish_date,
        -- Overstocked Units (Stock above safety + planning period demand)
        GREATEST(0, ci.current_stock - s.effective_safety_stock -
            (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time) -- Demand during lead time
            + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock)) -- Demand during DOS
        )::int AS overstocked_units,
        (GREATEST(0, ci.current_stock - s.effective_safety_stock -
            (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
            + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
        )::int) * COALESCE(ci.current_effective_cost, 0.00)::numeric(12,2) AS overstocked_cost,
        (GREATEST(0, ci.current_stock - s.effective_safety_stock -
            (CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_lead_time)
            + CEILING((COALESCE(sa.sales_30d, 0) / NULLIF(GREATEST(1.0, 30.0 - COALESCE(sa.stockout_days_30d, 0)), 0)) * s.effective_days_of_stock))
        )::int) * COALESCE(ci.current_price, 0.00)::numeric(12,2) AS overstocked_retail,
        -- Old Stock Flag
        (ci.created_at::date < (_calculation_date - INTERVAL '60 day')::date) AND
        (COALESCE(ci.date_last_sold, hd.max_order_date) IS NULL OR COALESCE(ci.date_last_sold, hd.max_order_date) < (_calculation_date - INTERVAL '60 day')::date) AND
        (hd.date_last_received_calc IS NULL OR hd.date_last_received_calc < (_calculation_date - INTERVAL '60 day')::date) AND
        COALESCE(ooi.on_order_qty, 0) = 0 AS is_old_stock,
        COALESCE(sa.yesterday_sales, 0) -- Sales for _calculation_date

    FROM CurrentInfo ci
    LEFT JOIN OnOrderInfo ooi ON ci.pid = ooi.pid
    LEFT JOIN HistoricalDates hd ON ci.pid = hd.pid
    LEFT JOIN SnapshotAggregates sa ON ci.pid = sa.pid
    LEFT JOIN FirstPeriodMetrics fpm ON ci.pid = fpm.pid
    LEFT JOIN Settings s ON ci.pid = s.pid
    LEFT JOIN AvgLeadTime alt ON ci.pid = alt.pid -- Join calculated avg lead time
    LEFT JOIN FinalABC fa ON ci.pid = fa.pid -- Join calculated ABC class
    WHERE s.exclude_forecast IS FALSE OR s.exclude_forecast IS NULL

    ON CONFLICT (pid) DO UPDATE SET
        -- *** IMPORTANT: List ALL columns here, ensuring order matches INSERT list ***
        -- Update ALL columns to ensure the entire row is refreshed
        last_calculated = EXCLUDED.last_calculated, sku = EXCLUDED.sku, title = EXCLUDED.title, brand = EXCLUDED.brand, vendor = EXCLUDED.vendor, image_url = EXCLUDED.image_url, is_visible = EXCLUDED.is_visible, is_replenishable = EXCLUDED.is_replenishable,
        current_price = EXCLUDED.current_price, current_regular_price = EXCLUDED.current_regular_price, current_cost_price = EXCLUDED.current_cost_price, current_landing_cost_price = EXCLUDED.current_landing_cost_price,
        current_stock = EXCLUDED.current_stock, current_stock_cost = EXCLUDED.current_stock_cost, current_stock_retail = EXCLUDED.current_stock_retail, current_stock_gross = EXCLUDED.current_stock_gross,
        on_order_qty = EXCLUDED.on_order_qty, on_order_cost = EXCLUDED.on_order_cost, on_order_retail = EXCLUDED.on_order_retail, earliest_expected_date = EXCLUDED.earliest_expected_date,
        date_created = EXCLUDED.date_created, date_first_received = EXCLUDED.date_first_received, date_last_received = EXCLUDED.date_last_received, date_first_sold = EXCLUDED.date_first_sold, date_last_sold = EXCLUDED.date_last_sold, age_days = EXCLUDED.age_days,
        sales_7d = EXCLUDED.sales_7d, revenue_7d = EXCLUDED.revenue_7d, sales_14d = EXCLUDED.sales_14d, revenue_14d = EXCLUDED.revenue_14d, sales_30d = EXCLUDED.sales_30d, revenue_30d = EXCLUDED.revenue_30d, cogs_30d = EXCLUDED.cogs_30d, profit_30d = EXCLUDED.profit_30d,
        returns_units_30d = EXCLUDED.returns_units_30d, returns_revenue_30d = EXCLUDED.returns_revenue_30d, discounts_30d = EXCLUDED.discounts_30d, gross_revenue_30d = EXCLUDED.gross_revenue_30d, gross_regular_revenue_30d = EXCLUDED.gross_regular_revenue_30d,
        stockout_days_30d = EXCLUDED.stockout_days_30d, sales_365d = EXCLUDED.sales_365d, revenue_365d = EXCLUDED.revenue_365d,
        avg_stock_units_30d = EXCLUDED.avg_stock_units_30d, avg_stock_cost_30d = EXCLUDED.avg_stock_cost_30d, avg_stock_retail_30d = EXCLUDED.avg_stock_retail_30d, avg_stock_gross_30d = EXCLUDED.avg_stock_gross_30d,
        received_qty_30d = EXCLUDED.received_qty_30d, received_cost_30d = EXCLUDED.received_cost_30d,
        lifetime_sales = EXCLUDED.lifetime_sales, lifetime_revenue = EXCLUDED.lifetime_revenue,
        first_7_days_sales = EXCLUDED.first_7_days_sales, first_7_days_revenue = EXCLUDED.first_7_days_revenue, first_30_days_sales = EXCLUDED.first_30_days_sales, first_30_days_revenue = EXCLUDED.first_30_days_revenue,
        first_60_days_sales = EXCLUDED.first_60_days_sales, first_60_days_revenue = EXCLUDED.first_60_days_revenue, first_90_days_sales = EXCLUDED.first_90_days_sales, first_90_days_revenue = EXCLUDED.first_90_days_revenue,
        asp_30d = EXCLUDED.asp_30d, acp_30d = EXCLUDED.acp_30d, avg_ros_30d = EXCLUDED.avg_ros_30d, avg_sales_per_day_30d = EXCLUDED.avg_sales_per_day_30d,
        margin_30d = EXCLUDED.margin_30d, markup_30d = EXCLUDED.markup_30d, gmroi_30d = EXCLUDED.gmroi_30d, stockturn_30d = EXCLUDED.stockturn_30d, return_rate_30d = EXCLUDED.return_rate_30d, discount_rate_30d = EXCLUDED.discount_rate_30d,
        stockout_rate_30d = EXCLUDED.stockout_rate_30d, markdown_30d = EXCLUDED.markdown_30d, markdown_rate_30d = EXCLUDED.markdown_rate_30d, sell_through_30d = EXCLUDED.sell_through_30d,
        avg_lead_time_days = EXCLUDED.avg_lead_time_days, abc_class = EXCLUDED.abc_class,
        sales_velocity_daily = EXCLUDED.sales_velocity_daily, config_lead_time = EXCLUDED.config_lead_time, config_days_of_stock = EXCLUDED.config_days_of_stock, config_safety_stock = EXCLUDED.config_safety_stock,
        planning_period_days = EXCLUDED.planning_period_days, lead_time_forecast_units = EXCLUDED.lead_time_forecast_units, days_of_stock_forecast_units = EXCLUDED.days_of_stock_forecast_units,
        planning_period_forecast_units = EXCLUDED.planning_period_forecast_units, lead_time_closing_stock = EXCLUDED.lead_time_closing_stock, days_of_stock_closing_stock = EXCLUDED.days_of_stock_closing_stock,
        replenishment_needed_raw = EXCLUDED.replenishment_needed_raw, replenishment_units = EXCLUDED.replenishment_units, replenishment_cost = EXCLUDED.replenishment_cost, replenishment_retail = EXCLUDED.replenishment_retail, replenishment_profit = EXCLUDED.replenishment_profit,
        to_order_units = EXCLUDED.to_order_units, -- *** Updated to use EXCLUDED ***
        forecast_lost_sales_units = EXCLUDED.forecast_lost_sales_units, forecast_lost_revenue = EXCLUDED.forecast_lost_revenue,
        stock_cover_in_days = EXCLUDED.stock_cover_in_days, po_cover_in_days = EXCLUDED.po_cover_in_days, sells_out_in_days = EXCLUDED.sells_out_in_days, replenish_date = EXCLUDED.replenish_date,
        overstocked_units = EXCLUDED.overstocked_units, overstocked_cost = EXCLUDED.overstocked_cost, overstocked_retail = EXCLUDED.overstocked_retail, is_old_stock = EXCLUDED.is_old_stock,
        yesterday_sales = EXCLUDED.yesterday_sales;
    RAISE NOTICE 'Finished % module. Duration: %', _module_name, clock_timestamp() - _start_time;
END $$;
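-- A quick sanity check after the run (ad-hoc sketch, intentionally commented out
-- so the file's behavior is unchanged; it only touches columns inserted above):
-- SELECT pid, sku, abc_class, sales_velocity_daily,
--        stock_cover_in_days, to_order_units, replenishment_cost
-- FROM public.product_metrics
-- WHERE to_order_units > 0
-- ORDER BY replenishment_cost DESC
-- LIMIT 20;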
@@ -0,0 +1,152 @@
-- Description: Rebuilds daily product snapshots from scratch using real orders data.
--              Fixes issues with duplicated/inflated metrics.
-- Dependencies: Core import tables (products, orders, receivings).
-- Frequency: One-time run to clear out problematic data.

DO $$
DECLARE
    _module_name TEXT := 'rebuild_daily_snapshots';
    _start_time TIMESTAMPTZ := clock_timestamp();
    _date DATE;
    _count INT;
    _total_records INT := 0;
    _begin_date DATE := (SELECT MIN(date)::date FROM orders WHERE date >= '2024-01-01'); -- Starting point for data rebuild
    _end_date DATE := CURRENT_DATE;
BEGIN
    RAISE NOTICE 'Beginning daily snapshots rebuild from % to %. Starting at %', _begin_date, _end_date, _start_time;

    -- First truncate the existing snapshots to ensure a clean slate
    TRUNCATE TABLE public.daily_product_snapshots;
    RAISE NOTICE 'Cleared existing snapshot data';

    -- Now rebuild the snapshots day by day
    _date := _begin_date;

    WHILE _date <= _end_date LOOP
        RAISE NOTICE 'Processing date %...', _date;

        -- Create snapshots for this date
        WITH SalesData AS (
            SELECT
                p.pid,
                p.sku,
                -- Count orders to ensure we only include products with real activity
                COUNT(o.id) as order_count,
                -- Aggregate Sales (Quantity > 0, Status not Canceled/Returned)
                COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN o.quantity ELSE 0 END), 0) AS units_sold,
                COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN o.price * o.quantity ELSE 0 END), 0.00) AS gross_revenue_unadjusted,
                COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN o.discount ELSE 0 END), 0.00) AS discounts,
                COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN COALESCE(o.costeach, p.landing_cost_price, p.cost_price) * o.quantity ELSE 0 END), 0.00) AS cogs,
                COALESCE(SUM(CASE WHEN o.quantity > 0 AND COALESCE(o.status, 'pending') NOT IN ('canceled', 'returned') THEN p.regular_price * o.quantity ELSE 0 END), 0.00) AS gross_regular_revenue,

                -- Aggregate Returns (Quantity < 0 or Status = Returned)
                COALESCE(SUM(CASE WHEN o.quantity < 0 OR COALESCE(o.status, 'pending') = 'returned' THEN ABS(o.quantity) ELSE 0 END), 0) AS units_returned,
                COALESCE(SUM(CASE WHEN o.quantity < 0 OR COALESCE(o.status, 'pending') = 'returned' THEN o.price * ABS(o.quantity) ELSE 0 END), 0.00) AS returns_revenue
            FROM public.products p
            LEFT JOIN public.orders o
                ON p.pid = o.pid
                AND o.date::date = _date
            GROUP BY p.pid, p.sku
            HAVING COUNT(o.id) > 0 -- Only include products with actual orders for this date
        ),
        ReceivingData AS (
            SELECT
                r.pid,
                -- Count receiving documents to ensure we only include products with real activity
                COUNT(DISTINCT r.receiving_id) as receiving_count,
                -- Calculate received quantity for this day
                SUM(r.qty_each) AS units_received,
                -- Calculate received cost for this day
                SUM(r.qty_each * r.cost_each) AS cost_received
            FROM public.receivings r
            WHERE r.received_date::date = _date
            GROUP BY r.pid
            HAVING COUNT(DISTINCT r.receiving_id) > 0 OR SUM(r.qty_each) > 0
        ),
        -- Get stock quantities for the day - note this is approximate since we're using current products data
        StockData AS (
            SELECT
                p.pid,
                p.stock_quantity,
                COALESCE(p.landing_cost_price, p.cost_price, 0.00) as effective_cost_price,
                COALESCE(p.price, 0.00) as current_price,
                COALESCE(p.regular_price, 0.00) as current_regular_price
            FROM public.products p
        )
        INSERT INTO public.daily_product_snapshots (
            snapshot_date,
            pid,
            sku,
            eod_stock_quantity,
            eod_stock_cost,
            eod_stock_retail,
            eod_stock_gross,
            stockout_flag,
            units_sold,
            units_returned,
            gross_revenue,
            discounts,
            returns_revenue,
            net_revenue,
            cogs,
            gross_regular_revenue,
            profit,
            units_received,
            cost_received,
            calculation_timestamp
        )
        SELECT
            _date AS snapshot_date,
            COALESCE(sd.pid, rd.pid) AS pid,
            sd.sku,
            -- Use current stock as approximation, since historical stock data may not be available
            s.stock_quantity AS eod_stock_quantity,
            s.stock_quantity * s.effective_cost_price AS eod_stock_cost,
            s.stock_quantity * s.current_price AS eod_stock_retail,
            s.stock_quantity * s.current_regular_price AS eod_stock_gross,
            (s.stock_quantity <= 0) AS stockout_flag,
            -- Sales metrics
            COALESCE(sd.units_sold, 0),
            COALESCE(sd.units_returned, 0),
            COALESCE(sd.gross_revenue_unadjusted, 0.00),
            COALESCE(sd.discounts, 0.00),
            COALESCE(sd.returns_revenue, 0.00),
            COALESCE(sd.gross_revenue_unadjusted, 0.00) - COALESCE(sd.discounts, 0.00) AS net_revenue,
            COALESCE(sd.cogs, 0.00),
            COALESCE(sd.gross_regular_revenue, 0.00),
            (COALESCE(sd.gross_revenue_unadjusted, 0.00) - COALESCE(sd.discounts, 0.00)) - COALESCE(sd.cogs, 0.00) AS profit,
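            -- Derivations above: net_revenue = gross_revenue - discounts, and
            -- profit = net_revenue - cogs (each per product per snapshot day).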
            -- Receiving metrics
            COALESCE(rd.units_received, 0),
            COALESCE(rd.cost_received, 0.00),
            _start_time
        FROM SalesData sd
        FULL OUTER JOIN ReceivingData rd ON sd.pid = rd.pid
        LEFT JOIN StockData s ON COALESCE(sd.pid, rd.pid) = s.pid
        WHERE (COALESCE(sd.order_count, 0) > 0 OR COALESCE(rd.receiving_count, 0) > 0);

        -- Get record count for this day
        GET DIAGNOSTICS _count = ROW_COUNT;
        _total_records := _total_records + _count;

        RAISE NOTICE 'Added % snapshot records for date %', _count, _date;

        -- Move to next day
        _date := _date + INTERVAL '1 day';
    END LOOP;

    RAISE NOTICE 'Rebuilding daily snapshots complete. Added % total records across % days.', _total_records, (_end_date - _begin_date)::integer + 1;

    -- Update the status table for daily_snapshots
    INSERT INTO public.calculate_status (module_name, last_calculation_timestamp)
    VALUES ('daily_snapshots', _start_time)
    ON CONFLICT (module_name) DO UPDATE SET last_calculation_timestamp = _start_time;

    -- Now update product_metrics based on the rebuilt snapshots
    RAISE NOTICE 'Triggering update of product_metrics table...';

    -- Notify listeners to recalculate product_metrics
    -- Your system might use a different method to trigger this update
    PERFORM pg_notify('recalculate_metrics', 'product_metrics');

    RAISE NOTICE 'Rebuild complete. Duration: %', clock_timestamp() - _start_time;
END $$;
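-- Verification sketch (ad-hoc, commented out so the rebuild script's behavior is
-- unchanged): confirm row counts and daily totals of the rebuilt snapshots.
-- SELECT snapshot_date, COUNT(*) AS row_count,
--        SUM(units_sold) AS units, SUM(net_revenue) AS revenue
-- FROM public.daily_product_snapshots
-- GROUP BY snapshot_date
-- ORDER BY snapshot_date DESC
-- LIMIT 14;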
156
inventory-server/scripts/metrics-new/calculate_brand_metrics.sql
Normal file
@@ -0,0 +1,156 @@
-- Description: Calculates and updates aggregated metrics per brand.
-- Dependencies: product_metrics, products, calculate_status table.
-- Frequency: Daily (after product_metrics update).

DO $$
DECLARE
    _module_name VARCHAR := 'brand_metrics';
    _start_time TIMESTAMPTZ := clock_timestamp();
    _min_revenue NUMERIC := 50.00; -- Minimum revenue threshold for margin calculation
BEGIN
    RAISE NOTICE 'Running % calculation...', _module_name;

    WITH BrandAggregates AS (
        -- Aggregate metrics from product_metrics table per brand
        SELECT
            COALESCE(p.brand, 'Unbranded') AS brand_group, -- Group NULL/empty brands together
            COUNT(DISTINCT pm.pid) AS product_count,
            COUNT(DISTINCT CASE WHEN pm.is_visible THEN pm.pid END) AS active_product_count,
            COUNT(DISTINCT CASE WHEN pm.is_replenishable THEN pm.pid END) AS replenishable_product_count,
            SUM(pm.current_stock) AS current_stock_units,
            SUM(pm.current_stock_cost) AS current_stock_cost,
            SUM(pm.current_stock_retail) AS current_stock_retail,
            -- Only include products with valid sales data in each time period
            COUNT(DISTINCT CASE WHEN pm.sales_7d > 0 THEN pm.pid END) AS products_with_sales_7d,
            SUM(CASE WHEN pm.sales_7d > 0 THEN pm.sales_7d ELSE 0 END) AS sales_7d,
            SUM(CASE WHEN pm.revenue_7d > 0 THEN pm.revenue_7d ELSE 0 END) AS revenue_7d,

            COUNT(DISTINCT CASE WHEN pm.sales_30d > 0 THEN pm.pid END) AS products_with_sales_30d,
            SUM(CASE WHEN pm.sales_30d > 0 THEN pm.sales_30d ELSE 0 END) AS sales_30d,
            SUM(CASE WHEN pm.revenue_30d > 0 THEN pm.revenue_30d ELSE 0 END) AS revenue_30d,
            SUM(CASE WHEN pm.cogs_30d > 0 THEN pm.cogs_30d ELSE 0 END) AS cogs_30d,
            SUM(CASE WHEN pm.profit_30d != 0 THEN pm.profit_30d ELSE 0 END) AS profit_30d,

            COUNT(DISTINCT CASE WHEN pm.sales_365d > 0 THEN pm.pid END) AS products_with_sales_365d,
            SUM(CASE WHEN pm.sales_365d > 0 THEN pm.sales_365d ELSE 0 END) AS sales_365d,
            SUM(CASE WHEN pm.revenue_365d > 0 THEN pm.revenue_365d ELSE 0 END) AS revenue_365d,

            COUNT(DISTINCT CASE WHEN pm.lifetime_sales > 0 THEN pm.pid END) AS products_with_lifetime_sales,
            SUM(CASE WHEN pm.lifetime_sales > 0 THEN pm.lifetime_sales ELSE 0 END) AS lifetime_sales,
            SUM(CASE WHEN pm.lifetime_revenue > 0 THEN pm.lifetime_revenue ELSE 0 END) AS lifetime_revenue
        FROM public.product_metrics pm
        JOIN public.products p ON pm.pid = p.pid
        GROUP BY brand_group
    ),
    PreviousPeriodBrandMetrics AS (
        -- Get previous period metrics for growth calculation
        SELECT
            COALESCE(p.brand, 'Unbranded') AS brand_group,
            SUM(CASE WHEN dps.snapshot_date >= CURRENT_DATE - INTERVAL '59 days'
                      AND dps.snapshot_date < CURRENT_DATE - INTERVAL '29 days'
                     THEN dps.units_sold ELSE 0 END) AS sales_prev_30d,
            SUM(CASE WHEN dps.snapshot_date >= CURRENT_DATE - INTERVAL '59 days'
                      AND dps.snapshot_date < CURRENT_DATE - INTERVAL '29 days'
                     THEN dps.net_revenue ELSE 0 END) AS revenue_prev_30d
        FROM public.daily_product_snapshots dps
        JOIN public.products p ON dps.pid = p.pid
        GROUP BY brand_group
    ),
    AllBrands AS (
        -- Ensure all brands from products table are included, mapping NULL/empty to 'Unbranded'
        SELECT DISTINCT COALESCE(brand, 'Unbranded') as brand_group
        FROM public.products
    )
    INSERT INTO public.brand_metrics (
        brand_name, last_calculated,
        product_count, active_product_count, replenishable_product_count,
        current_stock_units, current_stock_cost, current_stock_retail,
        sales_7d, revenue_7d, sales_30d, revenue_30d, profit_30d, cogs_30d,
        sales_365d, revenue_365d, lifetime_sales, lifetime_revenue,
        avg_margin_30d,
        sales_growth_30d_vs_prev, revenue_growth_30d_vs_prev
    )
    SELECT
        b.brand_group,
        _start_time,
        -- Base Aggregates
        COALESCE(ba.product_count, 0),
        COALESCE(ba.active_product_count, 0),
        COALESCE(ba.replenishable_product_count, 0),
        COALESCE(ba.current_stock_units, 0),
        COALESCE(ba.current_stock_cost, 0.00),
        COALESCE(ba.current_stock_retail, 0.00),
        -- Sales Aggregates
        COALESCE(ba.sales_7d, 0), COALESCE(ba.revenue_7d, 0.00),
        COALESCE(ba.sales_30d, 0), COALESCE(ba.revenue_30d, 0.00),
        COALESCE(ba.profit_30d, 0.00), COALESCE(ba.cogs_30d, 0.00),
        COALESCE(ba.sales_365d, 0), COALESCE(ba.revenue_365d, 0.00),
        COALESCE(ba.lifetime_sales, 0), COALESCE(ba.lifetime_revenue, 0.00),
        -- KPIs - Calculate margin only for brands with significant revenue
        CASE
            WHEN COALESCE(ba.revenue_30d, 0) >= _min_revenue THEN
                -- Directly calculate margin from revenue and cogs for consistency
                -- This is mathematically equivalent to profit/revenue but more explicit
                ((COALESCE(ba.revenue_30d, 0) - COALESCE(ba.cogs_30d, 0)) / COALESCE(ba.revenue_30d, 1)) * 100.0
            ELSE NULL -- No margin for low/no revenue brands
        END,
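        -- Margin recap: avg_margin_30d = (revenue_30d - cogs_30d) / revenue_30d * 100,
        -- reported only when revenue_30d >= _min_revenue so low-volume brands
        -- don't show noisy small-sample margins.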
        -- Growth metrics
        std_numeric(safe_divide((ba.sales_30d - ppbm.sales_prev_30d) * 100.0, ppbm.sales_prev_30d), 2),
        std_numeric(safe_divide((ba.revenue_30d - ppbm.revenue_prev_30d) * 100.0, ppbm.revenue_prev_30d), 2)
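        -- Growth recap: growth% = (current 30d - previous 30d) / previous 30d * 100.
        -- safe_divide and std_numeric are project helpers (assumed: NULL on a zero
        -- divisor, and rounding to the given scale), so brands with no prior-period
        -- sales report NULL growth instead of a division error.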
    FROM AllBrands b
    LEFT JOIN BrandAggregates ba ON b.brand_group = ba.brand_group
    LEFT JOIN PreviousPeriodBrandMetrics ppbm ON b.brand_group = ppbm.brand_group

    ON CONFLICT (brand_name) DO UPDATE SET
        last_calculated = EXCLUDED.last_calculated,
        product_count = EXCLUDED.product_count,
        active_product_count = EXCLUDED.active_product_count,
        replenishable_product_count = EXCLUDED.replenishable_product_count,
        current_stock_units = EXCLUDED.current_stock_units,
        current_stock_cost = EXCLUDED.current_stock_cost,
        current_stock_retail = EXCLUDED.current_stock_retail,
        sales_7d = EXCLUDED.sales_7d, revenue_7d = EXCLUDED.revenue_7d,
        sales_30d = EXCLUDED.sales_30d, revenue_30d = EXCLUDED.revenue_30d,
        profit_30d = EXCLUDED.profit_30d, cogs_30d = EXCLUDED.cogs_30d,
        sales_365d = EXCLUDED.sales_365d, revenue_365d = EXCLUDED.revenue_365d,
        lifetime_sales = EXCLUDED.lifetime_sales, lifetime_revenue = EXCLUDED.lifetime_revenue,
        avg_margin_30d = EXCLUDED.avg_margin_30d,
        sales_growth_30d_vs_prev = EXCLUDED.sales_growth_30d_vs_prev,
        revenue_growth_30d_vs_prev = EXCLUDED.revenue_growth_30d_vs_prev
    WHERE -- Only update if at least one value has changed
        brand_metrics.product_count IS DISTINCT FROM EXCLUDED.product_count OR
        brand_metrics.active_product_count IS DISTINCT FROM EXCLUDED.active_product_count OR
        brand_metrics.current_stock_units IS DISTINCT FROM EXCLUDED.current_stock_units OR
        brand_metrics.sales_30d IS DISTINCT FROM EXCLUDED.sales_30d OR
        brand_metrics.revenue_30d IS DISTINCT FROM EXCLUDED.revenue_30d OR
        brand_metrics.lifetime_sales IS DISTINCT FROM EXCLUDED.lifetime_sales;

    -- Update calculate_status
    INSERT INTO public.calculate_status (module_name, last_calculation_timestamp)
    VALUES (_module_name, _start_time)
    ON CONFLICT (module_name) DO UPDATE SET last_calculation_timestamp = _start_time;

    RAISE NOTICE 'Finished % calculation. Duration: %', _module_name, clock_timestamp() - _start_time;
END $$;

-- Return metrics about the update operation for tracking
WITH update_stats AS (
    SELECT
        COUNT(*) as total_brands,
        COUNT(*) FILTER (WHERE last_calculated >= NOW() - INTERVAL '5 minutes') as rows_processed,
        SUM(product_count) as total_products,
        SUM(active_product_count) as total_active_products,
        SUM(sales_30d) as total_sales_30d,
        SUM(revenue_30d) as total_revenue_30d,
        AVG(avg_margin_30d) as overall_avg_margin_30d
    FROM public.brand_metrics
)
SELECT
    rows_processed,
    total_brands,
    total_products::int,
    total_active_products::int,
    total_sales_30d::int,
    ROUND(total_revenue_30d, 2) as total_revenue_30d,
    ROUND(overall_avg_margin_30d, 2) as overall_avg_margin_30d
FROM update_stats;
@@ -0,0 +1,239 @@
-- Description: Calculates and updates aggregated metrics per category with hierarchy rollups.
-- Dependencies: product_metrics, products, categories, product_categories, category_hierarchy, calculate_status table.
-- Frequency: Daily (after product_metrics update).

DO $$
DECLARE
    _module_name VARCHAR := 'category_metrics';
    _start_time TIMESTAMPTZ := clock_timestamp();
    _min_revenue NUMERIC := 50.00; -- Minimum revenue threshold for margin calculation
BEGIN
    RAISE NOTICE 'Running % calculation...', _module_name;

    -- Refresh the category hierarchy materialized view first
    -- (plain REFRESH: CONCURRENTLY is not allowed inside the transaction a DO block runs in)
    REFRESH MATERIALIZED VIEW category_hierarchy;

    -- First calculate direct metrics (products directly in each category)
    WITH DirectCategoryMetrics AS (
        SELECT
            pc.cat_id,
            COUNT(DISTINCT pm.pid) AS product_count,
            COUNT(DISTINCT CASE WHEN pm.is_visible THEN pm.pid END) AS active_product_count,
            COUNT(DISTINCT CASE WHEN pm.is_replenishable THEN pm.pid END) AS replenishable_product_count,
            SUM(pm.current_stock) AS current_stock_units,
            SUM(pm.current_stock_cost) AS current_stock_cost,
            SUM(pm.current_stock_retail) AS current_stock_retail,
            -- Sales metrics with proper filtering
            SUM(CASE WHEN pm.sales_7d > 0 THEN pm.sales_7d ELSE 0 END) AS sales_7d,
            SUM(CASE WHEN pm.revenue_7d > 0 THEN pm.revenue_7d ELSE 0 END) AS revenue_7d,
            SUM(CASE WHEN pm.sales_30d > 0 THEN pm.sales_30d ELSE 0 END) AS sales_30d,
            SUM(CASE WHEN pm.revenue_30d > 0 THEN pm.revenue_30d ELSE 0 END) AS revenue_30d,
            SUM(CASE WHEN pm.cogs_30d > 0 THEN pm.cogs_30d ELSE 0 END) AS cogs_30d,
            SUM(CASE WHEN pm.profit_30d != 0 THEN pm.profit_30d ELSE 0 END) AS profit_30d,
            SUM(CASE WHEN pm.sales_365d > 0 THEN pm.sales_365d ELSE 0 END) AS sales_365d,
            SUM(CASE WHEN pm.revenue_365d > 0 THEN pm.revenue_365d ELSE 0 END) AS revenue_365d,
            SUM(CASE WHEN pm.lifetime_sales > 0 THEN pm.lifetime_sales ELSE 0 END) AS lifetime_sales,
            SUM(CASE WHEN pm.lifetime_revenue > 0 THEN pm.lifetime_revenue ELSE 0 END) AS lifetime_revenue
        FROM public.product_categories pc
        JOIN public.product_metrics pm ON pc.pid = pm.pid
        GROUP BY pc.cat_id
    ),
    -- Calculate rolled-up metrics (including all descendant categories)
    RolledUpMetrics AS (
        SELECT
            ch.cat_id,
            -- Sum metrics from this category and all its descendants
            SUM(dcm.product_count) AS product_count,
            SUM(dcm.active_product_count) AS active_product_count,
            SUM(dcm.replenishable_product_count) AS replenishable_product_count,
            SUM(dcm.current_stock_units) AS current_stock_units,
            SUM(dcm.current_stock_cost) AS current_stock_cost,
            SUM(dcm.current_stock_retail) AS current_stock_retail,
            SUM(dcm.sales_7d) AS sales_7d,
            SUM(dcm.revenue_7d) AS revenue_7d,
            SUM(dcm.sales_30d) AS sales_30d,
            SUM(dcm.revenue_30d) AS revenue_30d,
            SUM(dcm.cogs_30d) AS cogs_30d,
            SUM(dcm.profit_30d) AS profit_30d,
            SUM(dcm.sales_365d) AS sales_365d,
            SUM(dcm.revenue_365d) AS revenue_365d,
            SUM(dcm.lifetime_sales) AS lifetime_sales,
            SUM(dcm.lifetime_revenue) AS lifetime_revenue
        FROM category_hierarchy ch
        LEFT JOIN DirectCategoryMetrics dcm ON
            dcm.cat_id = ch.cat_id OR
            dcm.cat_id = ANY(SELECT cat_id FROM category_hierarchy WHERE ch.cat_id = ANY(ancestor_ids))
        GROUP BY ch.cat_id
    ),
    PreviousPeriodCategoryMetrics AS (
        -- Previous-period (days 30-59 before today) metrics for growth calculation;
        -- the WHERE clause bounds the snapshot scan to that window
        SELECT
            pc.cat_id,
            SUM(dps.units_sold)  AS sales_prev_30d,
            SUM(dps.net_revenue) AS revenue_prev_30d
        FROM public.daily_product_snapshots dps
        JOIN public.product_categories pc ON dps.pid = pc.pid
        WHERE dps.snapshot_date >= CURRENT_DATE - INTERVAL '59 days'
          AND dps.snapshot_date <  CURRENT_DATE - INTERVAL '29 days'
        GROUP BY pc.cat_id
    ),
    RolledUpPreviousPeriod AS (
        -- Calculate rolled-up previous-period metrics
        SELECT
            ch.cat_id,
            SUM(ppcm.sales_prev_30d) AS sales_prev_30d,
            SUM(ppcm.revenue_prev_30d) AS revenue_prev_30d
        FROM category_hierarchy ch
        LEFT JOIN PreviousPeriodCategoryMetrics ppcm ON
            ppcm.cat_id = ch.cat_id OR
            ppcm.cat_id = ANY(SELECT cat_id FROM category_hierarchy WHERE ch.cat_id = ANY(ancestor_ids))
        GROUP BY ch.cat_id
    ),
    AllCategories AS (
        -- Ensure all active categories are included
        SELECT
            c.cat_id,
            c.name,
            c.type,
            c.parent_id
        FROM public.categories c
        WHERE c.status = 'active'
    )
    INSERT INTO public.category_metrics (
        category_id, category_name, category_type, parent_id, last_calculated,
        -- Rolled-up metrics
        product_count, active_product_count, replenishable_product_count,
        current_stock_units, current_stock_cost, current_stock_retail,
        sales_7d, revenue_7d, sales_30d, revenue_30d, profit_30d, cogs_30d,
        sales_365d, revenue_365d, lifetime_sales, lifetime_revenue,
        -- Direct metrics
        direct_product_count, direct_active_product_count, direct_replenishable_product_count,
        direct_current_stock_units, direct_stock_cost, direct_stock_retail,
        direct_sales_7d, direct_revenue_7d, direct_sales_30d, direct_revenue_30d,
        direct_profit_30d, direct_cogs_30d, direct_sales_365d, direct_revenue_365d,
        direct_lifetime_sales, direct_lifetime_revenue,
        -- KPIs
        avg_margin_30d,
        sales_growth_30d_vs_prev, revenue_growth_30d_vs_prev
    )
    SELECT
        ac.cat_id,
        ac.name,
        ac.type,
        ac.parent_id,
        _start_time,
        -- Rolled-up metrics (includes descendants)
        COALESCE(rum.product_count, 0),
        COALESCE(rum.active_product_count, 0),
        COALESCE(rum.replenishable_product_count, 0),
        COALESCE(rum.current_stock_units, 0),
        COALESCE(rum.current_stock_cost, 0.00),
        COALESCE(rum.current_stock_retail, 0.00),
        COALESCE(rum.sales_7d, 0), COALESCE(rum.revenue_7d, 0.00),
        COALESCE(rum.sales_30d, 0), COALESCE(rum.revenue_30d, 0.00),
        COALESCE(rum.profit_30d, 0.00), COALESCE(rum.cogs_30d, 0.00),
        COALESCE(rum.sales_365d, 0), COALESCE(rum.revenue_365d, 0.00),
        COALESCE(rum.lifetime_sales, 0), COALESCE(rum.lifetime_revenue, 0.00),
        -- Direct metrics (only this category)
        COALESCE(dcm.product_count, 0),
        COALESCE(dcm.active_product_count, 0),
        COALESCE(dcm.replenishable_product_count, 0),
        COALESCE(dcm.current_stock_units, 0),
        COALESCE(dcm.current_stock_cost, 0.00),
        COALESCE(dcm.current_stock_retail, 0.00),
        COALESCE(dcm.sales_7d, 0), COALESCE(dcm.revenue_7d, 0.00),
        COALESCE(dcm.sales_30d, 0), COALESCE(dcm.revenue_30d, 0.00),
        COALESCE(dcm.profit_30d, 0.00), COALESCE(dcm.cogs_30d, 0.00),
        COALESCE(dcm.sales_365d, 0), COALESCE(dcm.revenue_365d, 0.00),
        COALESCE(dcm.lifetime_sales, 0), COALESCE(dcm.lifetime_revenue, 0.00),
        -- KPIs: calculate margin only for categories clearing the revenue threshold
        CASE
            WHEN COALESCE(rum.revenue_30d, 0) >= _min_revenue THEN
                ((COALESCE(rum.revenue_30d, 0) - COALESCE(rum.cogs_30d, 0)) / COALESCE(rum.revenue_30d, 1)) * 100.0
            ELSE NULL
        END,
        -- Growth metrics for rolled-up values
        std_numeric(safe_divide((rum.sales_30d - rupp.sales_prev_30d) * 100.0, rupp.sales_prev_30d), 2),
        std_numeric(safe_divide((rum.revenue_30d - rupp.revenue_prev_30d) * 100.0, rupp.revenue_prev_30d), 2)
    FROM AllCategories ac
    LEFT JOIN DirectCategoryMetrics dcm ON ac.cat_id = dcm.cat_id
    LEFT JOIN RolledUpMetrics rum ON ac.cat_id = rum.cat_id
    LEFT JOIN RolledUpPreviousPeriod rupp ON ac.cat_id = rupp.cat_id
    ON CONFLICT (category_id) DO UPDATE SET
        last_calculated = EXCLUDED.last_calculated,
        category_name = EXCLUDED.category_name,
        category_type = EXCLUDED.category_type,
        parent_id = EXCLUDED.parent_id,
        -- Rolled-up metrics
        product_count = EXCLUDED.product_count,
        active_product_count = EXCLUDED.active_product_count,
        replenishable_product_count = EXCLUDED.replenishable_product_count,
        current_stock_units = EXCLUDED.current_stock_units,
        current_stock_cost = EXCLUDED.current_stock_cost,
        current_stock_retail = EXCLUDED.current_stock_retail,
        sales_7d = EXCLUDED.sales_7d, revenue_7d = EXCLUDED.revenue_7d,
        sales_30d = EXCLUDED.sales_30d, revenue_30d = EXCLUDED.revenue_30d,
        profit_30d = EXCLUDED.profit_30d, cogs_30d = EXCLUDED.cogs_30d,
        sales_365d = EXCLUDED.sales_365d, revenue_365d = EXCLUDED.revenue_365d,
        lifetime_sales = EXCLUDED.lifetime_sales, lifetime_revenue = EXCLUDED.lifetime_revenue,
        -- Direct metrics
        direct_product_count = EXCLUDED.direct_product_count,
        direct_active_product_count = EXCLUDED.direct_active_product_count,
        direct_replenishable_product_count = EXCLUDED.direct_replenishable_product_count,
        direct_current_stock_units = EXCLUDED.direct_current_stock_units,
        direct_stock_cost = EXCLUDED.direct_stock_cost,
        direct_stock_retail = EXCLUDED.direct_stock_retail,
        direct_sales_7d = EXCLUDED.direct_sales_7d, direct_revenue_7d = EXCLUDED.direct_revenue_7d,
        direct_sales_30d = EXCLUDED.direct_sales_30d, direct_revenue_30d = EXCLUDED.direct_revenue_30d,
        direct_profit_30d = EXCLUDED.direct_profit_30d, direct_cogs_30d = EXCLUDED.direct_cogs_30d,
        direct_sales_365d = EXCLUDED.direct_sales_365d, direct_revenue_365d = EXCLUDED.direct_revenue_365d,
        direct_lifetime_sales = EXCLUDED.direct_lifetime_sales, direct_lifetime_revenue = EXCLUDED.direct_lifetime_revenue,
        avg_margin_30d = EXCLUDED.avg_margin_30d,
        sales_growth_30d_vs_prev = EXCLUDED.sales_growth_30d_vs_prev,
        revenue_growth_30d_vs_prev = EXCLUDED.revenue_growth_30d_vs_prev
    WHERE -- Only update if one of these sentinel columns has changed
        category_metrics.product_count IS DISTINCT FROM EXCLUDED.product_count OR
        category_metrics.active_product_count IS DISTINCT FROM EXCLUDED.active_product_count OR
        category_metrics.current_stock_units IS DISTINCT FROM EXCLUDED.current_stock_units OR
        category_metrics.sales_30d IS DISTINCT FROM EXCLUDED.sales_30d OR
        category_metrics.revenue_30d IS DISTINCT FROM EXCLUDED.revenue_30d OR
        category_metrics.lifetime_sales IS DISTINCT FROM EXCLUDED.lifetime_sales OR
        category_metrics.direct_product_count IS DISTINCT FROM EXCLUDED.direct_product_count OR
        category_metrics.direct_sales_30d IS DISTINCT FROM EXCLUDED.direct_sales_30d;
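    -- std_numeric and safe_divide above are project helper functions;
    -- hypothetical definitions consistent with how they are called here
    -- (NULL on a zero or NULL denominator; round to the given scale):
    --
    --   CREATE FUNCTION safe_divide(n NUMERIC, d NUMERIC) RETURNS NUMERIC
    --   LANGUAGE sql IMMUTABLE AS
    --   $fn$ SELECT CASE WHEN d IS NULL OR d = 0 THEN NULL ELSE n / d END $fn$;
    --
    --   CREATE FUNCTION std_numeric(v NUMERIC, s INT) RETURNS NUMERIC
    --   LANGUAGE sql IMMUTABLE AS
    --   $fn$ SELECT ROUND(v, s) $fn$;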

    -- Update calculate_status
    INSERT INTO public.calculate_status (module_name, last_calculation_timestamp)
    VALUES (_module_name, _start_time)
    ON CONFLICT (module_name) DO UPDATE SET last_calculation_timestamp = _start_time;
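    -- Hypothetical shape of the bookkeeping table (inferred from this upsert;
    -- the ON CONFLICT target implies a unique constraint on module_name):
    --
    --   CREATE TABLE IF NOT EXISTS public.calculate_status (
    --       module_name VARCHAR PRIMARY KEY,
    --       last_calculation_timestamp TIMESTAMPTZ NOT NULL
    --   );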

    RAISE NOTICE 'Finished % calculation. Duration: %', _module_name, clock_timestamp() - _start_time;
END $$;

-- Return metrics about the update operation for tracking
WITH update_stats AS (
    SELECT
        COUNT(*) AS total_categories,
        COUNT(*) FILTER (WHERE last_calculated >= NOW() - INTERVAL '5 minutes') AS rows_processed,
        COUNT(*) FILTER (WHERE category_type = 10) AS sections,
        COUNT(*) FILTER (WHERE category_type = 11) AS categories,
        COUNT(*) FILTER (WHERE category_type = 12) AS subcategories,
        SUM(product_count) AS total_products_rolled,
        SUM(direct_product_count) AS total_products_direct,
        SUM(sales_30d) AS total_sales_30d,
        SUM(revenue_30d) AS total_revenue_30d
    FROM public.category_metrics
)
SELECT
    rows_processed,
    total_categories,
    sections,
    categories,
    subcategories,
    total_products_rolled::int,
    total_products_direct::int,
    total_sales_30d::int,
    ROUND(total_revenue_30d, 2) AS total_revenue_30d
FROM update_stats;
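Because rolled-up and direct figures land in the same row, each category's descendant contribution falls out of a subtraction. An illustrative read query (not part of the diff), using only columns populated above:

SELECT category_name,
       revenue_30d,
       direct_revenue_30d,
       revenue_30d - direct_revenue_30d AS descendant_revenue_30d
FROM public.category_metrics
ORDER BY revenue_30d DESC
LIMIT 20;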
@@ -0,0 +1,185 @@
-- Description: Calculates and updates aggregated metrics per vendor.
-- Dependencies: product_metrics, products, purchase_orders, receivings, daily_product_snapshots, calculate_status table.
-- Frequency: Daily (after product_metrics update).

DO $$
DECLARE
    _module_name VARCHAR := 'vendor_metrics';
    _start_time  TIMESTAMPTZ := clock_timestamp();
BEGIN
    RAISE NOTICE 'Running % calculation...', _module_name;

    WITH VendorProductAggregates AS (
        -- Aggregate metrics from the product_metrics table per vendor
        SELECT
            p.vendor,
            COUNT(DISTINCT pm.pid) AS product_count,
            COUNT(DISTINCT CASE WHEN pm.is_visible THEN pm.pid END) AS active_product_count,
            COUNT(DISTINCT CASE WHEN pm.is_replenishable THEN pm.pid END) AS replenishable_product_count,
            SUM(pm.current_stock) AS current_stock_units,
            SUM(pm.current_stock_cost) AS current_stock_cost,
            SUM(pm.current_stock_retail) AS current_stock_retail,
            SUM(pm.on_order_qty) AS on_order_units,
            SUM(pm.on_order_cost) AS on_order_cost,
            -- Only count products with positive sales in each time period
            COUNT(DISTINCT CASE WHEN pm.sales_7d > 0 THEN pm.pid END) AS products_with_sales_7d,
            SUM(CASE WHEN pm.sales_7d > 0 THEN pm.sales_7d ELSE 0 END) AS sales_7d,
            SUM(CASE WHEN pm.revenue_7d > 0 THEN pm.revenue_7d ELSE 0 END) AS revenue_7d,

            COUNT(DISTINCT CASE WHEN pm.sales_30d > 0 THEN pm.pid END) AS products_with_sales_30d,
            SUM(CASE WHEN pm.sales_30d > 0 THEN pm.sales_30d ELSE 0 END) AS sales_30d,
            SUM(CASE WHEN pm.revenue_30d > 0 THEN pm.revenue_30d ELSE 0 END) AS revenue_30d,
            SUM(CASE WHEN pm.cogs_30d > 0 THEN pm.cogs_30d ELSE 0 END) AS cogs_30d,
            SUM(CASE WHEN pm.profit_30d != 0 THEN pm.profit_30d ELSE 0 END) AS profit_30d,

            COUNT(DISTINCT CASE WHEN pm.sales_365d > 0 THEN pm.pid END) AS products_with_sales_365d,
            SUM(CASE WHEN pm.sales_365d > 0 THEN pm.sales_365d ELSE 0 END) AS sales_365d,
            SUM(CASE WHEN pm.revenue_365d > 0 THEN pm.revenue_365d ELSE 0 END) AS revenue_365d,

            COUNT(DISTINCT CASE WHEN pm.lifetime_sales > 0 THEN pm.pid END) AS products_with_lifetime_sales,
            SUM(CASE WHEN pm.lifetime_sales > 0 THEN pm.lifetime_sales ELSE 0 END) AS lifetime_sales,
            SUM(CASE WHEN pm.lifetime_revenue > 0 THEN pm.lifetime_revenue ELSE 0 END) AS lifetime_revenue
        FROM public.product_metrics pm
        JOIN public.products p ON pm.pid = p.pid
        WHERE p.vendor IS NOT NULL AND p.vendor <> ''
        GROUP BY p.vendor
    ),
    PreviousPeriodVendorMetrics AS (
        -- Previous-period (days 30-59 before today) metrics for growth calculation;
        -- the WHERE clause bounds the snapshot scan to that window
        SELECT
            p.vendor,
            SUM(dps.units_sold)  AS sales_prev_30d,
            SUM(dps.net_revenue) AS revenue_prev_30d
        FROM public.daily_product_snapshots dps
        JOIN public.products p ON dps.pid = p.pid
        WHERE p.vendor IS NOT NULL AND p.vendor <> ''
          AND dps.snapshot_date >= CURRENT_DATE - INTERVAL '59 days'
          AND dps.snapshot_date <  CURRENT_DATE - INTERVAL '29 days'
        GROUP BY p.vendor
    ),
    VendorPOAggregates AS (
        -- Aggregate PO-related stats, including lead time measured from PO date to receiving date
        SELECT
            po.vendor,
            COUNT(DISTINCT po.po_id) AS po_count_365d,
            -- Average days between PO date and receiving date, floored at 1 day;
            -- the WHERE clause below guarantees both dates are present
            AVG(GREATEST(1, r.received_date::date - po.date::date))::int
                AS avg_lead_time_days_hist -- Avg lead time from HISTORICAL received POs
        FROM public.purchase_orders po
        -- Receivings are matched by product and constrained to receipts on or
        -- after the PO date (an approximation: the join carries no PO id)
        JOIN public.receivings r ON r.pid = po.pid
        WHERE po.vendor IS NOT NULL AND po.vendor <> ''
          AND po.date >= CURRENT_DATE - INTERVAL '1 year' -- Look at POs created in the last year
          AND po.status = 'done' -- Only calculate lead time on completed POs
          AND r.received_date IS NOT NULL
          AND po.date IS NOT NULL
          AND r.received_date >= po.date
        GROUP BY po.vendor
    ),
    AllVendors AS (
        -- Ensure all vendors from the products table are included
        SELECT DISTINCT vendor FROM public.products WHERE vendor IS NOT NULL AND vendor <> ''
    )
    INSERT INTO public.vendor_metrics (
        vendor_name, last_calculated,
        product_count, active_product_count, replenishable_product_count,
        current_stock_units, current_stock_cost, current_stock_retail,
        on_order_units, on_order_cost,
        po_count_365d, avg_lead_time_days,
        sales_7d, revenue_7d, sales_30d, revenue_30d, profit_30d, cogs_30d,
        sales_365d, revenue_365d, lifetime_sales, lifetime_revenue,
        avg_margin_30d,
        sales_growth_30d_vs_prev, revenue_growth_30d_vs_prev
    )
    SELECT
        v.vendor,
        _start_time,
        -- Base aggregates
        COALESCE(vpa.product_count, 0),
        COALESCE(vpa.active_product_count, 0),
        COALESCE(vpa.replenishable_product_count, 0),
        COALESCE(vpa.current_stock_units, 0),
        COALESCE(vpa.current_stock_cost, 0.00),
        COALESCE(vpa.current_stock_retail, 0.00),
        COALESCE(vpa.on_order_units, 0),
        COALESCE(vpa.on_order_cost, 0.00),
        -- PO aggregates
        COALESCE(vpoa.po_count_365d, 0),
        vpoa.avg_lead_time_days_hist, -- Can be NULL if no received POs
        -- Sales aggregates
        COALESCE(vpa.sales_7d, 0), COALESCE(vpa.revenue_7d, 0.00),
        COALESCE(vpa.sales_30d, 0), COALESCE(vpa.revenue_30d, 0.00),
        COALESCE(vpa.profit_30d, 0.00), COALESCE(vpa.cogs_30d, 0.00),
        COALESCE(vpa.sales_365d, 0), COALESCE(vpa.revenue_365d, 0.00),
        COALESCE(vpa.lifetime_sales, 0), COALESCE(vpa.lifetime_revenue, 0.00),
        -- KPIs: margin is NULL when 30-day revenue is zero or missing
        (vpa.profit_30d / NULLIF(vpa.revenue_30d, 0)) * 100.0,
        -- Growth metrics
        std_numeric(safe_divide((vpa.sales_30d - ppvm.sales_prev_30d) * 100.0, ppvm.sales_prev_30d), 2),
        std_numeric(safe_divide((vpa.revenue_30d - ppvm.revenue_prev_30d) * 100.0, ppvm.revenue_prev_30d), 2)
    FROM AllVendors v
    LEFT JOIN VendorProductAggregates vpa ON v.vendor = vpa.vendor
    LEFT JOIN VendorPOAggregates vpoa ON v.vendor = vpoa.vendor
    LEFT JOIN PreviousPeriodVendorMetrics ppvm ON v.vendor = ppvm.vendor
    ON CONFLICT (vendor_name) DO UPDATE SET
        last_calculated = EXCLUDED.last_calculated,
        product_count = EXCLUDED.product_count,
        active_product_count = EXCLUDED.active_product_count,
        replenishable_product_count = EXCLUDED.replenishable_product_count,
        current_stock_units = EXCLUDED.current_stock_units,
        current_stock_cost = EXCLUDED.current_stock_cost,
        current_stock_retail = EXCLUDED.current_stock_retail,
        on_order_units = EXCLUDED.on_order_units,
        on_order_cost = EXCLUDED.on_order_cost,
        po_count_365d = EXCLUDED.po_count_365d,
        avg_lead_time_days = EXCLUDED.avg_lead_time_days,
        sales_7d = EXCLUDED.sales_7d, revenue_7d = EXCLUDED.revenue_7d,
        sales_30d = EXCLUDED.sales_30d, revenue_30d = EXCLUDED.revenue_30d,
        profit_30d = EXCLUDED.profit_30d, cogs_30d = EXCLUDED.cogs_30d,
        sales_365d = EXCLUDED.sales_365d, revenue_365d = EXCLUDED.revenue_365d,
        lifetime_sales = EXCLUDED.lifetime_sales, lifetime_revenue = EXCLUDED.lifetime_revenue,
        avg_margin_30d = EXCLUDED.avg_margin_30d,
        sales_growth_30d_vs_prev = EXCLUDED.sales_growth_30d_vs_prev,
        revenue_growth_30d_vs_prev = EXCLUDED.revenue_growth_30d_vs_prev
    WHERE -- Only update if one of these sentinel columns has changed
        vendor_metrics.product_count IS DISTINCT FROM EXCLUDED.product_count OR
        vendor_metrics.active_product_count IS DISTINCT FROM EXCLUDED.active_product_count OR
        vendor_metrics.current_stock_units IS DISTINCT FROM EXCLUDED.current_stock_units OR
        vendor_metrics.on_order_units IS DISTINCT FROM EXCLUDED.on_order_units OR
        vendor_metrics.sales_30d IS DISTINCT FROM EXCLUDED.sales_30d OR
        vendor_metrics.revenue_30d IS DISTINCT FROM EXCLUDED.revenue_30d OR
        vendor_metrics.lifetime_sales IS DISTINCT FROM EXCLUDED.lifetime_sales;
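    -- If the number of rows actually written matters (the WHERE clause above
    -- skips unchanged rows), PL/pgSQL can capture it. Sketch only: _rows is
    -- hypothetical and would need to be added to the DECLARE section:
    --
    --   GET DIAGNOSTICS _rows = ROW_COUNT;
    --   RAISE NOTICE '% vendor rows inserted or updated', _rows;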

    -- Update calculate_status
    INSERT INTO public.calculate_status (module_name, last_calculation_timestamp)
    VALUES (_module_name, _start_time)
    ON CONFLICT (module_name) DO UPDATE SET last_calculation_timestamp = _start_time;

    RAISE NOTICE 'Finished % calculation. Duration: %', _module_name, clock_timestamp() - _start_time;
END $$;

-- Return metrics about the update operation for tracking
WITH update_stats AS (
    SELECT
        COUNT(*) AS total_vendors,
        COUNT(*) FILTER (WHERE last_calculated >= NOW() - INTERVAL '5 minutes') AS rows_processed,
        SUM(product_count) AS total_products,
        SUM(active_product_count) AS total_active_products,
        SUM(po_count_365d) AS total_pos_365d,
        AVG(avg_lead_time_days) AS overall_avg_lead_time
    FROM public.vendor_metrics
)
SELECT
    rows_processed,
    total_vendors,
    total_products::int,
    total_active_products::int,
    total_pos_365d::int,
    ROUND(overall_avg_lead_time, 1) AS overall_avg_lead_time
FROM update_stats;
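With lead time, stock, and on-order quantities in one row per vendor, a quick replenishment review falls out directly. An illustrative read query (not part of the diff), using only columns populated above:

SELECT vendor_name,
       avg_lead_time_days,
       current_stock_units,
       on_order_units,
       sales_30d
FROM public.vendor_metrics
WHERE replenishable_product_count > 0
ORDER BY sales_30d DESC
LIMIT 20;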