Enhance AI debug dialog

This commit is contained in:
2025-03-24 13:25:18 -04:00
parent 228ae8b2a9
commit 114018080a
3 changed files with 255 additions and 109 deletions

View File

@@ -373,8 +373,24 @@ async function generateDebugResponse(productsToUse, res) {
});
// Now use loadPrompt to get the actual combined prompt
const prompt = await loadPrompt(promptConnection, productsToUse, res.app.locals.pool);
const fullPrompt = prompt + "\n" + JSON.stringify(productsToUse);
const promptData = await loadPrompt(promptConnection, productsToUse, res.app.locals.pool);
const fullUserPrompt = promptData.userContent + "\n" + JSON.stringify(productsToUse);
const promptLength = promptData.systemInstructions.length + fullUserPrompt.length; // Store prompt length for performance metrics
console.log("📝 Generated prompt length:", promptLength);
console.log("📝 System instructions length:", promptData.systemInstructions.length);
console.log("📝 User content length:", fullUserPrompt.length);
// Format the messages as they would be sent to the API
const apiMessages = [
{
role: "system",
content: promptData.systemInstructions
},
{
role: "user",
content: fullUserPrompt
}
];
// Create the response with taxonomy stats
let categoriesCount = 0;
@@ -415,8 +431,9 @@ async function generateDebugResponse(productsToUse, res) {
}
: null,
basePrompt: systemPrompt ? systemPrompt.prompt_text + "\n\n" + generalPrompt.prompt_text : generalPrompt.prompt_text,
sampleFullPrompt: fullPrompt,
promptLength: fullPrompt.length,
sampleFullPrompt: fullUserPrompt,
promptLength: promptLength,
apiFormat: apiMessages,
promptSources: {
...(systemPrompt ? {
systemPrompt: {
@@ -836,11 +853,14 @@ ${JSON.stringify(mixedTaxonomy.sizeCategories)}${
----------Here is the product data to validate----------`;
// Return the filtered prompt with system instructions first
return systemInstructions + "\n\n" + combinedPrompt + "\n" + taxonomySection;
// Return both system instructions and user content separately
return {
systemInstructions,
userContent: combinedPrompt + "\n" + taxonomySection
};
}
// Generate the full unfiltered prompt
// Generate the full unfiltered prompt for taxonomy section
const taxonomySection = `
Available Categories:
${JSON.stringify(taxonomy.categories)}
@@ -868,8 +888,11 @@ ${JSON.stringify(taxonomy.artists)}
Here is the product data to validate:`;
// Return the full prompt with system instructions first
return systemInstructions + "\n\n" + combinedPrompt + "\n" + taxonomySection;
// Return both system instructions and user content separately
return {
systemInstructions,
userContent: combinedPrompt + "\n" + taxonomySection
};
} catch (error) {
console.error("Error loading prompt:", error);
throw error; // Re-throw to be handled by the calling function
@@ -917,18 +940,24 @@ router.post("/validate", async (req, res) => {
// Load the prompt with the products data to filter taxonomy
console.log("🔄 Loading prompt with filtered taxonomy...");
const prompt = await loadPrompt(connection, products, req.app.locals.pool);
const fullPrompt = prompt + "\n" + JSON.stringify(products);
promptLength = fullPrompt.length; // Store prompt length for performance metrics
const promptData = await loadPrompt(connection, products, req.app.locals.pool);
const fullUserPrompt = promptData.userContent + "\n" + JSON.stringify(products);
const promptLength = promptData.systemInstructions.length + fullUserPrompt.length; // Store prompt length for performance metrics
console.log("📝 Generated prompt length:", promptLength);
console.log("📝 System instructions length:", promptData.systemInstructions.length);
console.log("📝 User content length:", fullUserPrompt.length);
console.log("🤖 Sending request to OpenAI...");
const completion = await openai.chat.completions.create({
model: "o3-mini",
model: "gpt-4o",
messages: [
{
role: "system",
content: promptData.systemInstructions,
},
{
role: "user",
content: fullPrompt,
content: fullUserPrompt,
},
],
temperature: 0.2,