fix: handle multiple files in one request

Carlos
2025-05-24 15:58:45 -04:00
parent 9a78ec0c7c
commit 0ce4af8833


@@ -8,6 +8,20 @@ const MAX_FILE_SIZE = 30000; // ~30KB per file
 const MAX_TOTAL_FILES = 5; // Maximum number of files to inject
 const MAX_TOTAL_SIZE = 100000; // ~100KB total for all files
 
+/**
+ * Extracts file blocks from content
+ * @param {string} content - Content containing file blocks
+ * @returns {Array<{path: string, language: string, content: string}>} Array of file blocks
+ */
+function extractFileBlocks(content) {
+  const fileTagRegex = /<file path="(.+?)"(?: language="(.+?)")?>([\s\S]*?)<\/file>/g;
+  return [...content.matchAll(fileTagRegex)].map(([, path, lang, body]) => ({
+    path,
+    language: lang || "text",
+    content: body.trim(),
+  }));
+}
+
 /**
  * Saves an error with consistent formatting
  * @param {Error} error - The error to save
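
The extractFileBlocks helper added above expects file blocks in the form <file path="..." language="...">...</file>, with the language attribute optional. A minimal standalone sketch of the same matching logic, using illustrative sample input that is not part of the commit:

// Standalone sketch of the file-block format extractFileBlocks parses.
// The sample input below is illustrative only.
const fileTagRegex = /<file path="(.+?)"(?: language="(.+?)")?>([\s\S]*?)<\/file>/g;

const sample = [
  '<file path="src/index.js" language="javascript">console.log("hi");</file>',
  '<file path="notes.txt">plain text, language attribute omitted</file>',
].join("\n");

const blocks = [...sample.matchAll(fileTagRegex)].map(([, path, lang, body]) => ({
  path,
  language: lang || "text", // falls back to "text" when no language attribute is given
  content: body.trim(),
}));

console.log(blocks);
// [
//   { path: 'src/index.js', language: 'javascript', content: 'console.log("hi");' },
//   { path: 'notes.txt', language: 'text', content: 'plain text, language attribute omitted' }
// ]
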
@@ -155,6 +169,158 @@ async function injectFileContent(content) {
   }
 }
 
+/**
+ * Processes a single file with the model
+ * @param {Object} params - Parameters for processing
+ * @returns {Promise<Object>} The model's response for this file
+ */
+async function processSingleFile({
+  file,
+  model,
+  systemPrompt,
+  originalPrompt,
+  stream = false,
+  requestData
+}) {
+  try {
+    // Construct a focused prompt for this file
+    const focusedPrompt = `Analyze this specific file:\n\n<file path="${file.path}" language="${file.language}">\n${file.content}\n</file>\n\nContext from original prompt: ${originalPrompt}`;
+
+    const ollamaUrl = "http://localhost:11434/api/chat";
+    const payload = {
+      model,
+      messages: [
+        { role: "system", content: systemPrompt || "You are a helpful AI assistant." },
+        { role: "user", content: focusedPrompt }
+      ],
+      stream
+    };
+
+    // Save the individual file prompt
+    await savePrompt({
+      model,
+      prompt: focusedPrompt,
+      messages: payload.messages,
+      request_data: {
+        ...requestData,
+        is_multi_file: true,
+        file_index: file.path,
+        original_prompt: originalPrompt
+      }
+    });
+
+    if (stream) {
+      // For streaming, we'll handle this differently in the main function
+      return { type: 'stream', payload, file };
+    }
+
+    const response = await axios.post(ollamaUrl, payload);
+
+    return {
+      type: 'response',
+      file: file.path,
+      content: response.data.message?.content || response.data.response || '',
+      model: response.data.model
+    };
+  } catch (error) {
+    await logError(error, {
+      operation: 'Single file processing',
+      filePath: file.path,
+      model,
+      originalPrompt
+    });
+
+    return {
+      type: 'error',
+      file: file.path,
+      error: error.message
+    };
+  }
+}
+
+/**
+ * Handles multi-file processing
+ * @param {Object} params - Parameters for processing
+ * @returns {Promise<Object>} Combined responses for all files
+ */
+async function handleMultiFileProcessing({
+  files,
+  model,
+  systemPrompt,
+  originalPrompt,
+  stream,
+  requestData,
+  res
+}) {
+  if (stream) {
+    // For streaming, we need to handle each file sequentially
+    res.setHeader('Content-Type', 'application/x-ndjson');
+    res.setHeader('Transfer-Encoding', 'chunked');
+
+    for (const file of files) {
+      // Send a marker for the start of a new file
+      res.write(JSON.stringify({
+        type: 'file_start',
+        file: file.path
+      }) + '\n');
+
+      const { payload } = await processSingleFile({
+        file,
+        model,
+        systemPrompt,
+        originalPrompt,
+        stream: true,
+        requestData
+      });
+
+      // The payload does not carry the endpoint URL, so post to the chat endpoint directly
+      const ollamaResponse = await axios.post("http://localhost:11434/api/chat", payload, {
+        responseType: 'stream'
+      });
+
+      for await (const chunk of ollamaResponse.data) {
+        try {
+          const data = JSON.parse(chunk.toString());
+          res.write(JSON.stringify({
+            ...data,
+            file: file.path
+          }) + '\n');
+        } catch (err) {
+          console.error('Error parsing chunk:', err);
+        }
+      }
+
+      // Send a marker for the end of this file
+      res.write(JSON.stringify({
+        type: 'file_end',
+        file: file.path
+      }) + '\n');
+    }
+
+    res.end();
+    return null;
+  }
+
+  // For non-streaming, process files in parallel
+  const responses = await Promise.all(
+    files.map(file => processSingleFile({
+      file,
+      model,
+      systemPrompt,
+      originalPrompt,
+      stream: false,
+      requestData
+    }))
+  );
+
+  return {
+    success: true,
+    files: responses.map(r => ({
+      path: r.file,
+      content: r.content,
+      error: r.error,
+      model: r.model
+    }))
+  };
+}
+
 /**
  * Express handler for /api/generate
  * Handles both chat-style and prompt-style requests to Ollama
@@ -174,9 +340,10 @@ async function handleGenerate(req, res) {
     },
   };
 
+  // Validate request body
   if (!requestData.body) {
     await savePrompt({
-      model: requestData.body?.model || "codellama:7b",
+      model: "codellama:7b", // Use default model directly since body is null
       prompt: null,
      messages: null,
       request_data: requestData,
@@ -184,46 +351,81 @@ async function handleGenerate(req, res) {
     throw new Error("Request body is required");
   }
 
-  const isChatRequest =
-    requestData.body.messages && Array.isArray(requestData.body.messages);
-
-  if (
-    isChatRequest &&
-    (!requestData.body.messages.length ||
-      !requestData.body.messages[0].content)
-  ) {
-    throw new Error(
-      "Chat request must include at least one message with content",
-    );
-  }
-  if (!isChatRequest && !requestData.body.prompt) {
-    throw new Error("Prompt request must include a prompt field");
-  }
-
-  const model = requestData.body.model || "codellama:7b";
-  const stream =
-    requestData.body.stream !== undefined ? requestData.body.stream : true;
-
-  const cleanedRequest = {
-    model,
-    stream,
-    ...(isChatRequest
-      ? {
-          messages: await Promise.all(
-            requestData.body.messages.map(async (msg) => ({
-              role: msg.role || "user",
-              content:
-                msg.role === "user" && typeof msg.content === "string"
-                  ? await injectFileContent(msg.content.trim())
-                  : msg.content.trim(),
-            })),
-          ),
-        }
-      : {
-          prompt: await injectFileContent(requestData.body.prompt.trim()),
-        }),
-  };
-
+  const isChatRequest = requestData.body.messages && Array.isArray(requestData.body.messages);
+  const model = requestData.body.model || "codellama:7b";
+  const stream = requestData.body.stream !== undefined ? requestData.body.stream : true;
+  const systemPrompt = requestData.body.system;
+
+  // Get the content to process (either from messages or prompt)
+  let contentToProcess;
+  let originalPrompt;
+
+  if (isChatRequest) {
+    const lastUserMessage = requestData.body.messages
+      .filter(m => m.role === 'user')
+      .pop();
+
+    if (!lastUserMessage) {
+      throw new Error("No user message found in chat request");
+    }
+
+    contentToProcess = lastUserMessage.content;
+    originalPrompt = contentToProcess;
+  } else {
+    if (!requestData.body.prompt) {
+      throw new Error("Prompt request must include a prompt field");
+    }
+
+    contentToProcess = requestData.body.prompt;
+    originalPrompt = contentToProcess;
+  }
+
+  // Process the content to inject file contents
+  contentToProcess = await injectFileContent(contentToProcess);
+
+  // Extract file blocks
+  const fileBlocks = extractFileBlocks(contentToProcess);
+
+  // If we have multiple files, use the multi-file processing
+  if (fileBlocks.length > 1) {
+    const result = await handleMultiFileProcessing({
+      files: fileBlocks,
+      model,
+      systemPrompt,
+      originalPrompt,
+      stream,
+      requestData,
+      res
+    });
+
+    if (!stream && result) {
+      res.json(result);
+    }
+    return;
+  }
+
+  // For single file or no files, proceed with normal processing
+  const cleanedRequest = {
+    model,
+    stream,
+    ...(isChatRequest
+      ? {
+          messages: requestData.body.messages.map((msg, index) => ({
+            role: msg.role || "user",
+            content: index === requestData.body.messages.length - 1 && msg.role === 'user'
+              ? contentToProcess
+              : msg.content.trim(),
+          })),
+        }
+      : {
+          prompt: contentToProcess,
+        }),
+  };
+
+  if (systemPrompt) {
+    cleanedRequest.system = systemPrompt;
+  }
+
+  // Save the complete request data to database
   await savePrompt({
     model,
     prompt: isChatRequest ? null : cleanedRequest.prompt,
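
The routing above switches on the number of extracted file blocks: more than one block goes through handleMultiFileProcessing, while a single block (or none) follows the original single-request path, with the last user message or prompt replaced by the injected content. Illustrative request bodies for both cases (values are examples, not taken from the commit):

// Two <file> blocks -> fileBlocks.length > 1 -> handleMultiFileProcessing answers the request.
const multiFileRequest = {
  model: "codellama:7b",
  stream: false,
  system: "You are a strict code reviewer.",
  prompt: [
    "Review these files for bugs:",
    '<file path="src/a.js" language="javascript">export const a = 1;</file>',
    '<file path="src/b.js" language="javascript">export const b = 2;</file>',
  ].join("\n"),
};

// A single <file> block falls through to the normal chat path, where only the
// last user message is swapped for the processed content.
const singleFileChatRequest = {
  model: "codellama:7b",
  stream: true,
  messages: [
    { role: "user", content: 'Explain <file path="src/a.js">export const a = 1;</file>' },
  ],
};
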
@@ -231,6 +433,7 @@ async function handleGenerate(req, res) {
     request_data: requestData,
   });
 
+  // Handle the request with Ollama
   const ollamaUrl = isChatRequest
     ? "http://localhost:11434/api/chat"
     : "http://localhost:11434/api/generate";
@@ -257,13 +460,10 @@ async function handleGenerate(req, res) {
       });
       ollamaResponse.data.on("error", async (err) => {
-        await saveError({
-          error_message: err.message,
-          details: {
-            stack: err.stack,
-            request: requestData,
-            cleaned_request: cleanedRequest,
-          },
-        });
+        await logError(err, {
+          operation: 'Ollama stream error',
+          request: requestData,
+          cleaned_request: cleanedRequest
+        });
 
         if (!res.headersSent) {
           res.status(500).json({
@@ -285,23 +485,14 @@ async function handleGenerate(req, res) {
       res.status(ollamaResponse.status).json(ollamaResponse.data);
     }
   } catch (error) {
-    const errorDetails = {
-      message: error.message,
-      request: requestData,
-      response: error.response?.data,
-      stack: error.stack,
-    };
-
-    await saveError({
-      error_message: error.message,
-      details: errorDetails,
-    });
+    await logError(error, {
+      operation: 'Generate handler',
+      request: requestData
+    });
 
     if (!res.headersSent) {
       res.status(error.response?.status || 500).json({
-        error: error.response?.status
-          ? "Ollama API Error"
-          : "Internal Server Error",
+        error: error.response?.status ? "Ollama API Error" : "Internal Server Error",
         message: error.message,
         details: error.response?.data || undefined,
       });