fix: handle multiple files in one request
@@ -8,6 +8,20 @@ const MAX_FILE_SIZE = 30000; // ~30KB per file
+const MAX_TOTAL_FILES = 5; // Maximum number of files to inject
+const MAX_TOTAL_SIZE = 100000; // ~100KB total for all files
+
+/**
+ * Extracts file blocks from content
+ * @param {string} content - Content containing file blocks
+ * @returns {Array<{path: string, language: string, content: string}>} Array of file blocks
+ */
+function extractFileBlocks(content) {
+  const fileTagRegex = /<file path="(.+?)"(?: language="(.+?)")?>([\s\S]*?)<\/file>/g;
+  return [...content.matchAll(fileTagRegex)].map(([, path, lang, body]) => ({
+    path,
+    language: lang || "text",
+    content: body.trim(),
+  }));
+}
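For illustration only (not part of the commit), a sketch of how extractFileBlocks behaves on a sample input, assuming the <file> tag convention the regex encodes; the language attribute is optional and falls back to "text":

// Hypothetical input and output:
const sample = 'Review this:\n<file path="src/util.js" language="javascript">\nexport const x = 1;\n</file>';
extractFileBlocks(sample);
// → [{ path: "src/util.js", language: "javascript", content: "export const x = 1;" }]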
 
 /**
  * Saves an error with consistent formatting
  * @param {Error} error - The error to save
@@ -155,6 +169,158 @@ async function injectFileContent(content) {
   }
 }
+
+/**
+ * Processes a single file with the model
+ * @param {Object} params - Parameters for processing
+ * @returns {Promise<Object>} The model's response for this file
+ */
+async function processSingleFile({
+  file,
+  model,
+  systemPrompt,
+  originalPrompt,
+  stream = false,
+  requestData
+}) {
+  try {
+    // Construct a focused prompt for this file
+    const focusedPrompt = `Analyze this specific file:\n\n<file path="${file.path}" language="${file.language}">\n${file.content}\n</file>\n\nContext from original prompt: ${originalPrompt}`;
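For one extracted block, the focused prompt string would render roughly like this (illustrative values, shown as comments):

// Analyze this specific file:
//
// <file path="src/util.js" language="javascript">
// export const x = 1;
// </file>
//
// Context from original prompt: Review these files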
+
+    const ollamaUrl = "http://localhost:11434/api/chat";
+    const payload = {
+      model,
+      messages: [
+        { role: "system", content: systemPrompt || "You are a helpful AI assistant." },
+        { role: "user", content: focusedPrompt }
+      ],
+      stream
+    };
+
+    // Save the individual file prompt
+    await savePrompt({
+      model,
+      prompt: focusedPrompt,
+      messages: payload.messages,
+      request_data: {
+        ...requestData,
+        is_multi_file: true,
+        file_index: file.path,
+        original_prompt: originalPrompt
+      }
+    });
+
+    if (stream) {
+      // For streaming, we'll handle this differently in the main function
+      return { type: 'stream', payload, url: ollamaUrl, file };
+    }
+
+    const response = await axios.post(ollamaUrl, payload);
+    return {
+      type: 'response',
+      file: file.path,
+      content: response.data.message?.content || response.data.response || '',
+      model: response.data.model
+    };
+  } catch (error) {
+    await logError(error, {
+      operation: 'Single file processing',
+      filePath: file.path,
+      model,
+      originalPrompt
+    });
+    return {
+      type: 'error',
+      file: file.path,
+      error: error.message
+    };
+  }
+}
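A minimal usage sketch (illustrative, not from the commit; assumes the surrounding helpers like savePrompt and logError are available): in non-streaming mode the function resolves to a tagged result, so callers can branch on type without a try/catch:

const result = await processSingleFile({
  file: { path: "src/util.js", language: "javascript", content: "export const x = 1;" },
  model: "codellama:7b",
  systemPrompt: "You are a code reviewer.",
  originalPrompt: "Review these files",
  stream: false,
  requestData: {}
});
// → { type: 'response', file, content, model } on success,
//   { type: 'error', file, error } on failure.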
+
+/**
+ * Handles multi-file processing
+ * @param {Object} params - Parameters for processing
+ * @returns {Promise<Object>} Combined responses for all files
+ */
+async function handleMultiFileProcessing({
+  files,
+  model,
+  systemPrompt,
+  originalPrompt,
+  stream,
+  requestData,
+  res
+}) {
+  if (stream) {
+    // For streaming, we need to handle each file sequentially
+    res.setHeader('Content-Type', 'application/x-ndjson');
+    res.setHeader('Transfer-Encoding', 'chunked');
+
+    for (const file of files) {
+      // Send a marker for the start of a new file
+      res.write(JSON.stringify({
+        type: 'file_start',
+        file: file.path
+      }) + '\n');
+
+      const { payload, url } = await processSingleFile({
+        file,
+        model,
+        systemPrompt,
+        originalPrompt,
+        stream: true,
+        requestData
+      });
+
+      const ollamaResponse = await axios.post(url, payload, {
+        responseType: 'stream'
+      });
+
+      for await (const chunk of ollamaResponse.data) {
+        try {
+          const data = JSON.parse(chunk.toString());
+          res.write(JSON.stringify({
+            ...data,
+            file: file.path
+          }) + '\n');
+        } catch (err) {
+          console.error('Error parsing chunk:', err);
+        }
+      }
+
+      // Send a marker for the end of this file
+      res.write(JSON.stringify({
+        type: 'file_end',
+        file: file.path
+      }) + '\n');
+    }
+
+    res.end();
+    return null;
+  }
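Assuming this endpoint shape, a client could consume the per-file NDJSON stream like this sketch (Node 18+; the host and port are assumptions, and the buffering guards against JSON objects split across chunks):

const response = await fetch("http://localhost:3000/api/generate", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ model: "codellama:7b", stream: true, prompt: "..." })
});

let buffered = "";
for await (const chunk of response.body) {
  buffered += Buffer.from(chunk).toString();
  const lines = buffered.split("\n");
  buffered = lines.pop(); // keep a partial trailing line for the next chunk
  for (const line of lines.filter(Boolean)) {
    const event = JSON.parse(line);
    if (event.type === "file_start") console.log(`--- ${event.file} ---`);
    else if (event.type !== "file_end") process.stdout.write(event.message?.content || "");
  }
}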
+
+  // For non-streaming, process files in parallel
+  const responses = await Promise.all(
+    files.map(file => processSingleFile({
+      file,
+      model,
+      systemPrompt,
+      originalPrompt,
+      stream: false,
+      requestData
+    }))
+  );
+
+  return {
+    success: true,
+    files: responses.map(r => ({
+      path: r.file,
+      content: r.content,
+      error: r.error,
+      model: r.model
+    }))
+  };
+}
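Because processSingleFile catches its own errors and returns a tagged object, the Promise.all above cannot reject on a single bad file; a combined response might look like this (values illustrative):

{
  "success": true,
  "files": [
    { "path": "src/a.js", "content": "Analysis of a.js ...", "model": "codellama:7b" },
    { "path": "src/b.js", "error": "connect ECONNREFUSED 127.0.0.1:11434" }
  ]
}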
 
 /**
  * Express handler for /api/generate
  * Handles both chat-style and prompt-style requests to Ollama
@@ -174,9 +340,10 @@ async function handleGenerate(req, res) {
     },
   };
 
+  // Validate request body
   if (!requestData.body) {
     await savePrompt({
-      model: requestData.body?.model || "codellama:7b",
+      model: "codellama:7b", // Use default model directly since body is null
       prompt: null,
      messages: null,
      request_data: requestData,
@@ -184,46 +351,81 @@ async function handleGenerate(req, res) {
     throw new Error("Request body is required");
   }
 
-  const isChatRequest =
-    requestData.body.messages && Array.isArray(requestData.body.messages);
-
-  if (
-    isChatRequest &&
-    (!requestData.body.messages.length ||
-      !requestData.body.messages[0].content)
-  ) {
-    throw new Error(
-      "Chat request must include at least one message with content",
-    );
-  }
-  if (!isChatRequest && !requestData.body.prompt) {
-    throw new Error("Prompt request must include a prompt field");
-  }
-
+  const isChatRequest = requestData.body.messages && Array.isArray(requestData.body.messages);
   const model = requestData.body.model || "codellama:7b";
-  const stream =
-    requestData.body.stream !== undefined ? requestData.body.stream : true;
+  const stream = requestData.body.stream !== undefined ? requestData.body.stream : true;
   const systemPrompt = requestData.body.system;
+
+  // Get the content to process (either from messages or prompt)
+  let contentToProcess;
+  let originalPrompt;
+
+  if (isChatRequest) {
+    const lastUserMessage = requestData.body.messages
+      .filter(m => m.role === 'user')
+      .pop();
+
+    if (!lastUserMessage) {
+      throw new Error("No user message found in chat request");
+    }
+
+    contentToProcess = lastUserMessage.content;
+    originalPrompt = contentToProcess;
+  } else {
+    if (!requestData.body.prompt) {
+      throw new Error("Prompt request must include a prompt field");
+    }
+    contentToProcess = requestData.body.prompt;
+    originalPrompt = contentToProcess;
+  }
+
+  // Process the content to inject file contents
+  contentToProcess = await injectFileContent(contentToProcess);
+
+  // Extract file blocks
+  const fileBlocks = extractFileBlocks(contentToProcess);
+
+  // If we have multiple files, use the multi-file processing
+  if (fileBlocks.length > 1) {
+    const result = await handleMultiFileProcessing({
+      files: fileBlocks,
+      model,
+      systemPrompt,
+      originalPrompt,
+      stream,
+      requestData,
+      res
+    });
+
+    if (!stream && result) {
+      res.json(result);
+    }
+    return;
+  }
+
+  // For single file or no files, proceed with normal processing
   const cleanedRequest = {
     model,
     stream,
     ...(isChatRequest
       ? {
-          messages: await Promise.all(
-            requestData.body.messages.map(async (msg) => ({
-              role: msg.role || "user",
-              content:
-                msg.role === "user" && typeof msg.content === "string"
-                  ? await injectFileContent(msg.content.trim())
-                  : msg.content.trim(),
-            })),
-          ),
+          messages: requestData.body.messages.map((msg, index) => ({
+            role: msg.role || "user",
+            content: index === requestData.body.messages.length - 1 && msg.role === 'user'
+              ? contentToProcess
+              : msg.content.trim(),
+          })),
         }
       : {
-          prompt: await injectFileContent(requestData.body.prompt.trim()),
+          prompt: contentToProcess,
         }),
   };
 
+  if (systemPrompt) {
+    cleanedRequest.system = systemPrompt;
+  }
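As an illustration of the single-file path (values hypothetical), a chat request ends up forwarded to Ollama in this shape, with only the final user message replaced by the injected content:

// cleanedRequest for a two-message chat whose last user message contained <file> tags:
{
  model: "codellama:7b",
  stream: true,
  messages: [
    { role: "system", content: "Be concise." },
    { role: "user", content: contentToProcess } // file contents already injected
  ],
  system: systemPrompt // present only when the client sent a system prompt
}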
 
   // Save the complete request data to database
   await savePrompt({
     model,
     prompt: isChatRequest ? null : cleanedRequest.prompt,
@@ -231,6 +433,7 @@ async function handleGenerate(req, res) {
     request_data: requestData,
   });
 
+  // Handle the request with Ollama
   const ollamaUrl = isChatRequest
     ? "http://localhost:11434/api/chat"
     : "http://localhost:11434/api/generate";
@@ -257,13 +460,10 @@ async function handleGenerate(req, res) {
     });
 
     ollamaResponse.data.on("error", async (err) => {
-      await saveError({
-        error_message: err.message,
-        details: {
-          stack: err.stack,
-          request: requestData,
-          cleaned_request: cleanedRequest,
-        },
+      await logError(err, {
+        operation: 'Ollama stream error',
+        request: requestData,
+        cleaned_request: cleanedRequest
       });
       if (!res.headersSent) {
         res.status(500).json({
@@ -285,23 +485,14 @@ async function handleGenerate(req, res) {
       res.status(ollamaResponse.status).json(ollamaResponse.data);
     }
   } catch (error) {
-    const errorDetails = {
-      message: error.message,
-      request: requestData,
-      response: error.response?.data,
-      stack: error.stack,
-    };
-
-    await saveError({
-      error_message: error.message,
-      details: errorDetails,
+    await logError(error, {
+      operation: 'Generate handler',
+      request: requestData
     });
 
     if (!res.headersSent) {
       res.status(error.response?.status || 500).json({
-        error: error.response?.status
-          ? "Ollama API Error"
-          : "Internal Server Error",
+        error: error.response?.status ? "Ollama API Error" : "Internal Server Error",
         message: error.message,
         details: error.response?.data || undefined,
       });