feat: add more context to saved prompts
@@ -3,20 +3,86 @@ const { savePrompt, saveError } = require('../controller/generate');
 
 /**
  * Express handler for /api/generate
  * Handles both chat-style and prompt-style requests to Ollama
  */
 async function handleGenerate(req, res) {
   const requestBody = req.body;
   try {
-    // Determine if this is a chat-style request or generate-style
-    const isChatRequest = requestBody.messages && Array.isArray(requestBody.messages);
-    const model = requestBody.model || 'codellama:7b';
-    const stream = requestBody.stream !== undefined ? requestBody.stream : true;
+    // Log the complete request for debugging
+    console.log('📝 Incoming request:', {
+      method: req.method,
+      url: req.originalUrl,
+      params: req.params,
+      query: req.query,
+      body: req.body,
+      headers: {
+        'content-type': req.headers['content-type'],
+        'user-agent': req.headers['user-agent'],
+        'api-key': req.headers['api-key'] ? '[REDACTED]' : undefined
+      }
+    });
+
+    // Save the prompt to database
+    const requestData = {
+      method: req.method,
+      url: req.originalUrl,
+      params: req.params,
+      body: req.body,
+      query: req.query,
+      headers: {
+        'content-type': req.headers['content-type'],
+        'user-agent': req.headers['user-agent'],
+        'api-key': req.headers['api-key'] ? '[REDACTED]' : undefined
+      }
+    };
+
+    try {
+      // Save request data even if body is empty
+      await savePrompt({
+        model: requestData.body?.model || 'codellama:7b',
+        prompt: null,
+        messages: null,
+        request_data: requestData
+      });
+
+      // Validate request body
+      if (!requestData.body) {
+        throw new Error('Request body is required');
+      }
+
+      // Determine request type and validate required fields
+      const isChatRequest = requestData.body.messages && Array.isArray(requestData.body.messages);
+      if (isChatRequest && (!requestData.body.messages.length || !requestData.body.messages[0].content)) {
+        throw new Error('Chat request must include at least one message with content');
+      }
+      if (!isChatRequest && !requestData.body.prompt) {
+        throw new Error('Prompt request must include a prompt field');
+      }
+
+      // Set defaults and validate model
+      const model = requestData.body.model || 'codellama:7b';
+      const stream = requestData.body.stream !== undefined ? requestData.body.stream : true;
+
+      // Clean and prepare the prompt/messages
+      const cleanedRequest = {
+        model,
+        stream,
+        ...(isChatRequest
+          ? {
+              messages: requestData.body.messages.map(msg => ({
+                role: msg.role || 'user',
+                content: msg.content.trim()
+              }))
+            }
+          : {
+              prompt: requestData.body.prompt.trim()
+            }
+        )
+      };
+
       // Save the complete request data to database
       await savePrompt({
         model,
-        prompt: isChatRequest ? null : requestBody.prompt,
-        messages: isChatRequest ? requestBody.messages : null
+        prompt: isChatRequest ? null : cleanedRequest.prompt,
+        messages: isChatRequest ? cleanedRequest.messages : null,
+        request_data: requestData // Save all request data for debugging/tracking
       });
 
       // Prepare Ollama endpoint and payload
@@ -24,34 +90,79 @@ async function handleGenerate(req, res) {
         ? 'http://localhost:11434/api/chat'
         : 'http://localhost:11434/api/generate';
 
-      const payload = isChatRequest
-        ? { model, messages: requestBody.messages, stream }
-        : { model, prompt: requestBody.prompt, stream };
-
       if (stream) {
-        const ollamaResponse = await axios.post(ollamaUrl, payload, { responseType: 'stream' });
+        // Handle streaming response
+        const ollamaResponse = await axios.post(ollamaUrl, cleanedRequest, {
+          responseType: 'stream',
+          headers: {
+            'Content-Type': 'application/json',
+            'Accept': 'application/x-ndjson'
+          }
+        });
 
         res.setHeader('Content-Type', 'application/x-ndjson');
         res.setHeader('Transfer-Encoding', 'chunked');
         ollamaResponse.data.pipe(res);
 
+        // Accumulate the streamed response text. pipe() above already forwards
+        // every chunk to the client, so nothing is re-written here; a chunk may
+        // also carry several (or partial) NDJSON lines, so parse line by line.
+        let responseContent = '';
+        ollamaResponse.data.on('data', (chunk) => {
+          for (const line of chunk.toString().split('\n')) {
+            if (!line.trim()) continue;
+            try {
+              const data = JSON.parse(line);
+              // /api/generate streams { response }; /api/chat streams { message: { content } }
+              responseContent += data.response || data.message?.content || '';
+            } catch (err) {
+              console.error('Error parsing chunk:', err);
+            }
+          }
+        });
 
         ollamaResponse.data.on('error', async (err) => {
-          await saveError({ error_message: err.message, details: { stack: err.stack } });
+          await saveError({
+            error_message: err.message,
+            details: {
+              stack: err.stack,
+              request: requestData,
+              cleaned_request: cleanedRequest
+            }
+          });
           if (!res.headersSent) {
-            res.status(500).json({ error: 'Ollama Stream Error', message: err.message });
+            res.status(500).json({
+              error: 'Ollama Stream Error',
+              message: err.message
+            });
           } else if (!res.writableEnded) {
             res.end();
           }
         });
 
         ollamaResponse.data.on('end', () => {
-          if (!res.writableEnded) res.end();
+          if (!res.writableEnded) {
+            res.end();
+          }
         });
       } else {
-        const ollamaResponse = await axios.post(ollamaUrl, payload);
+        // Handle non-streaming response
+        const ollamaResponse = await axios.post(ollamaUrl, cleanedRequest);
         res.status(ollamaResponse.status).json(ollamaResponse.data);
       }
     } catch (error) {
-      await saveError({ error_message: error.message, details: error.response?.data || error.stack });
+      // Enhanced error handling
+      const errorDetails = {
+        message: error.message,
+        request: requestData,
+        response: error.response?.data,
+        stack: error.stack
+      };
+
+      await saveError({
+        error_message: error.message,
+        details: errorDetails
+      });
+
       if (!res.headersSent) {
-        res.status(500).json({ error: 'Internal Server Error', message: error.message });
+        res.status(error.response?.status || 500).json({
+          error: error.response?.status ? 'Ollama API Error' : 'Internal Server Error',
+          message: error.message,
+          details: error.response?.data || undefined
+        });
       } else if (!res.writableEnded) {
         res.end();
       }
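For reference, a minimal client for the updated handler might look like the sketch below. Only the /api/generate route, the request shape, and the NDJSON response format come from the diff above; the port (3000) and the use of Node 18+'s global fetch are assumptions for illustration.

// Sketch only: POST a chat-style request and reassemble the NDJSON stream.
// Assumes the app listens on http://localhost:3000 (not specified in the commit).
async function demo() {
  const res = await fetch('http://localhost:3000/api/generate', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: 'codellama:7b',
      stream: true,
      messages: [{ role: 'user', content: 'Write a haiku about Express.' }]
    })
  });

  const decoder = new TextDecoder();
  let buffered = '';
  let full = '';
  for await (const chunk of res.body) {
    buffered += decoder.decode(chunk, { stream: true });
    const lines = buffered.split('\n');
    buffered = lines.pop(); // keep a partial trailing line for the next chunk
    for (const line of lines) {
      if (!line.trim()) continue;
      const data = JSON.parse(line);
      full += data.message?.content || data.response || '';
    }
  }
  console.log(full);
}

demo().catch(console.error);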
@@ -4,6 +4,7 @@ CREATE TABLE IF NOT EXISTS prompts (
   model VARCHAR(100) NOT NULL,
   prompt TEXT,
   messages JSONB,
+  request_data JSONB,
   created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
 );
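Because request_data is stored as JSONB, saved prompts can later be filtered on nested request fields. A sketch, inside an async context, assuming Sequelize's nested JSON querying on Postgres (the Prompt model and column names come from this commit; the query itself is illustrative):

// Illustrative only: recent prompts whose logged request targeted a given model.
// Nested-object JSONB filtering is a Sequelize/Postgres feature; verify it
// against your Sequelize version before relying on it.
const recent = await Prompt.findAll({
  where: { request_data: { body: { model: 'codellama:7b' } } },
  order: [['created_at', 'DESC']],
  limit: 10
});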
@@ -19,6 +19,11 @@ const Prompt = sequelize.define('Prompt', {
     type: DataTypes.JSONB,
     allowNull: true
   },
+  request_data: {
+    type: DataTypes.JSONB,
+    allowNull: true,
+    comment: 'Stores complete request data including params, body, query, and headers'
+  },
   created_at: {
     type: DataTypes.DATE,
     defaultValue: DataTypes.NOW
@@ -39,10 +44,11 @@ const Prompt = sequelize.define('Prompt', {
  * @param {string} data.model - The model name
  * @param {string} [data.prompt] - The prompt text (for non-chat requests)
  * @param {Array} [data.messages] - The messages array (for chat requests)
+ * @param {Object} [data.request_data] - Complete request data including params, body, query, and headers
  * @returns {Promise<Object>} The created prompt record
  */
-Prompt.createPrompt = async function({ model, prompt, messages }) {
-  return this.create({ model, prompt, messages });
+Prompt.createPrompt = async function({ model, prompt, messages, request_data }) {
+  return this.create({ model, prompt, messages, request_data });
 };
 
 /**
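Calling the updated helper follows directly from its new signature; the values below are illustrative, and the call is assumed to run inside an async function:

// Sketch only: persist a prompt-style request together with its request context.
const record = await Prompt.createPrompt({
  model: 'codellama:7b',
  prompt: 'Explain JSONB in one sentence.',
  messages: null,
  request_data: {
    method: 'POST',
    url: '/api/generate',
    body: { model: 'codellama:7b', prompt: 'Explain JSONB in one sentence.' }
  }
});
console.log(record.toJSON());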