adding more robust answers
server.js
@@ -125,32 +125,140 @@ app.post("/api/generate/api/chat", validateApiKey, async (req, res) => {
  }
});

// Forward request to localhost:11434 (ollama)
// app.post("/api/generate", validateApiKey, async (req, res) => {
//   try {
//     // Forwarding the request to localhost:11434 with the prompt
//     const authHeader = req.headers["authorization"];
//     console.log("Authorization header:", authHeader);
//     console.log("checking api", apiKey !== process.env.API_KEY);
//     console.log("Body: ", req.body);
//     const response = await axios.post(
//       "http://localhost:11434/api/generate",
//       req.body,
//     );
//
//     // Send the response from localhost:11434 back to the client
//     res.status(response.status).json(response.data);
//   } catch (error) {
//     // Enhanced error logging
//     console.error(
//       "Error forwarding request to localhost:11434:",
//       error.response ? error.response.data : error.message,
//     );
//     res
//       .status(500)
//       .json({ error: "Internal Server Error", message: error.message });
//   }
// });

app.post("/api/generate", validateApiKey, async (req, res) => {
|
||||
try {
|
||||
// Forwarding the request to localhost:11434 with the prompt
|
||||
const authHeader = req.headers["authorization"];
|
||||
console.log("Authorization header:", authHeader);
|
||||
console.log("checking api", apiKey !== process.env.API_KEY);
|
||||
console.log("Body: ", req.body);
|
||||
const response = await axios.post(
|
||||
"http://localhost:11434/api/generate",
|
||||
req.body,
|
||||
);
|
||||
const requestBody = req.body;
|
||||
console.log("Request to /api/generate. Body:", JSON.stringify(requestBody, null, 2));
|
||||
// console.log("Headers:", JSON.stringify(req.headers, null, 2)); // For more detailed debugging if needed
|
||||
|
||||
let ollamaEndpointUrl;
|
||||
let payloadForOllama = { ...requestBody }; // Start with a copy of the incoming body
|
||||
|
||||
// Ensure the model from avante.nvim config is respected or default if not provided in body
|
||||
if (!payloadForOllama.model && req.nvim_config_model) { // Assuming you might pass this if needed
|
||||
payloadForOllama.model = req.nvim_config_model; // Example: "codellama:7b"
|
||||
} else if (!payloadForOllama.model) {
|
||||
payloadForOllama.model = "codellama:7b"; // Fallback model
|
||||
}
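
    // Worked example (illustrative): a body of { "prompt": "hi" } with no model
    // becomes { prompt: "hi", model: "codellama:7b" } here; the branch below then
    // defaults stream to true.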

    // Determine whether this is a chat-style or a generate-style request;
    // avante.nvim (inheriting from ollama) may send a body shaped for either /api/chat or /api/generate
    if (requestBody.messages && Array.isArray(requestBody.messages)) {
      ollamaEndpointUrl = "http://localhost:11434/api/chat";
      // Payload for /api/chat typically includes: model, messages, stream, options, format, keep_alive
      // Ensure essential fields are present if not already in requestBody
      payloadForOllama.stream = requestBody.stream !== undefined ? requestBody.stream : true;
      console.log(`Proxying to Ollama /api/chat with model ${payloadForOllama.model}`);
    } else if (requestBody.prompt) {
      ollamaEndpointUrl = "http://localhost:11434/api/generate";
      // Payload for /api/generate typically includes: model, prompt, system, stream, context, options, format, keep_alive
      // Ensure essential fields are present
      payloadForOllama.stream = requestBody.stream !== undefined ? requestBody.stream : true;
      console.log(`Proxying to Ollama /api/generate with model ${payloadForOllama.model}`);
    } else {
      console.error("Invalid request body: missing 'messages' or 'prompt'", requestBody);
      return res.status(400).json({ error: "Invalid request body: must contain 'messages' array or 'prompt' string" });
    }
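
    // Illustrative shapes for the two request styles (field names per Ollama's API):
    //   chat-style:     { "model": "codellama:7b", "messages": [{ "role": "user", "content": "hi" }] }
    //   generate-style: { "model": "codellama:7b", "prompt": "hi" }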

    if (payloadForOllama.stream) {
      const ollamaResponse = await axios.post(
        ollamaEndpointUrl,
        payloadForOllama,
        { responseType: "stream" } // Crucial for getting a stream from Axios
      );

      // Set headers for streaming newline-delimited JSON (Ollama's stream format)
      res.setHeader("Content-Type", "application/x-ndjson");
      res.setHeader("Transfer-Encoding", "chunked");

      // Pipe the stream from Ollama directly to the client (avante.nvim)
      ollamaResponse.data.pipe(res);

      ollamaResponse.data.on('error', (err) => {
        console.error(`Ollama stream error for ${ollamaEndpointUrl}:`, err.message);
        if (!res.headersSent) {
          res.status(500).json({ error: "Ollama Stream Error", message: err.message });
        } else if (!res.writableEnded) {
          res.end(); // End the response if headers were already sent and the stream has not ended
        }
      });

      ollamaResponse.data.on('end', () => {
        if (!res.writableEnded) {
          res.end(); // Ensure the response is ended when the Ollama stream finishes
        }
      });
    } else {
      // Non-streaming request (less common for interactive LLM use)
      const ollamaResponse = await axios.post(
        ollamaEndpointUrl,
        payloadForOllama
      );
      res.status(ollamaResponse.status).json(ollamaResponse.data);
    }
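
    // Note: in the non-streaming path above, Ollama replies with a single
    // complete JSON object ("response" or "message" plus done: true), which
    // is relayed to the client unchanged.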
  } catch (error) {
    // Enhanced error logging
    let errorMessage = error.message;
    let errorData = null;
    let statusCode = 500;

    if (error.response) { // Error response received from Ollama
      statusCode = error.response.status || 500;
      errorMessage = error.response.statusText || "Error communicating with Ollama";
      errorData = error.response.data;
      console.error(
        `Error proxying to Ollama (${error.config?.url || 'N/A'}) with status ${statusCode}:`,
        typeof errorData === 'string' || Buffer.isBuffer(errorData) ? errorData.toString() : errorData || errorMessage
      );
    } else if (error.request) { // No response received from Ollama
      console.error("Error proxying to Ollama: No response received", error.request);
      errorMessage = "No response from Ollama service";
    } else { // Other errors (e.g. while setting up the request)
      console.error("Error setting up proxy request to Ollama:", error.message);
    }

    if (!res.headersSent) {
      res.status(statusCode).json({ error: "Internal Server Error", message: errorMessage, details: errorData });
    } else if (!res.writableEnded) {
      res.end(); // Ensure the response is closed if an error occurs after streaming has started
    }
  }
});
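
// Example client call (illustrative sketch, not part of the server: it assumes
// the proxy listens on `port` and that validateApiKey reads the key from the
// Authorization header):
//
//   const axios = require("axios");
//   axios.post(`http://localhost:${port}/api/generate`, {
//     model: "codellama:7b",
//     prompt: "Say hello",
//     stream: false,
//   }, {
//     headers: { Authorization: `Bearer ${process.env.API_KEY}` },
//   }).then((r) => console.log(r.data));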

// It's advisable to remove or disable the old `/api/generate/api/chat` endpoint
// if `/api/generate` now correctly handles both Ollama's /api/chat and /api/generate requests.
// This avoids confusion and ensures avante.nvim (configured for `/api/generate`) hits the right logic.
// For example, comment out:
// app.post("/api/generate/api/chat", validateApiKey, async (req, res) => { ... });

// Start the server
app.listen(port, () => {
  console.log(`Server running on http://localhost:${port}`);
});