apiAi/server.js

require("dotenv").config(); // Load environment variables from .env file
const express = require("express");
const axios = require("axios");
const bodyParser = require("body-parser");
const app = express();
const port = 5000; // Backend server will run on this port
// Middleware to parse JSON request bodies
app.use(bodyParser.json());
// API Key validation middleware
const validateApiKey = (req, res, next) => {
  const apiKey = req.headers["api-key"];
  const authHeader = req.headers["authorization"];
  // Try to extract token from Authorization: Bearer <token>
  let token = null;
  if (authHeader && authHeader.startsWith("Bearer ")) {
    token = authHeader.split(" ")[1];
  }
  const providedKey = apiKey || token;
  if (!providedKey) {
    return res.status(400).json({ error: "API key is missing" });
  }
  if (providedKey !== process.env.API_KEY) {
    return res.status(403).json({ error: "Invalid API key" });
  }
  next(); // Proceed if the API key is valid
};
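// For reference, validateApiKey accepts either of these header forms
// (illustrative requests; the expected value is API_KEY from .env):
//   api-key: <API_KEY>
//   Authorization: Bearer <API_KEY>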
app.get("/", (req, res) => {
  res.send("Hello from the backend server!");
});
// Forward request to localhost:11434 (ollama)
app.post("/api/generate/api/chat", validateApiKey, async (req, res) => {
  try {
    const { model, messages, system } = req.body;
    console.log(req.headers);
    const prompt = messages
      .map(
        (msg) =>
          `${msg.role === "system" ? "" : msg.role + ": "}${msg.content}`,
      )
      .join("\n");
    console.log("🧠 Prompt for Ollama:\n", prompt);
    const response = await axios.post(
      "http://localhost:11434/api/generate",
      {
        model: model || "deepseek-r1:latest",
        prompt,
        stream: true,
      },
      { responseType: "stream" },
    );
    res.setHeader("Content-Type", "application/json");
    res.setHeader("Transfer-Encoding", "chunked");
    let insideThink = false;
    response.data.on("data", (chunk) => {
      const lines = chunk.toString("utf8").split("\n").filter(Boolean);
      for (const line of lines) {
        try {
          const json = JSON.parse(line);
          const text = json.response;
          if (text?.includes("<think>")) {
            insideThink = true;
            continue;
          }
          if (text?.includes("</think>")) {
            insideThink = false;
            continue;
          }
          if (!insideThink && text) {
            const responseLine = JSON.stringify({
              message: {
                role: "assistant",
                content: text,
              },
              done: false,
            });
            res.write(responseLine + "\n");
          }
        } catch (err) {
          console.warn("Chunk parse failed:", err);
        }
      }
    });
    response.data.on("end", () => {
      res.write(JSON.stringify({ done: true }) + "\n");
      res.end();
    });
    response.data.on("error", (err) => {
      console.error("Ollama stream error:", err);
      res.write(
        JSON.stringify({ error: "Stream error", message: err.message }) + "\n",
      );
      res.end();
    });
  } catch (error) {
    console.error(
      "❌ Error communicating with Ollama:",
      error.response?.data || error.message,
    );
    res
      .status(500)
      .json({ error: "Internal Server Error", message: error.message });
  }
});
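// For reference, the handler above emits newline-delimited JSON, one object per chunk, e.g.:
//   {"message":{"role":"assistant","content":"Hello"},"done":false}
//   {"done":true}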
// Forward request to localhost:11434 (ollama)
// app.post("/api/generate", validateApiKey, async (req, res) => {
//   try {
//     // Forwarding the request to localhost:11434 with the prompt
//     const authHeader = req.headers["authorization"];
//     console.log("Authorization header:", authHeader);
//     console.log("checking api", apiKey !== process.env.API_KEY);
//     console.log("Body: ", req.body);
//     const response = await axios.post(
//       "http://localhost:11434/api/generate",
//       req.body,
//     );
//
//     // Send the response from localhost:11434 back to the client
//     res.status(response.status).json(response.data);
//   } catch (error) {
//     // Enhanced error logging
//     console.error(
//       "Error forwarding request to localhost:11434:",
//       error.response ? error.response.data : error.message,
//     );
//     res
//       .status(500)
//       .json({ error: "Internal Server Error", message: error.message });
//   }
// });
app.post("/api/generate", validateApiKey, async (req, res) => {
  try {
    const requestBody = req.body;
    console.log("Request to /api/generate. Body:", JSON.stringify(requestBody, null, 2));
    // console.log("Headers:", JSON.stringify(req.headers, null, 2)); // For more detailed debugging if needed
    let ollamaEndpointUrl;
    let payloadForOllama = { ...requestBody }; // Start with a copy of the incoming body
    // Ensure the model from avante.nvim config is respected or default if not provided in body
    if (!payloadForOllama.model && req.nvim_config_model) { // Assuming you might pass this if needed
      payloadForOllama.model = req.nvim_config_model; // Example: "codellama:7b"
    } else if (!payloadForOllama.model) {
      payloadForOllama.model = "codellama:7b"; // Fallback model
    }
    // Determine if this is a chat-style request or generate-style
    // avante.nvim (inheriting from ollama) might send a body for /api/chat or /api/generate
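    // Illustrative request bodies (field names follow Ollama's public API; values are examples only):
    //   chat-style:     { "model": "codellama:7b", "messages": [{ "role": "user", "content": "Hi" }], "stream": true }
    //   generate-style: { "model": "codellama:7b", "prompt": "Hi", "stream": true }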
    if (requestBody.messages && Array.isArray(requestBody.messages)) {
      ollamaEndpointUrl = "http://localhost:11434/api/chat";
      // Payload for /api/chat typically includes: model, messages, stream, options, format, keep_alive
      // Ensure essential fields are present if not already in requestBody
      payloadForOllama.stream = requestBody.stream !== undefined ? requestBody.stream : true;
      console.log(`Proxying to Ollama /api/chat with model ${payloadForOllama.model}`);
    } else if (requestBody.prompt) {
      ollamaEndpointUrl = "http://localhost:11434/api/generate";
      // Payload for /api/generate typically includes: model, prompt, system, stream, context, options, format, keep_alive
      // Ensure essential fields are present
      payloadForOllama.stream = requestBody.stream !== undefined ? requestBody.stream : true;
      console.log(`Proxying to Ollama /api/generate with model ${payloadForOllama.model}`);
    } else {
      console.error("Invalid request body: missing 'messages' or 'prompt'", requestBody);
      return res.status(400).json({ error: "Invalid request body: must contain 'messages' array or 'prompt' string" });
    }
    if (payloadForOllama.stream) {
      const ollamaResponse = await axios.post(
        ollamaEndpointUrl,
        payloadForOllama,
        { responseType: "stream" } // Crucial for getting a stream from Axios
      );
      // Set headers for streaming newline-delimited JSON (Ollama's stream format)
      res.setHeader("Content-Type", "application/x-ndjson");
      res.setHeader("Transfer-Encoding", "chunked");
      // Pipe the stream from Ollama directly to the client (avante.nvim)
      ollamaResponse.data.pipe(res);
      ollamaResponse.data.on('error', (err) => {
        console.error(`Ollama stream error for ${ollamaEndpointUrl}:`, err.message);
        if (!res.headersSent) {
          res.status(500).json({ error: "Ollama Stream Error", message: err.message });
        } else if (!res.writableEnded) {
          res.end(); // End the response if headers already sent and stream is not yet ended
        }
      });
      ollamaResponse.data.on('end', () => {
        if (!res.writableEnded) {
          res.end(); // Ensure response is ended when Ollama stream finishes
        }
      });
    } else {
      // Non-streaming request (less common for interactive LLM use)
      const ollamaResponse = await axios.post(
        ollamaEndpointUrl,
        payloadForOllama
      );
      res.status(ollamaResponse.status).json(ollamaResponse.data);
    }
  } catch (error) {
    let errorMessage = error.message;
    let errorData = null;
    let statusCode = 500;
    if (error.response) { // Error from Axios request to Ollama
      statusCode = error.response.status || 500;
      errorMessage = error.response.statusText || "Error communicating with Ollama";
      errorData = error.response.data;
      console.error(
        `Error proxying to Ollama (${error.config?.url || 'N/A'}) with status ${statusCode}:`,
        typeof errorData === 'string' || Buffer.isBuffer(errorData) ? errorData.toString() : errorData || errorMessage
      );
    } else if (error.request) { // No response received from Ollama
      console.error("Error proxying to Ollama: No response received", error.request);
      errorMessage = "No response from Ollama service";
    } else { // Other errors
      console.error("Error setting up proxy request to Ollama:", error.message);
    }
    if (!res.headersSent) {
      res.status(statusCode).json({ error: "Internal Server Error", message: errorMessage, details: errorData });
    } else if (!res.writableEnded) {
      res.end(); // Ensure the response is closed if an error occurs after starting to stream
    }
  }
});
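// Note: because the Ollama stream is piped through unchanged, clients of /api/generate receive
// Ollama's native NDJSON objects, e.g. {"message":{"role":"assistant","content":"..."},"done":false}
// for /api/chat and {"response":"...","done":false} for /api/generate (other fields omitted here).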
// It's advisable to remove or disable the old `/api/generate/api/chat` endpoint
// if `/api/generate` now correctly handles both Ollama's /api/chat and /api/generate requests.
// This avoids confusion and ensures avante.nvim (configured for `/api/generate`) hits the right logic.
// For example, comment out:
// app.post("/api/generate/api/chat", validateApiKey, async (req, res) => { ... });
// Start the server
app.listen(port, () => {
  console.log(`Server running on http://localhost:${port}`);
});
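
// Quick manual test of the proxy (a sketch, assuming API_KEY is exported in the shell and matches
// the value in .env, Ollama is listening on localhost:11434, and the model name is just an example):
//   curl -N http://localhost:5000/api/generate \
//     -H "Content-Type: application/json" \
//     -H "api-key: $API_KEY" \
//     -d '{"model": "codellama:7b", "prompt": "Say hello", "stream": true}'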