feat: add ollama as supported provider (#1543)

* feat: add ollama as supported provider

*This implementation currently only works with `stream = true`*
- Uses the native Ollama API (`/api/chat`) rather than the OpenAI-compatible endpoint, and allows passing additional model options
- Properly passes the system prompt to the API

Use ollama as provider in opts like this:
opts = {
  debug = true,
  provider = "ollama",
  ollama = {
    api_key_name = "",
    endpoint = "http://127.0.0.1:11434",
    model = "qwen2.5-coder:latest",
    options = {
      num_ctx = 32768,
      temperature = 0,
    },
    stream = true,
  },
}
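
(Note: `api_key_name = ""` disables the API-key lookup, since a local Ollama server typically doesn't require one.)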

* fix: ollama types

---------

Co-authored-by: jtabke <25010496+jtabke@users.noreply.github.com>
yetone authored 2025-03-10 02:23:56 +08:00, committed by GitHub
parent 4976807a33, commit 750ee80971
5 changed files with 100 additions and 5 deletions

View File

@@ -20,7 +20,7 @@ local M = {}
 ---@field custom_tools AvanteLLMToolPublic[]
 M._defaults = {
   debug = false,
-  ---@alias ProviderName "claude" | "openai" | "azure" | "gemini" | "vertex" | "cohere" | "copilot" | "bedrock" | string
+  ---@alias ProviderName "claude" | "openai" | "azure" | "gemini" | "vertex" | "cohere" | "copilot" | "bedrock" | "ollama" | string
   provider = "claude",
   -- WARNING: Since auto-suggestions are a high-frequency operation and therefore expensive,
   -- currently designating it as `copilot` provider is dangerous because: https://github.com/yetone/avante.nvim/issues/1048
@@ -255,6 +255,15 @@ M._defaults = {
     temperature = 0,
     max_tokens = 4096,
   },
+  ---@type AvanteSupportedProvider
+  ollama = {
+    endpoint = "http://127.0.0.1:11434",
+    timeout = 30000, -- Timeout in milliseconds
+    options = {
+      temperature = 0,
+      num_ctx = 4096,
+    },
+  },
   ---To add support for custom provider, follow the format below
   ---See https://github.com/yetone/avante.nvim/wiki#custom-providers for more details
   ---@type {[string]: AvanteProvider}
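
For reference, a minimal user-side setup exercising these new defaults might look like the sketch below (assuming the standard `require("avante").setup()` entry point; user options are merged over `M._defaults`, so only overrides need to be given):

    -- Sketch: select the new first-class ollama provider and override a default.
    require("avante").setup({
      provider = "ollama",
      ollama = {
        model = "qwen2.5-coder:latest", -- the defaults above ship no model, so one must be set
        options = { num_ctx = 32768 },  -- overrides the default num_ctx = 4096
      },
    })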

View File

@@ -18,6 +18,7 @@ local DressingState = { winid = nil, input_winid = nil, input_bufnr = nil }
 ---@field gemini AvanteProviderFunctor
 ---@field cohere AvanteProviderFunctor
 ---@field bedrock AvanteBedrockProviderFunctor
+---@field ollama AvanteProviderFunctor
 local M = {}
 ---@class EnvironmentHandler
@@ -152,8 +153,13 @@ M = setmetatable(M, {
   __index = function(t, k)
     local provider_config = M.get_config(k)
+    if Config.vendors[k] ~= nil and k == "ollama" then
+      Utils.warn(
+        "ollama is now a first-class provider in avante.nvim, please stop using vendors to define ollama, for migration guide please refer to: https://github.com/yetone/avante.nvim/wiki/Custom-providers#ollama"
+      )
+    end
     ---@diagnostic disable: undefined-field,no-unknown,inject-field
-    if Config.vendors[k] ~= nil then
+    if Config.vendors[k] ~= nil and k ~= "ollama" then
       if provider_config.parse_response_data ~= nil then
         Utils.error("parse_response_data is not supported for avante.nvim vendors")
       end
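
The migration the warning points to amounts to moving the ollama table out of `vendors` and selecting it as a regular provider. A before/after sketch (the `vendors` shape shown is one common pre-existing definition and may differ from any given setup):

    -- Before: ollama defined as a custom vendor (now triggers the warning above)
    vendors = {
      ollama = {
        __inherited_from = "openai",
        api_key_name = "",
        endpoint = "http://127.0.0.1:11434/v1",
        model = "qwen2.5-coder:latest",
      },
    },

    -- After: ollama configured as a first-class provider
    provider = "ollama",
    ollama = {
      endpoint = "http://127.0.0.1:11434",
      model = "qwen2.5-coder:latest",
    },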

View File

@@ -0,0 +1,70 @@
+local Utils = require("avante.utils")
+local P = require("avante.providers")
+
+---@class AvanteProviderFunctor
+local M = {}
+
+M.api_key_name = "" -- Ollama typically doesn't require API keys for local use
+
+M.role_map = {
+  user = "user",
+  assistant = "assistant",
+}
+
+M.parse_messages = P.openai.parse_messages
+M.is_o_series_model = P.openai.is_o_series_model
+
+function M:is_disable_stream() return false end
+
+function M:parse_stream_data(ctx, data, handler_opts)
+  local ok, json_data = pcall(vim.json.decode, data)
+  if not ok or not json_data then
+    -- Add debug logging
+    Utils.debug("Failed to parse JSON", data)
+    return
+  end
+
+  if json_data.message and json_data.message.content then
+    local content = json_data.message.content
+    if content and content ~= "" then handler_opts.on_chunk(content) end
+  end
+
+  if json_data.done then
+    handler_opts.on_stop({ reason = "complete" })
+    return
+  end
+end
+
+---@param prompt_opts AvantePromptOptions
+function M:parse_curl_args(prompt_opts)
+  local provider_conf, request_body = P.parse_config(self)
+
+  if not provider_conf.model or provider_conf.model == "" then error("Ollama model must be specified in config") end
+  if not provider_conf.endpoint then error("Ollama requires endpoint configuration") end
+
+  return {
+    url = Utils.url_join(provider_conf.endpoint, "/api/chat"),
+    headers = {
+      ["Content-Type"] = "application/json",
+      ["Accept"] = "application/json",
+    },
+    body = vim.tbl_deep_extend("force", {
+      model = provider_conf.model,
+      messages = self:parse_messages(prompt_opts),
+      stream = true,
+      system = prompt_opts.system_prompt,
+    }, request_body),
+  }
+end
+
+---@param result table
+M.on_error = function(result)
+  local error_msg = "Ollama API error"
+  if result.body then
+    local ok, body = pcall(vim.json.decode, result.body)
+    if ok and body.error then error_msg = body.error end
+  end
+  Utils.error(error_msg, { title = "Ollama" })
+end
+
+return M
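
For context, `parse_stream_data` consumes Ollama's `/api/chat` streaming format: one JSON object per line, with incremental `message.content` chunks and a final object whose `done` field is true. A tiny harness showing how such lines drive the handlers (illustrative payloads; real responses carry additional fields such as `model` and `created_at`):

    local lines = {
      '{"message":{"role":"assistant","content":"Hel"},"done":false}',
      '{"message":{"role":"assistant","content":"lo"},"done":false}',
      '{"message":{"role":"assistant","content":""},"done":true}',
    }
    local handler_opts = {
      on_chunk = function(s) io.write(s) end,                  -- prints "Hello"
      on_stop = function(t) print("\nstop: " .. t.reason) end, -- "complete"
    }
    for _, line in ipairs(lines) do
      M:parse_stream_data({}, line, handler_opts) -- M is the module above
    end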

View File

@@ -67,12 +67,11 @@ function M.is_o_series_model(model) return model and string.match(model, "^o%d+"
 function M:parse_messages(opts)
   local messages = {}
-  local provider = P[Config.provider]
-  local base, _ = P.parse_config(provider)
+  local provider_conf, _ = P.parse_config(self)
   -- NOTE: Handle the case where the selected model is the `o1` model
   -- "o1" models are "smart" enough to understand user prompt as a system prompt in this context
-  if self.is_o_series_model(base.model) then
+  if self.is_o_series_model(provider_conf.model) then
     table.insert(messages, { role = "user", content = opts.system_prompt })
   else
     table.insert(messages, { role = "system", content = opts.system_prompt })