diff --git a/README.md b/README.md index 489c350..b97c410 100644 --- a/README.md +++ b/README.md @@ -1163,14 +1163,17 @@ This approach ensures that the apply model can quickly and accurately merge your ## Ollama -ollama is a first-class provider for avante.nvim. You can use it by setting `provider = "ollama"` in the configuration, and set the `model` field in `ollama` to the model you want to use. For example: +Ollama is a first-class provider for avante.nvim. To start using it you need to set `provider = "ollama"` +in the configuration and set the `model` field in `ollama` to the model you want to use. Ollama is disabled +by default; you need to provide an implementation for its `is_env_set` method to properly enable it. +For example: ```lua provider = "ollama", providers = { ollama = { - endpoint = "http://localhost:11434", model = "qwq:32b", + is_env_set = require("avante.providers.ollama").check_endpoint_alive, }, } ``` diff --git a/README_zh.md b/README_zh.md index cede9ef..ac48f3a 100644 --- a/README_zh.md +++ b/README_zh.md @@ -790,13 +790,14 @@ return { ## Ollama -ollama 是 avante.nvim 的一流提供者。您可以通过在配置中设置 `provider = "ollama"` 来使用它,并在 `ollama` 中设置 `model` 字段为您想要使用的模型。例如: +ollama 是 avante.nvim 的一流提供者。要开始使用它,您需要在配置中设置 `provider = "ollama"`,并将 `ollama` 中的 `model` 字段设置为您想要使用的模型。Ollama 默认是禁用的,您需要为其 `is_env_set` 方法提供一个实现来正确地启用它。例如: ```lua provider = "ollama", providers = { ollama = { model = "qwq:32b", + is_env_set = require("avante.providers.ollama").check_endpoint_alive, }, } ``` diff --git a/lua/avante/providers/ollama.lua b/lua/avante/providers/ollama.lua index da66e88..8966a8a 100644 --- a/lua/avante/providers/ollama.lua +++ b/lua/avante/providers/ollama.lua @@ -23,6 +23,10 @@ M.role_map = { assistant = "assistant", } +-- Ollama is disabled by default. Users should override the is_env_set() +-- implementation in their configs to enable it. 
There is a helper, +-- check_endpoint_alive(), that tests whether the configured +-- endpoint is alive and can be used in place of is_env_set(). function M.is_env_set() return false end function M:parse_messages(opts) @@ -262,17 +266,15 @@ local curl_errors = { [60] = "Peer certificate cannot be authenticated with known CA certificates (SSL cert issue)", } --- List available models using Ollama's tags API -function M:list_models() - -- Return cached models if available - if self._model_list_cache then return self._model_list_cache end - +---Queries the configured endpoint for the list of available models +---@param opts AvanteProviderFunctor Provider settings +---@param timeout? integer Timeout in milliseconds +---@return table[]|nil models List of available models +---@return string|nil error Error message in case of failure +local function query_models(opts, timeout) -- Parse provider config and construct tags endpoint URL - local provider_conf = Providers.parse_config(self) - if not provider_conf.endpoint then - Utils.error("Ollama requires endpoint configuration") - return {} - end + local provider_conf = Providers.parse_config(opts) + if not provider_conf.endpoint then return nil, "Ollama requires endpoint configuration" end local curl = require("plenary.curl") local tags_url = Utils.url_join(provider_conf.endpoint, "/api/tags") @@ -280,7 +282,7 @@ function M:list_models() ["Content-Type"] = "application/json", ["Accept"] = "application/json", } - local headers = Utils.tbl_override(base_headers, self.extra_headers) + local headers = Utils.tbl_override(base_headers, opts.extra_headers) -- Request the model tags from Ollama local response = {} @@ -289,22 +291,33 @@ function M:list_models() callback = function(output) response = output end, on_error = function(err) response = { exit = err.exit } end, }) - local job_ok, error = pcall(job.wait, job, 10000) + local job_ok, error = pcall(job.wait, job, timeout or 10000) if not job_ok then - Utils.error("Ollama: curl
command invocation failed: " .. error) - return {} + return nil, "Ollama: curl command invocation failed: " .. error elseif response.exit ~= 0 then local err_msg = curl_errors[response.exit] or ("curl returned error: " .. response.exit) - Utils.error("Ollama: " .. err_msg) - return {} + return nil, "Ollama: " .. err_msg elseif response.status ~= 200 then - Utils.error("Failed to fetch Ollama models: " .. (response.body or response.status)) - return {} + return nil, "Failed to fetch Ollama models: " .. (response.body or response.status) end -- Parse the response body local ok, res_body = pcall(vim.json.decode, response.body) - if not ok or not res_body.models then return {} end + if not ok then return nil, "Failed to parse model list query response" end + return res_body.models or {} +end + +-- List available models using Ollama's tags API +function M:list_models() + -- Return cached models if available + if self._model_list_cache then return self._model_list_cache end + + local result, error = query_models(self) + if not result then + assert(error) + Utils.error(error) + return {} + end -- Helper to format model display string from its details local function format_display_name(details) @@ -317,7 +330,7 @@ function M:list_models() -- Format the models list local models = {} - for _, model in ipairs(res_body.models) do + for _, model in ipairs(result) do local details = model.details or {} local display = format_display_name(details) table.insert(models, { @@ -333,4 +346,9 @@ function M:list_models() return models end +function M.check_endpoint_alive() + local result = query_models(Providers.ollama, 1000) + return result ~= nil +end + return M