Files
avante.nvim/lua/avante/providers/azure.lua
kernitus 10ce065d9e feat: update openai/azure params (#1604)
* feat(openai): use max_completion_tokens & reasoning_effort params

* feat(openai): use developer prompt for reasoning models

* docs: update openai config in readme

* refactor: follow lua style quotes

* fix(azure): rename max_tokens to max_completion_tokens

* refactor(azure): remove duplicate field

* refactor: update types

* refactor(azure): update type
2025-03-18 19:40:20 +08:00

59 lines
1.6 KiB
Lua

---@class AvanteAzureProvider: AvanteDefaultBaseProvider
---@field deployment string
---@field api_version string
---@field temperature number
---@field max_completion_tokens number
---@field reasoning_effort? string
local Utils = require("avante.utils")
local P = require("avante.providers")
local O = require("avante.providers").openai
-- Azure OpenAI provider module table. Any method not defined in this file
-- (e.g. parse_messages, parse_api_key, set_reasoning_params) is resolved
-- from the OpenAI provider table `O` via the metatable set below.
---@class AvanteProviderFunctor
local M = {}
-- Name of the environment variable holding the Azure OpenAI API key.
M.api_key_name = "AZURE_OPENAI_API_KEY"
-- Inherit from OpenAI class: failed lookups on M fall back to O.
setmetatable(M, { __index = O })
--- Build the curl request spec for an Azure OpenAI chat-completions call.
--- Message parsing and reasoning-model parameter handling come from the
--- OpenAI provider through the metatable inheritance set up above.
---@param prompt_opts table prompt options forwarded to parse_messages
---@return table request spec: url, proxy, insecure, headers, body
function M:parse_curl_args(prompt_opts)
  local conf, body_opts = P.parse_config(self)

  local headers = { ["Content-Type"] = "application/json" }
  if P.env.require_api_key(conf) then
    -- Entra ID auth sends a bearer token; plain keys use the `api-key` header.
    if conf.entra then
      headers["Authorization"] = "Bearer " .. self.parse_api_key()
    else
      headers["api-key"] = self.parse_api_key()
    end
  end

  -- NOTE: When using reasoning models set supported parameters
  self.set_reasoning_params(conf, body_opts)

  -- Azure routes by deployment name rather than a `model` body field.
  ---@diagnostic disable-next-line: undefined-field
  local path = "/openai/deployments/" .. conf.deployment .. "/chat/completions?api-version=" .. conf.api_version

  return {
    url = Utils.url_join(conf.endpoint, path),
    proxy = conf.proxy,
    insecure = conf.allow_insecure,
    headers = headers,
    body = vim.tbl_deep_extend("force", {
      messages = self:parse_messages(prompt_opts),
      stream = true,
    }, body_opts),
  }
end

return M