fix: revert max_completion_tokens to max_tokens (#1741)

This commit is contained in:
yetone
2025-03-27 16:53:55 +08:00
committed by GitHub
parent a06bb97db6
commit cd13eeb7d9
7 changed files with 6 additions and 9 deletions

View File

@@ -82,7 +82,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
-nvim_version: [ stable ]
+nvim_version: [ "v0.10.4" ]
luals_version: [ 3.13.6 ]
steps:
- name: Checkout Code

View File

@@ -67,7 +67,7 @@ For building binary if you wish to build from source, then `cargo` is required.
model = "gpt-4o", -- your desired model (or use gpt-4o, etc.)
timeout = 30000, -- Timeout in milliseconds, increase this for reasoning models
temperature = 0,
-max_completion_tokens = 8192, -- Increase this to include reasoning tokens (for reasoning models)
+max_tokens = 8192, -- Increase this to include reasoning tokens (for reasoning models)
--reasoning_effort = "medium", -- low|medium|high, only used for reasoning models
},
},

View File

@@ -35,7 +35,7 @@ Then enable it in avante.nvim:
api_key_name = 'GROQ_API_KEY',
endpoint = 'https://api.groq.com/openai/v1/',
model = 'llama-3.3-70b-versatile',
-max_completion_tokens = 32768, -- remember to increase this value, otherwise it will stop generating halfway
+max_tokens = 32768, -- remember to increase this value, otherwise it will stop generating halfway
},
},
--- ... existing configurations

View File

@@ -196,7 +196,7 @@ M._defaults = {
model = "gpt-4o",
timeout = 30000, -- Timeout in milliseconds, increase this for reasoning models
temperature = 0,
-max_completion_tokens = 16384, -- Increase this to include reasoning tokens (for reasoning models)
+max_tokens = 16384, -- Increase this to include reasoning tokens (for reasoning models)
reasoning_effort = "medium", -- low|medium|high, only used for reasoning models
},
---@type AvanteSupportedProvider
@@ -216,7 +216,7 @@ M._defaults = {
api_version = "2024-12-01-preview",
timeout = 30000, -- Timeout in milliseconds, increase this for reasoning models
temperature = 0,
-max_completion_tokens = 20480, -- Increase this to include reasoning tokens (for reasoning models)
+max_tokens = 20480, -- Increase this to include reasoning tokens (for reasoning models)
reasoning_effort = "medium", -- low|medium|high, only used for reasoning models
},
---@type AvanteSupportedProvider

View File

@@ -2,7 +2,7 @@
---@field deployment string
---@field api_version string
---@field temperature number
----@field max_completion_tokens number
+---@field max_tokens number
---@field reasoning_effort? string
local Utils = require("avante.utils")

View File

@@ -67,8 +67,6 @@ function M.set_allowed_params(provider_conf, request_body)
else
request_body.reasoning_effort = nil
end
--- If max_tokens is set in config, unset max_completion_tokens
-if request_body.max_tokens then request_body.max_completion_tokens = nil end
end
function M:parse_messages(opts)

View File

@@ -217,7 +217,6 @@ vim.g.avante_login = vim.g.avante_login
---@field __inherited_from? string
---@field temperature? number
---@field max_tokens? number
----@field max_completion_tokens? number
---@field reasoning_effort? string
---@field display_name? string
---