feat: supports OpenAI Response API and copilot's gpt-5-codex model (#2802)

* fix: upgrade vscode version

* feat: support openai response api

* refactor: refine todos tools

* fix: trim suffix empty lines
This commit is contained in:
yetone
2025-10-30 02:18:48 +08:00
committed by GitHub
parent 7e9f7b57de
commit b95e27b5a6
15 changed files with 484 additions and 169 deletions

View File

@@ -286,10 +286,15 @@ M._defaults = {
model = "gpt-4o",
timeout = 30000, -- Timeout in milliseconds, increase this for reasoning models
context_window = 128000, -- Number of tokens to send to the model for context
use_response_api = false, -- Set to true to use OpenAI's new Response API (/responses) instead of Chat Completions API (/chat/completions)
support_previous_response_id = true, -- OpenAI Response API supports previous_response_id for stateful conversations
-- NOTE: Response API automatically manages conversation state using previous_response_id for tool calling
extra_request_body = {
temperature = 0.75,
max_completion_tokens = 16384, -- Increase this to include reasoning tokens (for reasoning models)
reasoning_effort = "medium", -- low|medium|high, only used for reasoning models
max_completion_tokens = 16384, -- Increase this to include reasoning tokens (for reasoning models). For Response API, will be converted to max_output_tokens
reasoning_effort = "medium", -- low|medium|high, only used for reasoning models. For Response API, this will be converted to reasoning.effort
-- background = false, -- Response API only: set to true to start a background task
-- NOTE: previous_response_id is automatically managed by the provider for tool calling - don't set manually
},
},
---@type AvanteSupportedProvider
@@ -300,8 +305,12 @@ M._defaults = {
allow_insecure = false, -- Allow insecure server connections
timeout = 30000, -- Timeout in milliseconds
context_window = 64000, -- Number of tokens to send to the model for context
use_response_api = true, -- Copilot uses Response API input format
support_previous_response_id = false, -- Copilot doesn't support previous_response_id, must send full history
-- NOTE: Copilot doesn't support previous_response_id, always sends full conversation history including tool_calls
-- NOTE: Response API doesn't support some parameters like top_p, frequency_penalty, presence_penalty
extra_request_body = {
temperature = 0.75,
-- temperature is not supported by Response API for reasoning models
max_tokens = 20480,
},
},

View File

@@ -104,14 +104,14 @@ end
---@param cb fun(error: string | nil): nil
function M.generate_todos(user_input, cb)
local system_prompt =
[[You are an expert coding assistant. Please generate a todo list to complete the task based on the user input and pass the todo list to the add_todos tool.]]
[[You are an expert coding assistant. Please generate a todo list to complete the task based on the user input and pass the todo list to the write_todos tool.]]
local messages = {
{ role = "user", content = user_input },
}
local provider = Providers[Config.provider]
local tools = {
require("avante.llm_tools.add_todos"),
require("avante.llm_tools.write_todos"),
}
local history_messages = {}
@@ -153,7 +153,7 @@ function M.generate_todos(user_input, cb)
if stop_opts.reason == "tool_use" then
local pending_tools = History.get_pending_tools(history_messages)
for _, pending_tool in ipairs(pending_tools) do
if pending_tool.state == "generated" and pending_tool.name == "add_todos" then
if pending_tool.state == "generated" and pending_tool.name == "write_todos" then
local result = LLMTools.process_tool_use(tools, pending_tool, {
session_ctx = {},
on_complete = function() cb() end,
@@ -442,30 +442,6 @@ function M.generate_prompts(opts)
messages = vim.list_extend(messages, { { role = "user", content = opts.instructions } })
end
if opts.get_todos then
local todos = opts.get_todos()
if todos and #todos > 0 then
-- Remove existing todos-related messages - use more precise <todos> tag matching
messages = vim
.iter(messages)
:filter(function(msg)
if not msg.content or type(msg.content) ~= "string" then return true end
-- Only filter out messages that start with <todos> and end with </todos> to avoid accidentally deleting other messages
return not msg.content:match("^<todos>.*</todos>$")
end)
:totable()
-- Add the latest todos to the end of messages, wrapped in <todos> tags
local todos_content = vim.json.encode(todos)
table.insert(messages, {
role = "user",
content = "<todos>\n" .. todos_content .. "\n</todos>",
visible = false,
is_context = true,
})
end
end
opts.session_ctx = opts.session_ctx or {}
opts.session_ctx.system_prompt = system_prompt
opts.session_ctx.messages = messages
@@ -1885,7 +1861,7 @@ function M._stream(opts)
if #unfinished_todos > 0 then
message = History.Message:new(
"user",
"<system-reminder>You should use tool calls to answer the question, for example, use update_todo_status if the task step is done or cancelled.</system-reminder>",
"<system-reminder>You should use tool calls to answer the question, for example, use write_todos if the task step is done or cancelled.</system-reminder>",
{
visible = false,
}

View File

@@ -783,8 +783,8 @@ M._tools = {
require("avante.llm_tools.ls"),
require("avante.llm_tools.grep"),
require("avante.llm_tools.delete_tool_use_messages"),
require("avante.llm_tools.add_todos"),
require("avante.llm_tools.update_todo_status"),
require("avante.llm_tools.read_todos"),
require("avante.llm_tools.write_todos"),
{
name = "read_file_toplevel_symbols",
description = [[Read the top-level symbols of a file in current project scope.

View File

@@ -0,0 +1,40 @@
-- Tool definition: read_todos
-- Exposes the sidebar's current TODO list to the LLM as a read-only tool.
local Base = require("avante.llm_tools.base")
---@class AvanteLLMTool
local M = setmetatable({}, Base)
M.name = "read_todos"
M.description = "Read TODOs from the current task"
---@type AvanteLLMToolParam
M.param = {
type = "table",
-- The tool takes no parameters.
fields = {},
usage = {},
}
---@type AvanteLLMToolReturn[]
M.returns = {
{
name = "todos",
description = "The TODOs from the current task",
type = "array",
},
}
-- No custom rendering in the sidebar for this tool.
M.on_render = function() return {} end
-- Read the todo list from the sidebar chat history.
-- @param input table  unused; the tool declares no fields
-- @param opts table   may carry an `on_complete(result, err)` callback
-- @return todos|false|nil, err|nil  -- synchronous callers get the todo table;
--         when a callback is supplied the JSON-encoded list is delivered to it
--         and `nil, nil` signals the deferred completion; `false, err` when the
--         sidebar is unavailable
function M.func(input, opts)
local on_complete = opts.on_complete
local sidebar = require("avante").get()
if not sidebar then return false, "Avante sidebar not found" end
-- Default to an empty list when no todos have been recorded yet.
local todos = sidebar.chat_history.todos or {}
if on_complete then
-- Async path: hand the encoded list to the callback.
on_complete(vim.json.encode(todos), nil)
return nil, nil
end
return todos, nil
end
return M

View File

@@ -8,6 +8,15 @@ local M = setmetatable({}, Base)
M.name = "think"
-- Gate for the "think" tool: disabled for gpt-5 family models.
-- NOTE(review): presumably gpt-5 models do their own reasoning so the explicit
-- think tool is redundant — confirm intent.
-- NOTE(review): `Providers[Config.provider]` is indexed without a nil guard;
-- a misconfigured provider name would make `provider.model` error — verify
-- Config always resolves to a known provider here.
function M.enabled()
local Providers = require("avante.providers")
local Config = require("avante.config")
local provider = Providers[Config.provider]
local model = provider.model
-- "%-" escapes the dash, which is a magic character in Lua patterns.
if model and model:match("gpt%-5") then return false end
return true
end
M.description =
[[Use the tool to think about something. It will not obtain new information or make any changes to the repository, but just log the thought. Use it when complex reasoning or brainstorming is needed. For example, if you explore the repo and discover the source of a bug, call this tool to brainstorm several unique ways of fixing the bug, and assess which change(s) are likely to be simplest and most effective. Alternatively, if you receive some test results, call this tool to brainstorm ways to fix the failing tests.

View File

@@ -1,66 +0,0 @@
-- Tool definition: update_todo_status
-- Updates the status of a single TODO entry in the sidebar, located by id.
local Base = require("avante.llm_tools.base")
---@class AvanteLLMTool
local M = setmetatable({}, Base)
M.name = "update_todo_status"
M.description = "Update the status of TODO"
---@type AvanteLLMToolParam
M.param = {
type = "table",
fields = {
{
name = "id",
description = "The ID of the TODO to update",
type = "string",
},
{
name = "status",
description = "The status of the TODO to update",
type = "string",
choices = { "todo", "doing", "done", "cancelled" },
},
},
}
---@type AvanteLLMToolReturn[]
M.returns = {
{
name = "success",
description = "Whether the TODO was updated successfully",
type = "boolean",
},
{
name = "error",
description = "Error message if the TODOs could not be updated",
type = "string",
optional = true,
},
}
-- No custom rendering in the sidebar for this tool.
M.on_render = function() return {} end
---@type AvanteLLMToolFunc<{ id: string, status: string }>
-- Set the status of the todo whose id matches input.id, then push the updated
-- list back to the sidebar.
-- NOTE(review): `sidebar.chat_history.todos` has no nil guard, so `#todos`
-- would error if the history has no todos table — confirm upstream always
-- initializes it.
-- NOTE(review): an unknown id still reports success=true; verify that silent
-- best-effort behavior is intended.
function M.func(input, opts)
local on_complete = opts.on_complete
local sidebar = require("avante").get()
if not sidebar then return false, "Avante sidebar not found" end
local todos = sidebar.chat_history.todos
if #todos == 0 then return false, "No todos found" end
-- Compare ids as strings so numeric and string ids both match.
for _, todo in ipairs(todos) do
if tostring(todo.id) == tostring(input.id) then
todo.status = input.status
break
end
end
sidebar:update_todos(todos)
if on_complete then
-- Async path: report success via the callback; nil,nil defers the result.
on_complete(true, nil)
return nil, nil
end
return true, nil
end
return M

View File

@@ -3,9 +3,9 @@ local Base = require("avante.llm_tools.base")
---@class AvanteLLMTool
local M = setmetatable({}, Base)
M.name = "add_todos"
M.name = "write_todos"
M.description = "Add TODOs to the current task"
M.description = "Write TODOs to the current task"
---@type AvanteLLMToolParam
M.param = {
@@ -13,7 +13,7 @@ M.param = {
fields = {
{
name = "todos",
description = "The TODOs to add",
description = "The entire TODOs array to write",
type = "array",
items = {
name = "items",

View File

@@ -138,6 +138,7 @@ end
H.chat_auth_url = "https://api.github.com/copilot_internal/v2/token"
function H.chat_completion_url(base_url) return Utils.url_join(base_url, "/chat/completions") end
function H.response_url(base_url) return Utils.url_join(base_url, "/responses") end
function H.refresh_token(async, force)
if not M.state then error("internal initialization error") end
@@ -268,7 +269,7 @@ function M:build_headers()
return {
["Authorization"] = "Bearer " .. M.state.github_token.token,
["User-Agent"] = "GitHubCopilotChat/0.26.7",
["Editor-Version"] = "vscode/1.99.3",
["Editor-Version"] = "vscode/1.105.1",
["Editor-Plugin-Version"] = "copilot-chat/0.26.7",
["Copilot-Integration-Id"] = "vscode-chat",
["Openai-Intent"] = "conversation-edits",
@@ -283,12 +284,28 @@ function M:parse_curl_args(prompt_opts)
local provider_conf, request_body = Providers.parse_config(self)
local disable_tools = provider_conf.disable_tools or false
-- Apply OpenAI's set_allowed_params for Response API compatibility
OpenAI.set_allowed_params(provider_conf, request_body)
local use_ReAct_prompt = provider_conf.use_ReAct_prompt == true
local tools = {}
if not use_ReAct_prompt and not disable_tools and prompt_opts.tools then
local tools = nil
if not disable_tools and prompt_opts.tools and not use_ReAct_prompt then
tools = {}
for _, tool in ipairs(prompt_opts.tools) do
table.insert(tools, OpenAI:transform_tool(tool))
local transformed_tool = OpenAI:transform_tool(tool)
-- Response API uses flattened tool structure
if provider_conf.use_response_api then
if transformed_tool.type == "function" and transformed_tool["function"] then
transformed_tool = {
type = "function",
name = transformed_tool["function"].name,
description = transformed_tool["function"].description,
parameters = transformed_tool["function"].parameters,
}
end
end
table.insert(tools, transformed_tool)
end
end
@@ -300,18 +317,50 @@ function M:parse_curl_args(prompt_opts)
headers["X-Initiator"] = initiator
end
local parsed_messages = self:parse_messages(prompt_opts)
-- Build base body
local base_body = {
model = provider_conf.model,
stream = true,
tools = tools,
}
-- Response API uses 'input' instead of 'messages'
-- NOTE: Copilot doesn't support previous_response_id, always send full history
if provider_conf.use_response_api then
base_body.input = parsed_messages
-- Response API uses max_output_tokens instead of max_tokens/max_completion_tokens
if request_body.max_completion_tokens then
request_body.max_output_tokens = request_body.max_completion_tokens
request_body.max_completion_tokens = nil
end
if request_body.max_tokens then
request_body.max_output_tokens = request_body.max_tokens
request_body.max_tokens = nil
end
-- Response API doesn't use stream_options
base_body.stream_options = nil
base_body.include = { "reasoning.encrypted_content" }
base_body.reasoning = {
summary = "detailed",
}
base_body.truncation = "disabled"
else
base_body.messages = parsed_messages
base_body.stream_options = {
include_usage = true,
}
end
return {
url = H.chat_completion_url(M.state.github_token.endpoints.api or provider_conf.endpoint),
url = H.response_url(M.state.github_token.endpoints.api or provider_conf.endpoint),
timeout = provider_conf.timeout,
proxy = provider_conf.proxy,
insecure = provider_conf.allow_insecure,
headers = Utils.tbl_override(headers, self.extra_headers),
body = vim.tbl_deep_extend("force", {
model = provider_conf.model,
messages = self:parse_messages(prompt_opts),
stream = true,
tools = tools,
}, request_body),
body = vim.tbl_deep_extend("force", base_body, request_body),
}
end

View File

@@ -66,16 +66,48 @@ function M.get_user_message(opts)
)
end
function M.is_reasoning_model(model) return model and string.match(model, "^o%d+") ~= nil end
--- Decide whether a model name denotes an OpenAI reasoning model.
-- Matches the o-series ("o1", "o3", ...) at the start of the name and the
-- gpt-5 family anywhere in it ("%-" escapes the dash, a pattern magic char).
-- @tparam string|nil model  model identifier from the provider config
-- @treturn boolean|nil  nil/false passthrough when `model` is falsy
function M.is_reasoning_model(model)
if not model then return model end
local o_series = model:match("^o%d+") ~= nil
local gpt5_family = model:match("gpt%-5") ~= nil
return o_series or gpt5_family
end
--- Normalize request parameters for the configured model and API flavor.
-- Mutates `request_body` in place: enforces reasoning-model constraints,
-- resolves max_tokens vs max_completion_tokens, converts reasoning_effort to
-- the Response API's `reasoning` object, and strips parameters the Response
-- API rejects.
-- @tparam table provider_conf  provider config (reads `model`, `use_response_api`)
-- @tparam table request_body   request payload, sanitized in place
function M.set_allowed_params(provider_conf, request_body)
if M.is_reasoning_model(provider_conf.model) then
-- Reasoning models have specific parameter requirements
request_body.temperature = 1
-- Response API doesn't support temperature for reasoning models
if provider_conf.use_response_api then request_body.temperature = nil end
else
-- Non-reasoning models must not carry reasoning-only fields.
request_body.reasoning_effort = nil
request_body.reasoning = nil
end
-- If max_tokens is set in config, unset max_completion_tokens
if request_body.max_tokens then request_body.max_completion_tokens = nil end
-- Handle Response API specific parameters
if provider_conf.use_response_api then
-- Convert reasoning_effort to reasoning object for Response API
if request_body.reasoning_effort then
request_body.reasoning = {
effort = request_body.reasoning_effort,
}
request_body.reasoning_effort = nil
end
-- Response API doesn't support some parameters
-- Remove unsupported parameters for Response API
local unsupported_params = {
"top_p",
"frequency_penalty",
"presence_penalty",
"logit_bias",
"logprobs",
"top_logprobs",
"n",
}
for _, param in ipairs(unsupported_params) do
request_body[param] = nil
end
end
end
function M:parse_messages(opts)
@@ -99,6 +131,18 @@ function M:parse_messages(opts)
if type(msg.content) == "string" then
table.insert(messages, { role = self.role_map[msg.role], content = msg.content })
elseif type(msg.content) == "table" then
-- Check if this is a reasoning message (object with type "reasoning")
if msg.content.type == "reasoning" then
-- Add reasoning message directly (for Response API)
table.insert(messages, {
type = "reasoning",
id = msg.content.id,
encrypted_content = msg.content.encrypted_content,
summary = msg.content.summary,
})
return
end
local content = {}
local tool_calls = {}
local tool_results = {}
@@ -114,6 +158,14 @@ function M:parse_messages(opts)
url = "data:" .. item.source.media_type .. ";" .. item.source.type .. "," .. item.source.data,
},
})
elseif item.type == "reasoning" then
-- Add reasoning message directly (for Response API)
table.insert(messages, {
type = "reasoning",
id = item.id,
encrypted_content = item.encrypted_content,
summary = item.summary,
})
elseif item.type == "tool_use" and not use_ReAct_prompt then
has_tool_use = true
table.insert(tool_calls, {
@@ -155,21 +207,53 @@ function M:parse_messages(opts)
if #content > 0 then table.insert(messages, { role = self.role_map[msg.role], content = content }) end
if not provider_conf.disable_tools and not use_ReAct_prompt then
if #tool_calls > 0 then
local last_message = messages[#messages]
if last_message and last_message.role == self.role_map["assistant"] and last_message.tool_calls then
last_message.tool_calls = vim.list_extend(last_message.tool_calls, tool_calls)
-- Only skip tool_calls if using Response API with previous_response_id support
-- Copilot uses Response API format but doesn't support previous_response_id
local should_include_tool_calls = not provider_conf.use_response_api
or not provider_conf.support_previous_response_id
if not last_message.content then last_message.content = "" end
else
table.insert(messages, { role = self.role_map["assistant"], tool_calls = tool_calls, content = "" })
if should_include_tool_calls then
-- For Response API without previous_response_id support (like Copilot),
-- convert tool_calls to function_call items in input
if provider_conf.use_response_api then
for _, tool_call in ipairs(tool_calls) do
table.insert(messages, {
type = "function_call",
call_id = tool_call.id,
name = tool_call["function"].name,
arguments = tool_call["function"].arguments,
})
end
else
-- Chat Completions API format
local last_message = messages[#messages]
if last_message and last_message.role == self.role_map["assistant"] and last_message.tool_calls then
last_message.tool_calls = vim.list_extend(last_message.tool_calls, tool_calls)
if not last_message.content then last_message.content = "" end
else
table.insert(messages, { role = self.role_map["assistant"], tool_calls = tool_calls, content = "" })
end
end
end
-- If support_previous_response_id is true, Response API manages function call history
-- So we can skip adding tool_calls to input messages
end
if #tool_results > 0 then
for _, tool_result in ipairs(tool_results) do
table.insert(
messages,
{ role = "tool", tool_call_id = tool_result.tool_call_id, content = tool_result.content or "" }
)
-- Response API uses different format for function outputs
if provider_conf.use_response_api then
table.insert(messages, {
type = "function_call_output",
call_id = tool_result.tool_call_id,
output = tool_result.content or "",
})
else
table.insert(
messages,
{ role = "tool", tool_call_id = tool_result.tool_call_id, content = tool_result.content or "" }
)
end
end
end
end
@@ -194,10 +278,16 @@ function M:parse_messages(opts)
local final_messages = {}
local prev_role = nil
local prev_type = nil
vim.iter(messages):each(function(message)
local role = message.role
if role == prev_role and role ~= "tool" then
if
role == prev_role
and role ~= "tool"
and prev_type ~= "function_call"
and prev_type ~= "function_call_output"
then
if role == self.role_map["assistant"] then
table.insert(final_messages, { role = self.role_map["user"], content = "Ok" })
else
@@ -209,6 +299,7 @@ function M:parse_messages(opts)
end
end
prev_role = role
prev_type = message.type
table.insert(final_messages, message)
end)
@@ -217,8 +308,8 @@ end
function M:finish_pending_messages(ctx, opts)
if ctx.content ~= nil and ctx.content ~= "" then self:add_text_message(ctx, "", "generated", opts) end
if ctx.tool_use_list then
for _, tool_use in pairs(ctx.tool_use_list) do
if ctx.tool_use_map then
for _, tool_use in pairs(ctx.tool_use_map) do
if tool_use.state == "generating" then self:add_tool_use_message(ctx, tool_use, "generated", opts) end
end
end
@@ -308,17 +399,18 @@ function M:add_text_message(ctx, text, state, opts)
turn_id = ctx.turn_id,
})
msgs[#msgs + 1] = msg_
ctx.tool_use_list = ctx.tool_use_list or {}
ctx.tool_use_map = ctx.tool_use_map or {}
local input_json = type(input) == "string" and input or vim.json.encode(input)
local exists = false
for _, tool_use in ipairs(ctx.tool_use_list) do
for _, tool_use in pairs(ctx.tool_use_map) do
if tool_use.id == tool_use_id then
tool_use.input_json = input_json
exists = true
end
end
if not exists then
ctx.tool_use_list[#ctx.tool_use_list + 1] = {
local tool_key = tostring(vim.tbl_count(ctx.tool_use_map))
ctx.tool_use_map[tool_key] = {
uuid = tool_use_id,
id = tool_use_id,
name = item.tool_name,
@@ -369,6 +461,20 @@ function M:add_tool_use_message(ctx, tool_use, state, opts)
if state == "generating" then opts.on_stop({ reason = "tool_use", streaming_tool_use = true }) end
end
--- Record a Response API reasoning item as an assistant history message.
-- Wraps the reasoning payload (id, encrypted_content, summary) in a
-- HistoryMessage marked "generated" and notifies the message sink, so the
-- item can be replayed in subsequent requests.
-- @tparam table ctx  streaming context (reads `turn_id`)
-- @tparam table reasoning_item  item from a response.output_item.added event
-- @tparam table opts  may carry an `on_messages_add(msgs)` callback
function M:add_reasoning_message(ctx, reasoning_item, opts)
local msg = HistoryMessage:new("assistant", {
type = "reasoning",
id = reasoning_item.id,
encrypted_content = reasoning_item.encrypted_content,
summary = reasoning_item.summary,
}, {
state = "generated",
uuid = Utils.uuid(),
turn_id = ctx.turn_id,
})
if opts.on_messages_add then opts.on_messages_add({ msg }) end
end
---@param usage avante.OpenAITokenUsage | nil
---@return avante.LLMTokenUsage | nil
function M.transform_openai_usage(usage)
@@ -385,15 +491,118 @@ end
function M:parse_response(ctx, data_stream, _, opts)
if data_stream:match('"%[DONE%]":') or data_stream == "[DONE]" then
self:finish_pending_messages(ctx, opts)
if ctx.tool_use_list and #ctx.tool_use_list > 0 then
ctx.tool_use_list = {}
if ctx.tool_use_map and vim.tbl_count(ctx.tool_use_map) > 0 then
ctx.tool_use_map = {}
opts.on_stop({ reason = "tool_use" })
else
opts.on_stop({ reason = "complete" })
end
return
end
local jsn = vim.json.decode(data_stream)
-- Check if this is a Response API event (has 'type' field)
if jsn.type and type(jsn.type) == "string" then
-- Response API event-driven format
if jsn.type == "response.output_text.delta" then
-- Text content delta
if jsn.delta and jsn.delta ~= vim.NIL and jsn.delta ~= "" then
if opts.on_chunk then opts.on_chunk(jsn.delta) end
self:add_text_message(ctx, jsn.delta, "generating", opts)
end
elseif jsn.type == "response.reasoning_summary_text.delta" then
-- Reasoning summary delta
if jsn.delta and jsn.delta ~= vim.NIL and jsn.delta ~= "" then
if ctx.returned_think_start_tag == nil or not ctx.returned_think_start_tag then
ctx.returned_think_start_tag = true
if opts.on_chunk then opts.on_chunk("<think>\n") end
end
ctx.last_think_content = jsn.delta
self:add_thinking_message(ctx, jsn.delta, "generating", opts)
if opts.on_chunk then opts.on_chunk(jsn.delta) end
end
elseif jsn.type == "response.function_call_arguments.delta" then
-- Function call arguments delta
if jsn.delta and jsn.delta ~= vim.NIL and jsn.delta ~= "" then
if not ctx.tool_use_map then ctx.tool_use_map = {} end
local tool_key = tostring(jsn.output_index or 0)
if not ctx.tool_use_map[tool_key] then
ctx.tool_use_map[tool_key] = {
name = jsn.name or "",
id = jsn.call_id or "",
input_json = jsn.delta,
}
else
ctx.tool_use_map[tool_key].input_json = ctx.tool_use_map[tool_key].input_json .. jsn.delta
end
end
elseif jsn.type == "response.output_item.added" then
-- Output item added (could be function call or reasoning)
if jsn.item and jsn.item.type == "function_call" then
local tool_key = tostring(jsn.output_index or 0)
if not ctx.tool_use_map then ctx.tool_use_map = {} end
ctx.tool_use_map[tool_key] = {
name = jsn.item.name or "",
id = jsn.item.call_id or jsn.item.id or "",
input_json = "",
}
self:add_tool_use_message(ctx, ctx.tool_use_map[tool_key], "generating", opts)
elseif jsn.item and jsn.item.type == "reasoning" then
-- Add reasoning item to history
self:add_reasoning_message(ctx, jsn.item, opts)
end
elseif jsn.type == "response.output_item.done" then
-- Output item done (finalize function call)
if jsn.item and jsn.item.type == "function_call" then
local tool_key = tostring(jsn.output_index or 0)
if ctx.tool_use_map and ctx.tool_use_map[tool_key] then
local tool_use = ctx.tool_use_map[tool_key]
if jsn.item.arguments then tool_use.input_json = jsn.item.arguments end
self:add_tool_use_message(ctx, tool_use, "generated", opts)
end
end
elseif jsn.type == "response.completed" or jsn.type == "response.done" then
-- Response completed - save response.id for future requests
if jsn.response and jsn.response.id then
ctx.last_response_id = jsn.response.id
-- Store in provider for next request
self.last_response_id = jsn.response.id
end
if
ctx.returned_think_start_tag ~= nil and (ctx.returned_think_end_tag == nil or not ctx.returned_think_end_tag)
then
ctx.returned_think_end_tag = true
if opts.on_chunk then
if
ctx.last_think_content
and ctx.last_think_content ~= vim.NIL
and ctx.last_think_content:sub(-1) ~= "\n"
then
opts.on_chunk("\n</think>\n")
else
opts.on_chunk("</think>\n")
end
end
self:add_thinking_message(ctx, "", "generated", opts)
end
self:finish_pending_messages(ctx, opts)
local usage = nil
if jsn.response and jsn.response.usage then usage = self.transform_openai_usage(jsn.response.usage) end
if ctx.tool_use_map and vim.tbl_count(ctx.tool_use_map) > 0 then
opts.on_stop({ reason = "tool_use", usage = usage })
else
opts.on_stop({ reason = "complete", usage = usage })
end
elseif jsn.type == "error" then
-- Error event
local error_msg = jsn.error and vim.inspect(jsn.error) or "Unknown error"
opts.on_stop({ reason = "error", error = error_msg })
end
return
end
-- Chat Completions API format (original code)
if jsn.usage and jsn.usage ~= vim.NIL then
if opts.update_tokens_usage then
local usage = self.transform_openai_usage(jsn.usage)
@@ -435,10 +644,12 @@ function M:parse_response(ctx, data_stream, _, opts)
for idx, tool_call in ipairs(delta.tool_calls) do
--- In Gemini's so-called OpenAI Compatible API, tool_call.index is nil, which is quite absurd! Therefore, a compatibility fix is needed here.
if tool_call.index == nil then tool_call.index = choice_index + idx - 1 end
if not ctx.tool_use_list then ctx.tool_use_list = {} end
if not ctx.tool_use_list[tool_call.index + 1] then
if tool_call.index > 0 and ctx.tool_use_list[tool_call.index] then
local prev_tool_use = ctx.tool_use_list[tool_call.index]
if not ctx.tool_use_map then ctx.tool_use_map = {} end
local tool_key = tostring(tool_call.index)
local prev_tool_key = tostring(tool_call.index - 1)
if not ctx.tool_use_map[tool_key] then
local prev_tool_use = ctx.tool_use_map[prev_tool_key]
if tool_call.index > 0 and prev_tool_use then
self:add_tool_use_message(ctx, prev_tool_use, "generated", opts)
end
local tool_use = {
@@ -446,10 +657,10 @@ function M:parse_response(ctx, data_stream, _, opts)
id = tool_call.id,
input_json = type(tool_call["function"].arguments) == "string" and tool_call["function"].arguments or "",
}
ctx.tool_use_list[tool_call.index + 1] = tool_use
ctx.tool_use_map[tool_key] = tool_use
self:add_tool_use_message(ctx, tool_use, "generating", opts)
else
local tool_use = ctx.tool_use_list[tool_call.index + 1]
local tool_use = ctx.tool_use_map[tool_key]
if tool_call["function"].arguments == vim.NIL then tool_call["function"].arguments = "" end
tool_use.input_json = tool_use.input_json .. tool_call["function"].arguments
-- self:add_tool_use_message(ctx, tool_use, "generating", opts)
@@ -476,7 +687,7 @@ function M:parse_response(ctx, data_stream, _, opts)
end
if choice.finish_reason == "stop" or choice.finish_reason == "eos_token" or choice.finish_reason == "length" then
self:finish_pending_messages(ctx, opts)
if ctx.tool_use_list and #ctx.tool_use_list > 0 then
if ctx.tool_use_map and vim.tbl_count(ctx.tool_use_map) > 0 then
opts.on_stop({ reason = "tool_use", usage = self.transform_openai_usage(jsn.usage) })
else
opts.on_stop({ reason = "complete", usage = self.transform_openai_usage(jsn.usage) })
@@ -537,7 +748,21 @@ function M:parse_curl_args(prompt_opts)
if not disable_tools and prompt_opts.tools and not use_ReAct_prompt then
tools = {}
for _, tool in ipairs(prompt_opts.tools) do
table.insert(tools, self:transform_tool(tool))
local transformed_tool = self:transform_tool(tool)
-- Response API uses flattened tool structure
if provider_conf.use_response_api then
-- Convert from {type: "function", function: {name, description, parameters}}
-- to {type: "function", name, description, parameters}
if transformed_tool.type == "function" and transformed_tool["function"] then
transformed_tool = {
type = "function",
name = transformed_tool["function"].name,
description = transformed_tool["function"].description,
parameters = transformed_tool["function"].parameters,
}
end
end
table.insert(tools, transformed_tool)
end
end
@@ -547,21 +772,70 @@ function M:parse_curl_args(prompt_opts)
local stop = nil
if use_ReAct_prompt then stop = { "</tool_use>" } end
-- Determine endpoint path based on use_response_api
local endpoint_path = provider_conf.use_response_api and "/responses" or "/chat/completions"
local parsed_messages = self:parse_messages(prompt_opts)
-- Build base body
local base_body = {
model = provider_conf.model,
stop = stop,
stream = true,
tools = tools,
}
-- Response API uses 'input' instead of 'messages'
if provider_conf.use_response_api then
-- Check if we have tool results - if so, use previous_response_id
local has_function_outputs = false
for _, msg in ipairs(parsed_messages) do
if msg.type == "function_call_output" then
has_function_outputs = true
break
end
end
if has_function_outputs and self.last_response_id then
-- When sending function outputs, use previous_response_id
base_body.previous_response_id = self.last_response_id
-- Only send the function outputs, not the full history
local function_outputs = {}
for _, msg in ipairs(parsed_messages) do
if msg.type == "function_call_output" then table.insert(function_outputs, msg) end
end
base_body.input = function_outputs
-- Clear the stored response_id after using it
self.last_response_id = nil
else
-- Normal request without tool results
base_body.input = parsed_messages
end
-- Response API uses max_output_tokens instead of max_tokens/max_completion_tokens
if request_body.max_completion_tokens then
request_body.max_output_tokens = request_body.max_completion_tokens
request_body.max_completion_tokens = nil
end
if request_body.max_tokens then
request_body.max_output_tokens = request_body.max_tokens
request_body.max_tokens = nil
end
-- Response API doesn't use stream_options
base_body.stream_options = nil
else
base_body.messages = parsed_messages
base_body.stream_options = not M.is_mistral(provider_conf.endpoint) and {
include_usage = true,
} or nil
end
return {
url = Utils.url_join(provider_conf.endpoint, "/chat/completions"),
url = Utils.url_join(provider_conf.endpoint, endpoint_path),
proxy = provider_conf.proxy,
insecure = provider_conf.allow_insecure,
headers = Utils.tbl_override(headers, self.extra_headers),
body = vim.tbl_deep_extend("force", {
model = provider_conf.model,
messages = self:parse_messages(prompt_opts),
stop = stop,
stream = true,
stream_options = not M.is_mistral(provider_conf.endpoint) and {
include_usage = true,
} or nil,
tools = tools,
}, request_body),
body = vim.tbl_deep_extend("force", base_body, request_body),
}
end

View File

@@ -67,7 +67,7 @@ local function parse_response_wo_stream(self, data, _, opts)
content = content:gsub("<file>\n?", "")
content = content:gsub("\n?</file>", "")
content = content:gsub("\n?<memory>.-</memory>\n?", "")
content = content:gsub("\n?<update_todo_status>.-</update_todo_status>\n?", "")
content = content:gsub("\n?<write_todos>.-</write_todos>\n?", "")
content = content:gsub("\n?<attempt_completion>.-</attempt_completion>\n?", "")
-- Trim excessive whitespace but preserve structure

View File

@@ -1960,10 +1960,20 @@ function Sidebar:get_message_lines(ctx, message, messages, ignore_record_prefix)
elseif type(content) == "string" then
text_len = #content
end
local cache_key = message.uuid .. ":" .. tostring(text_len) .. ":" .. tostring(expanded == true)
local cache_key = message.uuid
.. ":"
.. message.state
.. ":"
.. tostring(text_len)
.. ":"
.. tostring(expanded == true)
local cached_lines = _message_to_lines_lru_cache:get(cache_key)
if cached_lines then return cached_lines end
local lines = self:_get_message_lines(ctx, message, messages, ignore_record_prefix)
--- trim suffix empty lines
while #lines > 0 and tostring(lines[#lines]) == "" do
table.remove(lines)
end
_message_to_lines_lru_cache:set(cache_key, lines)
return lines
end
@@ -2518,7 +2528,7 @@ function Sidebar:get_history_messages_for_api(opts)
if not Config.acp_providers[Config.provider] then
local tool_limit
if Providers[Config.provider].use_ReAct_prompt then
if Providers[Config.provider].use_ReAct_prompt or Providers[Config.provider].use_response_api then
tool_limit = nil
else
tool_limit = 25

View File

@@ -1,5 +1,5 @@
# Task Management
You have access to the add_todos and update_todo_status tools to help you manage and plan tasks. Use these tools VERY frequently to ensure that you are tracking your tasks and giving the user visibility into your progress.
You have access to the read_todos and write_todos tools to help you manage and plan tasks. Use these tools VERY frequently to ensure that you are tracking your tasks and giving the user visibility into your progress.
These tools are also EXTREMELY helpful for planning tasks, and for breaking down larger complex tasks into smaller steps. If you do not use this tool when planning, you may forget to do important tasks - and that is unacceptable.
It is critical that you mark todos as completed as soon as you are done with a task. Do not batch up multiple tasks before marking them as completed.
@@ -8,13 +8,13 @@ Examples:
<example>
user: Run the build and fix any type errors
assistant: I'm going to use the add_todos tool to write the following items to the todo list:
assistant: I'm going to use the write_todos tool to write the following items to the todo list:
- Run the build
- Fix any type errors
I'm now going to run the build using Bash.
Looks like I found 10 type errors. I'm going to use the add_todos tool to write 10 items to the todo list.
Looks like I found 10 type errors. I'm going to use the write_todos tool to write 10 items to the todo list.
marking the first todo as in_progress
@@ -29,7 +29,7 @@ In the above example, the assistant completes all the tasks, including the 10 er
<example>
user: Help me write a new feature that allows users to track their usage metrics and export them to various formats
assistant: I'll help you implement a usage metrics tracking and export feature. Let me first use the add_todos tool to plan this task.
assistant: I'll help you implement a usage metrics tracking and export feature. Let me first use the write_todos tool to plan this task.
Adding the following todos to the todo list:
1. Research existing metrics tracking in the codebase
2. Design the metrics collection system
@@ -48,10 +48,14 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
# Doing tasks
The user will primarily request you perform software engineering tasks. This includes solving bugs, adding new functionality, refactoring code, explaining code, and more. For these tasks the following steps are recommended:
- Use the add_todos tool to plan the task if required
- Use the update_todo_status tool to mark todos as doing, done, or cancelled
- Use the read_todos tool to get the list of todos
- Use the write_todos tool to plan the task if required
- Use the write_todos tool to mark todos as doing, done, or cancelled
- Use the available search tools to understand the codebase and the user's query. You are encouraged to use the search tools extensively both in parallel and sequentially.
- Implement the solution using all tools available to you
- Verify the solution if possible with tests. NEVER assume specific test framework or test script. Check the README or search codebase to determine the testing approach.
- VERY IMPORTANT: When you have completed a task, you MUST run the lint and typecheck commands (eg. npm run lint, npm run typecheck, ruff, etc.) with Bash if they were provided to you to ensure your code is correct. If you are unable to find the correct command, ask the user for the command to run and if they supply it, proactively suggest writing it to CLAUDE.md so that you will know to run it next time.
NEVER commit changes unless the user explicitly asks you to. It is VERY IMPORTANT to only commit when explicitly asked, otherwise the user will feel that you are being too proactive.
# Rules
- The write_todos tool must receive the entire todos array, not just a few elements from it.

View File

@@ -90,7 +90,7 @@ By thoughtfully selecting between write_to_file and replace_in_file, you can mak
RULES
- Strictly follow the TODOs step by step to complete the task without stopping, and after completing each step, use the update_todo_status tool to update the status of the TODOs.
- Strictly follow the TODOs step by step to complete the task without stopping, and after completing each step, use the write_todos tool to update the status of the TODOs.
- NEVER reply the updated code.

View File

@@ -204,10 +204,22 @@ vim.g.avante_login = vim.g.avante_login
---@field reasoning_content? string
---@field reasoning? string
---@field tool_calls? AvanteOpenAIMessageToolCall[]
---@field type? "reasoning" | "function_call" | "function_call_output"
---@field id? string
---@field encrypted_content? string
---@field summary? string
---@field call_id? string
---@field name? string
---@field arguments? string
---@field output? string
---
---@class AvanteOpenAITool
---@field type "function"
---@field function AvanteOpenAIToolFunction
---@field function? AvanteOpenAIToolFunction
---@field name? string
---@field description? string | nil
---@field parameters? AvanteOpenAIToolFunctionParameters | nil
---@field strict? boolean | nil
---
---@class AvanteOpenAIToolFunction
---@field name string
@@ -251,6 +263,8 @@ vim.g.avante_login = vim.g.avante_login
---@field hide_in_model_selector? boolean
---@field use_ReAct_prompt? boolean
---@field context_window? integer
---@field use_response_api? boolean
---@field support_previous_response_id? boolean
---
---@class AvanteSupportedProvider: AvanteDefaultBaseProvider
---@field __inherited_from? string
@@ -484,7 +498,7 @@ vim.g.avante_login = vim.g.avante_login
---@class AvanteLLMToolReturn
---@field name string
---@field description string
---@field type 'string' | 'string[]' | 'boolean'
---@field type 'string' | 'string[]' | 'boolean' | 'array'
---@field optional? boolean
---
---@class avante.ChatHistoryEntry

View File

@@ -136,13 +136,9 @@ Parameters:
<tool_use>{"name": "attempt_completion", "input": {"result": "I've successfully created the requested React component with the following features:\n- Responsive layout\n- Dark/light mode toggle\n- Form validation\n- API integration"}}</tool_use>
## Example 5: Add todos
## Example 5: Write todos
<tool_use>{"name": "add_todos", "input": {"todos": [{"id": "1", "content": "Implement a responsive layout", "status": "todo", "priority": "low"}, {"id": "2", "content": "Add dark/light mode toggle", "status": "todo", "priority": "medium"}]}}</tool_use>
## Example 6: Update todo status
<tool_use>{"name": "update_todo_status", "input": {"id": "1", "status": "done"}}</tool_use>
<tool_use>{"name": "write_todos", "input": {"todos": [{"id": "1", "content": "Implement a responsive layout", "status": "todo", "priority": "low"}, {"id": "2", "content": "Add dark/light mode toggle", "status": "todo", "priority": "medium"}]}}</tool_use>
]]
end
return system_prompt