feat: support multiple models for bedrock (#2648)

This commit is contained in:
brook hong
2025-08-28 20:23:05 +08:00
committed by GitHub
parent 9fe429eb62
commit 751e4c0913
2 changed files with 13 additions and 10 deletions

View File

@@ -291,7 +291,14 @@ M._defaults = {
},
---@type AvanteSupportedProvider
bedrock = {
- model = "anthropic.claude-3-5-sonnet-20241022-v2:0",
+ model = "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
+ model_names = {
+   "anthropic.claude-3-5-sonnet-20241022-v2:0",
+   "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
+   "us.anthropic.claude-opus-4-20250514-v1:0",
+   "us.anthropic.claude-opus-4-1-20250805-v1:0",
+   "us.anthropic.claude-sonnet-4-20250514-v1:0",
+ },
timeout = 30000, -- Timeout in milliseconds
extra_request_body = {
temperature = 0.75,
@@ -404,13 +411,6 @@ M._defaults = {
model = "claude-3-7-sonnet-20250219",
api_key_name = "AIHUBMIX_API_KEY",
},
- ["bedrock-claude-3.7-sonnet"] = {
-   __inherited_from = "bedrock",
-   model = "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
-   extra_request_body = {
-     max_tokens = 4096,
-   },
- },
morph = {
__inherited_from = "openai",
endpoint = "https://api.morphllm.com/v1",

View File

@@ -139,9 +139,12 @@ function M.transform_anthropic_usage(usage)
if not usage then return nil end
---@type avante.LLMTokenUsage
local res = {
- prompt_tokens = usage.input_tokens + usage.cache_creation_input_tokens,
- completion_tokens = usage.output_tokens + usage.cache_read_input_tokens,
+ prompt_tokens = usage.cache_creation_input_tokens and (usage.input_tokens + usage.cache_creation_input_tokens)
+   or usage.input_tokens,
+ completion_tokens = usage.cache_read_input_tokens and (usage.output_tokens + usage.cache_read_input_tokens)
+   or usage.output_tokens,
}
return res
end