fix: revert max_completion_tokens to max_tokens (#1741)

This commit is contained in:
yetone
2025-03-27 16:53:55 +08:00
committed by GitHub
parent a06bb97db6
commit cd13eeb7d9
7 changed files with 6 additions and 9 deletions

View File

@@ -35,7 +35,7 @@ Then enable it in avante.nvim:
api_key_name = 'GROQ_API_KEY',
endpoint = 'https://api.groq.com/openai/v1/',
model = 'llama-3.3-70b-versatile',
-          max_completion_tokens = 32768, -- remember to increase this value, otherwise it will stop generating halfway
+          max_tokens = 32768, -- remember to increase this value, otherwise it will stop generating halfway
},
},
--- ... existing configurations