feat: initial release of codetyper.nvim v0.2.0

AI-powered coding partner for Neovim with LLM integration.

Features:
- Split view for coder files (*.coder.*) and target files
- Tag-based prompts with /@ and @/ syntax (example after this list)
- Claude API and Ollama (local) LLM support (config sketch below)
- Smart prompt detection (refactor, add, document, explain)
- Automatic code injection into target files
- Project tree logging (.coder/tree.log)
- Auto .gitignore management
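
A tagged prompt in a *.coder.* file might look like this (the /@ and @/ delimiters come from the feature list above; the prompt wording and placement are illustrative):

  /@ add: a debounce(fn, ms) helper and export it @/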

Ask Panel (chat interface):
- Fixed at 1/4 screen width
- File attachment with @ key
- Ctrl+n for new chat
- Ctrl+Enter to submit
- Proper window close behavior
- Navigation with Ctrl+h/j/k/l

Commands: Coder, CoderOpen, CoderClose, CoderToggle,
CoderProcess, CoderAsk, CoderTree, CoderTreeView
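
Both LLM clients below read their settings through codetyper.get_config(). A minimal configuration sketch (the setup() entry point and the example values are assumptions; the key layout matches what the clients read):

  require("codetyper").setup({
    llm = {
      provider = "claude", -- or "ollama"
      claude = {
        api_key = nil, -- falls back to $ANTHROPIC_API_KEY
        model = "claude-sonnet-4-20250514", -- illustrative model name
      },
      ollama = {
        host = "http://localhost:11434", -- Ollama's default port
        model = "codellama", -- illustrative model name
      },
    },
  })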

commit bba0647b47
2026-01-11 15:24:06 -05:00
29 changed files with 5503 additions and 0 deletions

lua/codetyper/llm/claude.lua Normal file

@@ -0,0 +1,154 @@
---@mod codetyper.llm.claude Claude API client for Codetyper.nvim
local M = {}

local utils = require("codetyper.utils")
local llm = require("codetyper.llm")

--- Claude API endpoint
local API_URL = "https://api.anthropic.com/v1/messages"

--- Get API key from config or environment
---@return string|nil API key
local function get_api_key()
  local codetyper = require("codetyper")
  local config = codetyper.get_config()
  return config.llm.claude.api_key or vim.env.ANTHROPIC_API_KEY
end

--- Get model from config
---@return string Model name
local function get_model()
  local codetyper = require("codetyper")
  local config = codetyper.get_config()
  return config.llm.claude.model
end

--- Build request body for Claude API
---@param prompt string User prompt
---@param context table Context information
---@return table Request body
local function build_request_body(prompt, context)
  local system_prompt = llm.build_system_prompt(context)
  return {
    model = get_model(),
    max_tokens = 4096,
    system = system_prompt,
    messages = {
      {
        role = "user",
        content = prompt,
      },
    },
  }
end
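
-- The encoded body follows the Anthropic Messages API shape, e.g.
-- (illustrative values):
--   { "model": "claude-...", "max_tokens": 4096, "system": "...",
--     "messages": [{ "role": "user", "content": "..." }] }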

--- Make HTTP request to Claude API
---@param body table Request body
---@param callback fun(response: string|nil, error: string|nil) Callback function
local function make_request(body, callback)
  local api_key = get_api_key()
  if not api_key then
    callback(nil, "Claude API key not configured")
    return
  end
  local json_body = vim.json.encode(body)
  -- Use curl for HTTP request (plenary.curl alternative)
  local cmd = {
    "curl",
    "-s",
    "-X", "POST",
    API_URL,
    "-H", "Content-Type: application/json",
    "-H", "x-api-key: " .. api_key,
    "-H", "anthropic-version: 2023-06-01",
    "-d", json_body,
  }
  vim.fn.jobstart(cmd, {
    stdout_buffered = true,
    on_stdout = function(_, data)
      if not data or #data == 0 or (data[1] == "" and #data == 1) then
        return
      end
      local response_text = table.concat(data, "\n")
      local ok, response = pcall(vim.json.decode, response_text)
      if not ok then
        vim.schedule(function()
          callback(nil, "Failed to parse Claude response")
        end)
        return
      end
      if response.error then
        vim.schedule(function()
          callback(nil, response.error.message or "Claude API error")
        end)
        return
      end
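      -- A successful Messages API response carries the generated text at
      -- content[1].text: { "content": [{ "type": "text", "text": "..." }], ... }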
      if response.content and response.content[1] and response.content[1].text then
        local code = llm.extract_code(response.content[1].text)
        vim.schedule(function()
          callback(code, nil)
        end)
      else
        vim.schedule(function()
          callback(nil, "No content in Claude response")
        end)
      end
    end,
    on_stderr = function(_, data)
      if data and #data > 0 and data[1] ~= "" then
        vim.schedule(function()
          callback(nil, "Claude API request failed: " .. table.concat(data, "\n"))
        end)
      end
    end,
    on_exit = function(_, code)
      if code ~= 0 then
        vim.schedule(function()
          callback(nil, "Claude API request failed with code: " .. code)
        end)
      end
    end,
  })
end

--- Generate code using Claude API
---@param prompt string The user's prompt
---@param context table Context information
---@param callback fun(response: string|nil, error: string|nil) Callback function
function M.generate(prompt, context, callback)
  utils.notify("Sending request to Claude...", vim.log.levels.INFO)
  local body = build_request_body(prompt, context)
  make_request(body, function(response, err)
    if err then
      utils.notify(err, vim.log.levels.ERROR)
      callback(nil, err)
    else
      utils.notify("Code generated successfully", vim.log.levels.INFO)
      callback(response, nil)
    end
  end)
end

--- Check if Claude is properly configured
---@return boolean, string? Valid status and optional error message
function M.validate()
  local api_key = get_api_key()
  if not api_key or api_key == "" then
    return false, "Claude API key not configured"
  end
  return true
end

return M

lua/codetyper/llm/init.lua Normal file

@@ -0,0 +1,101 @@
---@mod codetyper.llm LLM interface for Codetyper.nvim
local M = {}

local utils = require("codetyper.utils")

--- Get the appropriate LLM client based on configuration
---@return table LLM client module
function M.get_client()
  local codetyper = require("codetyper")
  local config = codetyper.get_config()
  if config.llm.provider == "claude" then
    return require("codetyper.llm.claude")
  elseif config.llm.provider == "ollama" then
    return require("codetyper.llm.ollama")
  else
    error("Unknown LLM provider: " .. config.llm.provider)
  end
end

--- Generate code from a prompt
---@param prompt string The user's prompt
---@param context table Context information (file content, language, etc.)
---@param callback fun(response: string|nil, error: string|nil) Callback function
function M.generate(prompt, context, callback)
  local client = M.get_client()
  client.generate(prompt, context, callback)
end

--- Build the system prompt for code generation
---@param context table Context information
---@return string System prompt
function M.build_system_prompt(context)
  local prompts = require("codetyper.prompts")
  -- Select appropriate system prompt based on context
  local prompt_type = context.prompt_type or "code_generation"
  local system_prompts = prompts.system
  local system = system_prompts[prompt_type] or system_prompts.code_generation
  -- Substitute variables
  system = system:gsub("{{language}}", context.language or "unknown")
  system = system:gsub("{{filepath}}", context.file_path or "unknown")
  if context.file_content then
    system = system .. "\n\nExisting file content:\n```\n" .. context.file_content .. "\n```"
  end
  return system
end
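
-- For example, a hypothetical template "Write {{language}} code for {{filepath}}."
-- becomes "Write Lua code for lua/example.lua" before the existing file content
-- is appended (the template text is illustrative; real templates live in
-- codetyper.prompts).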

--- Build context for LLM request
---@param target_path string Path to target file
---@param prompt_type string Type of prompt
---@return table Context object
function M.build_context(target_path, prompt_type)
  local content = utils.read_file(target_path)
  local ext = vim.fn.fnamemodify(target_path, ":e")
  -- Map extension to language
  local lang_map = {
    ts = "TypeScript",
    tsx = "TypeScript React",
    js = "JavaScript",
    jsx = "JavaScript React",
    py = "Python",
    lua = "Lua",
    go = "Go",
    rs = "Rust",
    rb = "Ruby",
    java = "Java",
    c = "C",
    cpp = "C++",
    cs = "C#",
  }
  return {
    file_content = content,
    language = lang_map[ext] or ext,
    extension = ext,
    prompt_type = prompt_type,
    file_path = target_path,
  }
end

--- Parse LLM response and extract code
---@param response string Raw LLM response
---@return string Extracted code
function M.extract_code(response)
  -- Remove markdown code blocks if present
  local code = response:gsub("```%w*\n?", ""):gsub("\n?```", "")
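  -- e.g. "```lua\nlocal x = 1\n```" becomes "local x = 1"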
  -- Trim whitespace
  code = code:match("^%s*(.-)%s*$")
  return code
end

return M
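
A minimal end-to-end call through this interface (the path and prompt are illustrative; the functions come from the module above):

  local llm = require("codetyper.llm")
  local context = llm.build_context("lua/example.lua", "code_generation")
  llm.generate("add a debounce helper", context, function(code, err)
    if err then return end
    -- write `code` into the target file
  end)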

lua/codetyper/llm/ollama.lua Normal file

@@ -0,0 +1,173 @@
---@mod codetyper.llm.ollama Ollama API client for Codetyper.nvim
local M = {}

local utils = require("codetyper.utils")
local llm = require("codetyper.llm")

--- Get Ollama host from config
---@return string Host URL
local function get_host()
  local codetyper = require("codetyper")
  local config = codetyper.get_config()
  return config.llm.ollama.host
end

--- Get model from config
---@return string Model name
local function get_model()
  local codetyper = require("codetyper")
  local config = codetyper.get_config()
  return config.llm.ollama.model
end

--- Build request body for Ollama API
---@param prompt string User prompt
---@param context table Context information
---@return table Request body
local function build_request_body(prompt, context)
  local system_prompt = llm.build_system_prompt(context)
  return {
    model = get_model(),
    system = system_prompt,
    prompt = prompt,
    stream = false,
    options = {
      temperature = 0.2,
      num_predict = 4096,
    },
  }
end

--- Make HTTP request to Ollama API
---@param body table Request body
---@param callback fun(response: string|nil, error: string|nil) Callback function
local function make_request(body, callback)
  local host = get_host()
  local url = host .. "/api/generate"
  local json_body = vim.json.encode(body)
  local cmd = {
    "curl",
    "-s",
    "-X", "POST",
    url,
    "-H", "Content-Type: application/json",
    "-d", json_body,
  }
  vim.fn.jobstart(cmd, {
    stdout_buffered = true,
    on_stdout = function(_, data)
      if not data or #data == 0 or (data[1] == "" and #data == 1) then
        return
      end
      local response_text = table.concat(data, "\n")
      local ok, response = pcall(vim.json.decode, response_text)
      if not ok then
        vim.schedule(function()
          callback(nil, "Failed to parse Ollama response")
        end)
        return
      end
      if response.error then
        vim.schedule(function()
          callback(nil, response.error or "Ollama API error")
        end)
        return
      end
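      -- With stream = false, /api/generate returns a single JSON object whose
      -- generated text sits in the "response" field:
      --   { "model": "...", "response": "...", "done": true }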
      if response.response then
        local code = llm.extract_code(response.response)
        vim.schedule(function()
          callback(code, nil)
        end)
      else
        vim.schedule(function()
          callback(nil, "No response from Ollama")
        end)
      end
    end,
    on_stderr = function(_, data)
      if data and #data > 0 and data[1] ~= "" then
        vim.schedule(function()
          callback(nil, "Ollama API request failed: " .. table.concat(data, "\n"))
        end)
      end
    end,
    on_exit = function(_, code)
      if code ~= 0 then
        vim.schedule(function()
          callback(nil, "Ollama API request failed with code: " .. code)
        end)
      end
    end,
  })
end

--- Generate code using Ollama API
---@param prompt string The user's prompt
---@param context table Context information
---@param callback fun(response: string|nil, error: string|nil) Callback function
function M.generate(prompt, context, callback)
  utils.notify("Sending request to Ollama...", vim.log.levels.INFO)
  local body = build_request_body(prompt, context)
  make_request(body, function(response, err)
    if err then
      utils.notify(err, vim.log.levels.ERROR)
      callback(nil, err)
    else
      utils.notify("Code generated successfully", vim.log.levels.INFO)
      callback(response, nil)
    end
  end)
end

--- Check if Ollama is reachable
---@param callback fun(ok: boolean, error: string|nil) Callback function
function M.health_check(callback)
  local host = get_host()
  local cmd = { "curl", "-s", host .. "/api/tags" }
  vim.fn.jobstart(cmd, {
    stdout_buffered = true,
    on_stdout = function(_, data)
      if data and #data > 0 and data[1] ~= "" then
        vim.schedule(function()
          callback(true, nil)
        end)
      end
    end,
    on_exit = function(_, code)
      if code ~= 0 then
        vim.schedule(function()
          callback(false, "Cannot connect to Ollama at " .. host)
        end)
      end
    end,
  })
end

--- Check if Ollama is properly configured
---@return boolean, string? Valid status and optional error message
function M.validate()
  local host = get_host()
  if not host or host == "" then
    return false, "Ollama host not configured"
  end
  local model = get_model()
  if not model or model == "" then
    return false, "Ollama model not configured"
  end
  return true
end

return M
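
Callers can gate generation on the health check; a short sketch (the calling code is assumed, only health_check comes from this module):

  require("codetyper.llm.ollama").health_check(function(ok, err)
    if not ok then
      vim.notify(err, vim.log.levels.WARN)
      return
    end
    -- Ollama is reachable; safe to generate from here
  end)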