Adding the migration to pure files only

This commit is contained in:
2026-03-24 22:31:49 -04:00
parent 69c8061b8e
commit 75de3198cd
46 changed files with 576 additions and 653 deletions

View File

@@ -0,0 +1,14 @@
local config = {
	enabled = true,
	auto_trigger = true,
	debounce = 150,
	use_copilot = true, -- Use copilot when available
	keymap = {
		accept = "<Tab>",
		next = "<M-]>",
		prev = "<M-[>",
		dismiss = "<C-]>",
	},
}
return config
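
As a rough sketch of how a suggestion module might consume the table above: the require path, the `trigger`/`accept` callbacks, and the autocmd wiring below are illustrative assumptions, not part of this commit.

-- Hypothetical consumer of the suggestion config above; names marked "assumed" are not from this commit.
local config = require("codetyper.suggestion.config") -- assumed module path

local function trigger() end -- placeholder: ask the configured LLM for a suggestion
local function accept() end -- placeholder: insert the currently shown suggestion

if config.enabled then
	vim.keymap.set("i", config.keymap.accept, accept, { desc = "Codetyper: accept suggestion" })
	vim.keymap.set("i", config.keymap.dismiss, function() end, { desc = "Codetyper: dismiss suggestion" })

	if config.auto_trigger then
		-- Debounce: restart the timer on every keystroke so trigger() only fires
		-- once typing has paused for `config.debounce` milliseconds.
		local timer = (vim.uv or vim.loop).new_timer()
		vim.api.nvim_create_autocmd("TextChangedI", {
			callback = function()
				timer:stop()
				timer:start(config.debounce, 0, vim.schedule_wrap(trigger))
			end,
		})
	end
end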

View File

@@ -0,0 +1,23 @@
local M = {}
M.throb_time = 1200
M.cooldown_time = 100
M.tick_time = 100
M.Throbber = {}
M.AUGROUP = "Codetyper"
M.tree_update_timer = nil
M.TREE_UPDATE_DEBOUNCE_MS = 1000 -- 1 second debounce
M.processed_prompts = {}
M.asking_preference = false
M.is_processing = false
M.previous_mode = "n"
M.prompt_process_timer = nil
M.PROMPT_PROCESS_DEBOUNCE_MS = 200 -- Wait 200ms after mode change before processing
M.hl_group = "CmpGhostText"
M.throb_icons = {
	-- Alternative spinner frame sets used by the Throbber animation
	{ "", "", "", "", "", "", "", "", "", "" },
	{ "", "", "", "" },
	{ "", "", "", "", "", "", "", "" },
}
return M
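
A minimal sketch of how these constants could drive the Throbber: cycle one icon set every `tick_time` milliseconds and render the current frame with `hl_group`. The require path and the use of `nvim_echo` for display are assumptions for illustration only.

-- Illustrative throbber loop over the constants above (module path and display method are assumptions).
local C = require("codetyper.constants") -- assumed module path

local frames = C.throb_icons[1]
local index = 1
local timer = (vim.uv or vim.loop).new_timer()

local function start_throbber()
	timer:start(0, C.tick_time, vim.schedule_wrap(function()
		vim.api.nvim_echo({ { frames[index] .. " working...", C.hl_group } }, false, {})
		index = index % #frames + 1 -- wrap around to the first frame
	end))
end

local function stop_throbber()
	timer:stop()
	vim.api.nvim_echo({ { "" } }, false, {})
end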

View File

@@ -0,0 +1,105 @@
---@mod codetyper.config Configuration module for Codetyper.nvim
local M = {}
---@type CoderConfig
local defaults = {
	llm = {
		provider = "ollama", -- Options: "ollama", "copilot"
		ollama = {
			host = "http://localhost:11434",
			model = "deepseek-coder:6.7b",
		},
		copilot = {
			model = "claude-sonnet-4", -- Uses GitHub Copilot authentication
		},
	},
	auto_gitignore = true,
	auto_index = false, -- Auto-create coder companion files on file open
	indexer = {
		enabled = true, -- Enable project indexing
		auto_index = true, -- Index files on save
		index_on_open = false, -- Index project when opening
		max_file_size = 100000, -- Skip files larger than 100KB
		excluded_dirs = { "node_modules", "dist", "build", ".git", ".codetyper", "__pycache__", "vendor", "target" },
		index_extensions = { "lua", "ts", "tsx", "js", "jsx", "py", "go", "rs", "rb", "java", "c", "cpp", "h", "hpp" },
		memory = {
			enabled = true, -- Enable memory persistence
			max_memories = 1000, -- Maximum stored memories
			prune_threshold = 0.1, -- Remove low-weight memories
		},
	},
	brain = {
		enabled = true, -- Enable brain learning system
		auto_learn = true, -- Auto-learn from events
		auto_commit = true, -- Auto-commit after threshold
		commit_threshold = 10, -- Changes before auto-commit
		max_nodes = 5000, -- Maximum nodes before pruning
		max_deltas = 500, -- Maximum delta history
		prune = {
			enabled = true, -- Enable auto-pruning
			threshold = 0.1, -- Remove nodes below this weight
			unused_days = 90, -- Remove unused nodes after N days
		},
		output = {
			max_tokens = 4000, -- Token budget for LLM context
			format = "compact", -- "compact"|"json"|"natural"
		},
	},
}

--- Deep merge two tables
---@param t1 table Base table
---@param t2 table Table to merge into base
---@return table Merged table
local function deep_merge(t1, t2)
	local result = vim.deepcopy(t1)
	for k, v in pairs(t2) do
		if type(v) == "table" and type(result[k]) == "table" then
			result[k] = deep_merge(result[k], v)
		else
			result[k] = v
		end
	end
	return result
end

--- Setup configuration with user options
---@param opts? CoderConfig User configuration options
---@return CoderConfig Final configuration
function M.setup(opts)
	opts = opts or {}
	return deep_merge(defaults, opts)
end

--- Get default configuration
---@return CoderConfig Default configuration
function M.get_defaults()
	return vim.deepcopy(defaults)
end

--- Validate configuration
---@param config CoderConfig Configuration to validate
---@return boolean, string? Valid status and optional error message
function M.validate(config)
	if not config.llm then
		return false, "Missing LLM configuration"
	end

	local valid_providers = { "ollama", "copilot" }
	local is_valid_provider = false
	for _, p in ipairs(valid_providers) do
		if config.llm.provider == p then
			is_valid_provider = true
			break
		end
	end

	if not is_valid_provider then
		return false, "Invalid LLM provider. Must be one of: " .. table.concat(valid_providers, ", ")
	end

	return true
end
return M
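
To show how the pieces above fit together, the snippet below layers user options over the defaults with `setup` (which relies on `deep_merge`, so untouched nested keys survive) and then runs `validate`. The require path comes from the `@mod` annotation; the error handling is an assumption.

-- Example use of the config module above (error handling is illustrative).
local config_mod = require("codetyper.config")

local config = config_mod.setup({
	llm = { provider = "copilot" },       -- only this nested key is overridden
	indexer = { max_file_size = 200000 }, -- sibling defaults are preserved by deep_merge
})

local ok, err = config_mod.validate(config)
if not ok then
	vim.notify("codetyper: " .. err, vim.log.levels.ERROR)
end

assert(config.llm.ollama.model == "deepseek-coder:6.7b") -- untouched default survives the merge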

View File

@@ -0,0 +1,88 @@
--- Model pricing table (per 1M tokens in USD)
---@type table<string, {input: number, cached_input: number|nil, output: number|nil}>
local pricing = {
	-- GPT-5.x series
	["gpt-5.2"] = { input = 1.75, cached_input = 0.175, output = 14.00 },
	["gpt-5.1"] = { input = 1.25, cached_input = 0.125, output = 10.00 },
	["gpt-5"] = { input = 1.25, cached_input = 0.125, output = 10.00 },
	["gpt-5-mini"] = { input = 0.25, cached_input = 0.025, output = 2.00 },
	["gpt-5-nano"] = { input = 0.05, cached_input = 0.005, output = 0.40 },
	["gpt-5.2-chat-latest"] = { input = 1.75, cached_input = 0.175, output = 14.00 },
	["gpt-5.1-chat-latest"] = { input = 1.25, cached_input = 0.125, output = 10.00 },
	["gpt-5-chat-latest"] = { input = 1.25, cached_input = 0.125, output = 10.00 },
	["gpt-5.2-codex"] = { input = 1.75, cached_input = 0.175, output = 14.00 },
	["gpt-5.1-codex-max"] = { input = 1.25, cached_input = 0.125, output = 10.00 },
	["gpt-5.1-codex"] = { input = 1.25, cached_input = 0.125, output = 10.00 },
	["gpt-5-codex"] = { input = 1.25, cached_input = 0.125, output = 10.00 },
	["gpt-5.2-pro"] = { input = 21.00, cached_input = nil, output = 168.00 },
	["gpt-5-pro"] = { input = 15.00, cached_input = nil, output = 120.00 },
	["gpt-5.1-codex-mini"] = { input = 0.25, cached_input = 0.025, output = 2.00 },
	["gpt-5-search-api"] = { input = 1.25, cached_input = 0.125, output = 10.00 },
	-- GPT-4.x series
	["gpt-4.1"] = { input = 2.00, cached_input = 0.50, output = 8.00 },
	["gpt-4.1-mini"] = { input = 0.40, cached_input = 0.10, output = 1.60 },
	["gpt-4.1-nano"] = { input = 0.10, cached_input = 0.025, output = 0.40 },
	["gpt-4o"] = { input = 2.50, cached_input = 1.25, output = 10.00 },
	["gpt-4o-2024-05-13"] = { input = 5.00, cached_input = nil, output = 15.00 },
	["gpt-4o-mini"] = { input = 0.15, cached_input = 0.075, output = 0.60 },
	-- Realtime models
	["gpt-realtime"] = { input = 4.00, cached_input = 0.40, output = 16.00 },
	["gpt-realtime-mini"] = { input = 0.60, cached_input = 0.06, output = 2.40 },
	["gpt-4o-realtime-preview"] = { input = 5.00, cached_input = 2.50, output = 20.00 },
	["gpt-4o-mini-realtime-preview"] = { input = 0.60, cached_input = 0.30, output = 2.40 },
	-- Audio models
	["gpt-audio"] = { input = 2.50, cached_input = nil, output = 10.00 },
	["gpt-audio-mini"] = { input = 0.60, cached_input = nil, output = 2.40 },
	["gpt-4o-audio-preview"] = { input = 2.50, cached_input = nil, output = 10.00 },
	["gpt-4o-mini-audio-preview"] = { input = 0.15, cached_input = nil, output = 0.60 },
	-- O-series reasoning models
	["o1"] = { input = 15.00, cached_input = 7.50, output = 60.00 },
	["o1-pro"] = { input = 150.00, cached_input = nil, output = 600.00 },
	["o3-pro"] = { input = 20.00, cached_input = nil, output = 80.00 },
	["o3"] = { input = 2.00, cached_input = 0.50, output = 8.00 },
	["o3-deep-research"] = { input = 10.00, cached_input = 2.50, output = 40.00 },
	["o4-mini"] = { input = 1.10, cached_input = 0.275, output = 4.40 },
	["o4-mini-deep-research"] = { input = 2.00, cached_input = 0.50, output = 8.00 },
	["o3-mini"] = { input = 1.10, cached_input = 0.55, output = 4.40 },
	["o1-mini"] = { input = 1.10, cached_input = 0.55, output = 4.40 },
	-- Codex
	["codex-mini-latest"] = { input = 1.50, cached_input = 0.375, output = 6.00 },
	-- Search models
	["gpt-4o-mini-search-preview"] = { input = 0.15, cached_input = nil, output = 0.60 },
	["gpt-4o-search-preview"] = { input = 2.50, cached_input = nil, output = 10.00 },
	-- Computer use
	["computer-use-preview"] = { input = 3.00, cached_input = nil, output = 12.00 },
	-- Image models
	["gpt-image-1.5"] = { input = 5.00, cached_input = 1.25, output = 10.00 },
	["chatgpt-image-latest"] = { input = 5.00, cached_input = 1.25, output = 10.00 },
	["gpt-image-1"] = { input = 5.00, cached_input = 1.25, output = nil },
	["gpt-image-1-mini"] = { input = 2.00, cached_input = 0.20, output = nil },
	-- Claude models
	["claude-3-opus"] = { input = 15.00, cached_input = 7.50, output = 75.00 },
	["claude-3-sonnet"] = { input = 3.00, cached_input = 1.50, output = 15.00 },
	["claude-3-haiku"] = { input = 0.25, cached_input = 0.125, output = 1.25 },
	["claude-3.5-sonnet"] = { input = 3.00, cached_input = 1.50, output = 15.00 },
	["claude-3.5-haiku"] = { input = 0.80, cached_input = 0.40, output = 4.00 },
	-- Ollama/Local models (free)
	["ollama"] = { input = 0, cached_input = 0, output = 0 },
	["codellama"] = { input = 0, cached_input = 0, output = 0 },
	["llama2"] = { input = 0, cached_input = 0, output = 0 },
	["llama3"] = { input = 0, cached_input = 0, output = 0 },
	["mistral"] = { input = 0, cached_input = 0, output = 0 },
	["deepseek-coder"] = { input = 0, cached_input = 0, output = 0 },
	-- Copilot (included in the subscription, but usage is still tracked)
	["copilot"] = { input = 0, cached_input = 0, output = 0 },
}
return pricing
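
Since these rates are quoted per one million tokens, a cost estimate divides each token count by 1e6 before multiplying. A minimal sketch follows; the require path and the helper name are illustrative, and cached-input pricing is ignored here.

-- Illustrative cost estimate against the pricing table above (cached_input rates are not applied).
local pricing = require("codetyper.pricing") -- assumed module path

---@param model string
---@param input_tokens number
---@param output_tokens number|nil
---@return number|nil cost Estimated USD cost, or nil for unknown models
local function estimate_cost(model, input_tokens, output_tokens)
	local p = pricing[model]
	if not p then
		return nil -- unknown model: let the caller decide how to handle it
	end
	local cost = (input_tokens / 1e6) * p.input
	if p.output and output_tokens then
		cost = cost + (output_tokens / 1e6) * p.output
	end
	return cost
end

-- e.g. 12,000 prompt tokens and 800 completion tokens on gpt-4o-mini:
-- 0.012 * 0.15 + 0.0008 * 0.60 = 0.00228 USD
print(estimate_cost("gpt-4o-mini", 12000, 800))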