diff --git a/lua/avante/config.lua b/lua/avante/config.lua
index aca3c0c..359dc58 100644
--- a/lua/avante/config.lua
+++ b/lua/avante/config.lua
@@ -43,6 +43,7 @@ M._defaults = {
     llm_model = "", -- The LLM model to use for RAG service
     embed_model = "", -- The embedding model to use for RAG service
     endpoint = "https://api.openai.com/v1", -- The API endpoint for RAG service
+    docker_extra_args = "", -- Extra arguments to pass to the docker command
   },
   web_search_engine = {
     provider = "tavily",
diff --git a/lua/avante/rag_service.lua b/lua/avante/rag_service.lua
index 253749d..c3dee61 100644
--- a/lua/avante/rag_service.lua
+++ b/lua/avante/rag_service.lua
@@ -74,7 +74,7 @@ function M.launch_rag_service(cb)
     M.stop_rag_service()
   end
   local cmd_ = string.format(
-    "docker run -d -p %d:8000 --name %s -v %s:/data -v %s:/host:ro -e ALLOW_RESET=TRUE -e DATA_DIR=/data -e RAG_PROVIDER=%s -e %s_API_KEY=%s -e %s_API_BASE=%s -e RAG_LLM_MODEL=%s -e RAG_EMBED_MODEL=%s %s",
+    "docker run -d -p %d:8000 --name %s -v %s:/data -v %s:/host:ro -e ALLOW_RESET=TRUE -e DATA_DIR=/data -e RAG_PROVIDER=%s -e %s_API_KEY=%s -e %s_API_BASE=%s -e RAG_LLM_MODEL=%s -e RAG_EMBED_MODEL=%s %s %s",
     port,
     container_name,
     data_path,
@@ -86,6 +86,7 @@ function M.launch_rag_service(cb)
     Config.rag_service.endpoint,
     Config.rag_service.llm_model,
     Config.rag_service.embed_model,
+    Config.rag_service.docker_extra_args,
     image
   )
   vim.fn.jobstart(cmd_, {