feat: implement system prompt builder with modes, tiers, and providers

- Added a system prompt builder to create dynamic prompts based on different modes (ask, code review, composer, debug, implement, plan, refactor).
- Introduced prompt tiers (balanced, fast, thorough) to tailor responses based on user needs.
- Integrated multiple AI providers (Anthropic, Copilot, Google, Ollama, OpenAI) for flexible backend support.
- Updated agent and multi-agent services to utilize the new prompt system.
This commit is contained in:
2026-02-04 23:01:34 -05:00
parent db79856b08
commit b519f2e8a7
30 changed files with 5625 additions and 64 deletions

View File

@@ -20,6 +20,8 @@ import { webFetchTool } from "@tools/web-fetch/execute";
import { multiEditTool } from "@tools/multi-edit/execute";
import { lspTool } from "@/tools/lsp";
import { applyPatchTool } from "@/tools/apply-patch";
import { planApprovalTool } from "@/tools/plan-approval/execute";
import { taskAgentTool } from "@/tools/task-agent/execute";
import {
isMCPTool,
executeMCPTool,
@@ -49,6 +51,8 @@ export const tools: ToolDefinition[] = [
webFetchTool,
lspTool,
applyPatchTool,
planApprovalTool,
taskAgentTool,
];
/**

View File

@@ -0,0 +1,450 @@
/**
* Plan Approval Tool
*
* Allows agents to submit implementation plans for user approval.
* This tool implements the approval gate pattern from claude-code and opencode.
*/
import { z } from "zod";
import type { ToolDefinition, ToolContext, ToolResult } from "@/types/tools";
import {
createPlan,
addPlanStep,
updatePlanContext,
addPlanRisk,
submitPlanForApproval,
formatPlanForDisplay,
getPlan,
analyzeTask,
} from "@services/plan-mode/plan-service";
/**
 * Input schema for the plan approval tool.
 *
 * Only `action` is required; every other field is optional because each
 * action consumes a different subset. Per-action requirements are
 * enforced at runtime by the individual handlers.
 */
const planApprovalSchema = z.object({
  action: z
    .enum(["create", "add_step", "add_context", "add_risk", "submit", "check_status", "analyze_task"])
    .describe("The action to perform"),
  // create
  title: z.string().optional().describe("Title of the implementation plan"),
  summary: z.string().optional().describe("Summary of what the plan will accomplish"),
  // add_step
  plan_id: z.string().optional().describe("ID of the plan to modify"),
  step_title: z.string().optional().describe("Title of the step"),
  step_description: z.string().optional().describe("Description of what this step does"),
  files_affected: z.array(z.string()).optional().describe("Files that will be affected by this step"),
  risk_level: z.enum(["low", "medium", "high"]).optional().describe("Risk level of this step"),
  // add_context
  files_analyzed: z.array(z.string()).optional().describe("Files that were analyzed for this plan"),
  current_architecture: z.string().optional().describe("Description of current architecture"),
  dependencies: z.array(z.string()).optional().describe("Dependencies identified"),
  // add_risk
  risk_description: z.string().optional().describe("Description of the risk"),
  risk_impact: z.enum(["low", "medium", "high"]).optional().describe("Impact level of the risk"),
  risk_mitigation: z.string().optional().describe("How to mitigate this risk"),
  // submit
  testing_strategy: z.string().optional().describe("How the changes will be tested"),
  rollback_plan: z.string().optional().describe("How to rollback if something goes wrong"),
  // analyze_task
  task_description: z.string().optional().describe("The task to analyze for complexity"),
});

type PlanApprovalParams = z.infer<typeof planApprovalSchema>;
/**
 * Execute the plan approval tool.
 *
 * Dispatches to the handler for the requested action. The handler map is
 * validated with `satisfies` against the schema's action union, so adding
 * a new action to the enum without adding a handler becomes a
 * compile-time error instead of a silent runtime "Unknown action".
 */
export const executePlanApproval = async (
  params: PlanApprovalParams,
  _ctx: ToolContext,
): Promise<ToolResult> => {
  const actionHandlers = {
    create: () => handleCreate(params),
    add_step: () => handleAddStep(params),
    add_context: () => handleAddContext(params),
    add_risk: () => handleAddRisk(params),
    submit: () => handleSubmit(params),
    check_status: () => handleCheckStatus(params),
    analyze_task: () => handleAnalyzeTask(params),
  } satisfies Record<PlanApprovalParams["action"], () => ToolResult>;
  const handler = actionHandlers[params.action];
  if (!handler) {
    // Defensive: unreachable when params passed schema validation, but
    // protects against callers that bypass zod parsing.
    return {
      success: false,
      title: "Unknown action",
      output: "",
      error: `Unknown action: ${params.action}`,
    };
  }
  return handler();
};
/**
 * Create a new implementation plan from a title and summary.
 * Both fields are mandatory for this action.
 */
const handleCreate = (params: PlanApprovalParams): ToolResult => {
  const { title, summary } = params;
  if (!title || !summary) {
    return {
      success: false,
      title: "Missing parameters",
      output: "",
      error: "Both title and summary are required for create action",
    };
  }
  const plan = createPlan(title, summary);
  return {
    success: true,
    title: "Plan created",
    output: `Created implementation plan with ID: ${plan.id}\n\nNow add steps, context, and risks before submitting for approval.`,
  };
};
/**
 * Append an implementation step to an existing plan.
 * filesAffected defaults to [] and riskLevel to "low" when omitted.
 */
const handleAddStep = (params: PlanApprovalParams): ToolResult => {
  const { plan_id, step_title, step_description } = params;
  if (!plan_id || !step_title || !step_description) {
    return {
      success: false,
      title: "Missing parameters",
      output: "",
      error: "plan_id, step_title, and step_description are required",
    };
  }
  const step = addPlanStep(plan_id, {
    title: step_title,
    description: step_description,
    filesAffected: params.files_affected ?? [],
    riskLevel: params.risk_level ?? "low",
  });
  return step
    ? {
        success: true,
        title: "Step added",
        output: `Added step: ${step.title} (ID: ${step.id})`,
      }
    : {
        success: false,
        title: "Failed to add step",
        output: "",
        error: "Plan not found or not in drafting status",
      };
};
/**
 * Merge analysis context (files examined, architecture notes,
 * dependencies) into a plan. Only fields the caller supplied are
 * forwarded, so existing context values are not clobbered.
 */
const handleAddContext = (params: PlanApprovalParams): ToolResult => {
  if (!params.plan_id) {
    return {
      success: false,
      title: "Missing parameters",
      output: "",
      error: "plan_id is required",
    };
  }
  // Build a sparse patch containing only the provided fields.
  const context = {
    ...(params.files_analyzed ? { filesAnalyzed: params.files_analyzed } : {}),
    ...(params.current_architecture ? { currentArchitecture: params.current_architecture } : {}),
    ...(params.dependencies ? { dependencies: params.dependencies } : {}),
  };
  const updated = updatePlanContext(params.plan_id, context);
  return updated
    ? {
        success: true,
        title: "Context updated",
        output: "Plan context has been updated",
      }
    : {
        success: false,
        title: "Failed to update context",
        output: "",
        error: "Plan not found",
      };
};
/**
 * Record a risk (description, impact level, and mitigation) on a plan.
 * All four fields are mandatory for this action.
 */
const handleAddRisk = (params: PlanApprovalParams): ToolResult => {
  const { plan_id, risk_description, risk_impact, risk_mitigation } = params;
  if (!plan_id || !risk_description || !risk_impact || !risk_mitigation) {
    return {
      success: false,
      title: "Missing parameters",
      output: "",
      error: "plan_id, risk_description, risk_impact, and risk_mitigation are required",
    };
  }
  const added = addPlanRisk(plan_id, {
    description: risk_description,
    impact: risk_impact,
    mitigation: risk_mitigation,
  });
  return added
    ? {
        success: true,
        title: "Risk added",
        output: `Added risk: ${risk_description}`,
      }
    : {
        success: false,
        title: "Failed to add risk",
        output: "",
        error: "Plan not found",
      };
};
/**
 * Finalize a plan and put it in front of the user for approval.
 * Falls back to default testing/rollback strategies when not supplied.
 */
const handleSubmit = (params: PlanApprovalParams): ToolResult => {
  const planId = params.plan_id;
  if (!planId) {
    return {
      success: false,
      title: "Missing parameters",
      output: "",
      error: "plan_id is required",
    };
  }
  const submitted = submitPlanForApproval(
    planId,
    params.testing_strategy ?? "Run existing tests and verify changes manually",
    params.rollback_plan ?? "Revert changes using git",
  );
  if (!submitted) {
    return {
      success: false,
      title: "Failed to submit plan",
      output: "",
      error: "Plan not found or not in drafting status",
    };
  }
  // Re-fetch so the displayed plan reflects the submitted state.
  const plan = getPlan(planId);
  if (!plan) {
    return {
      success: false,
      title: "Plan not found",
      output: "",
      error: "Plan was submitted but could not be retrieved",
    };
  }
  return {
    success: true,
    title: "Plan submitted for approval",
    output: formatPlanForDisplay(plan),
    metadata: {
      planId: plan.id,
      planStatus: "pending",
      requiresApproval: true,
    },
  };
};
/**
 * Report the lifecycle status of a plan, with guidance for the agent on
 * what to do next in each state. Also reports step-completion counts.
 */
const handleCheckStatus = (params: PlanApprovalParams): ToolResult => {
  const planId = params.plan_id;
  if (!planId) {
    return {
      success: false,
      title: "Missing parameters",
      output: "",
      error: "plan_id is required",
    };
  }
  const plan = getPlan(planId);
  if (!plan) {
    return {
      success: false,
      title: "Plan not found",
      output: "",
      error: `No plan found with ID: ${planId}`,
    };
  }
  // Per-status guidance shown to the agent.
  const statusMessages: Record<string, string> = {
    drafting: "Plan is being drafted. Add steps, context, and risks, then submit for approval.",
    pending: "Plan is awaiting user approval. Wait for the user to approve or provide feedback.",
    approved: "Plan has been approved. You may proceed with implementation.",
    rejected: `Plan was rejected. Reason: ${plan.rejectionReason ?? "No reason provided"}`,
    executing: "Plan is currently being executed.",
    completed: "Plan execution completed successfully.",
    failed: "Plan execution failed.",
  };
  const completedSteps = plan.steps.filter(s => s.status === "completed").length;
  return {
    success: true,
    title: `Plan status: ${plan.status}`,
    output: statusMessages[plan.status] ?? `Unknown status: ${plan.status}`,
    metadata: {
      planId: plan.id,
      planStatus: plan.status,
      stepsCompleted: completedSteps,
      totalSteps: plan.steps.length,
    },
  };
};
/**
 * Assess a task description for complexity and report whether it
 * warrants a formal plan-approval cycle before implementation.
 */
const handleAnalyzeTask = (params: PlanApprovalParams): ToolResult => {
  if (!params.task_description) {
    return {
      success: false,
      title: "Missing parameters",
      output: "",
      error: "task_description is required",
    };
  }
  const analysis = analyzeTask(params.task_description);
  // Reasons section is only rendered when the analysis produced any.
  const reasonLines =
    analysis.reasons.length > 0
      ? ["", "**Reasons**:", ...analysis.reasons.map(r => `- ${r}`)]
      : [];
  const report = [
    "## Task Analysis",
    "",
    `**Complexity**: ${analysis.complexity}`,
    `**Requires Plan Approval**: ${analysis.requiresPlanApproval ? "Yes" : "No"}`,
    "",
    `**Suggested Approach**: ${analysis.suggestedApproach}`,
    ...reasonLines,
  ].join("\n");
  return {
    success: true,
    title: `Task complexity: ${analysis.complexity}`,
    output: report,
    metadata: {
      complexity: analysis.complexity,
      requiresPlanApproval: analysis.requiresPlanApproval,
    },
  };
};
/**
 * Tool definition for plan approval.
 *
 * Registered in the global tool list; `execute` dispatches on `action`.
 */
export const planApprovalTool: ToolDefinition<typeof planApprovalSchema> = {
  name: "plan_approval",
  // NOTE(review): the description doubles as usage guidance for the
  // approval workflow — presumably surfaced to the model verbatim via the
  // tool schema; confirm with the tool registry.
  description: `Submit implementation plans for user approval before executing complex tasks.
Use this tool when:
- Making changes to 3+ files
- Refactoring code
- Architectural changes
- Security-related changes
- Database modifications
Workflow:
1. analyze_task - Check if task needs plan approval
2. create - Create a new plan with title and summary
3. add_context - Add files analyzed, architecture info
4. add_step - Add each implementation step (repeat)
5. add_risk - Add identified risks
6. submit - Submit plan and wait for user approval
7. check_status - Check if user approved
After approval, proceed with implementation. If rejected, address feedback and resubmit.`,
  parameters: planApprovalSchema,
  execute: executePlanApproval,
};

View File

@@ -0,0 +1,391 @@
/**
* Task Agent Tool
*
* Allows spawning specialized sub-agents for complex tasks.
* Implements the agent delegation pattern from claude-code.
*/
import { z } from "zod";
import { v4 as uuidv4 } from "uuid";
import type { ToolDefinition, ToolContext, ToolResult } from "@/types/tools";
/**
 * Agent types available for delegation.
 *
 * Each entry fixes the sub-agent's tool allowlist, model tier, and
 * default iteration budget. The tier name is mapped to a concrete model
 * in runAgentInForeground; read-only agents (explore, review) get no
 * write/bash tools.
 */
const AGENT_TYPES = {
  explore: {
    description: "Fast codebase exploration (read-only)",
    tools: ["glob", "grep", "read"],
    tier: "fast",
    maxTurns: 10,
  },
  implement: {
    description: "Code writing and modification",
    tools: ["glob", "grep", "read", "write", "edit", "bash"],
    tier: "balanced",
    maxTurns: 20,
  },
  test: {
    description: "Test creation and execution",
    tools: ["glob", "grep", "read", "write", "edit", "bash"],
    tier: "balanced",
    maxTurns: 15,
  },
  review: {
    description: "Code review and suggestions",
    tools: ["glob", "grep", "read"],
    tier: "balanced",
    maxTurns: 10,
  },
  refactor: {
    description: "Code refactoring and improvement",
    tools: ["glob", "grep", "read", "write", "edit"],
    tier: "thorough",
    maxTurns: 25,
  },
  plan: {
    // Planning agents may submit plans for approval but cannot edit files.
    description: "Planning and architecture design",
    tools: ["glob", "grep", "read", "plan_approval"],
    tier: "thorough",
    maxTurns: 15,
  },
} as const;

// Union of the agent type keys ("explore" | "implement" | ...).
type AgentType = keyof typeof AGENT_TYPES;
/**
 * Input schema for the task agent tool.
 * run_in_background defaults to false (blocking execution).
 */
const taskAgentSchema = z.object({
  agent_type: z
    .enum(["explore", "implement", "test", "review", "refactor", "plan"])
    .describe("The type of specialized agent to spawn"),
  task: z.string().describe("The task for the agent to perform"),
  context_files: z.array(z.string()).optional().describe("Files to include as context for the agent"),
  run_in_background: z.boolean().optional().default(false).describe("Whether to run the agent in the background"),
  max_turns: z.number().optional().describe("Maximum number of turns for the agent"),
});

type TaskAgentParams = z.infer<typeof taskAgentSchema>;
/**
 * Active background agents, keyed by the generated agent ID.
 *
 * Populated by runAgentInBackground; entries are deleted 5 minutes after
 * the underlying promise settles, so results are retrievable for a
 * limited window via getBackgroundAgentStatus.
 */
const backgroundAgents = new Map<string, {
  type: AgentType;
  task: string;
  startTime: number; // Date.now() at launch; used to report running time
  promise: Promise<ToolResult>;
}>();
/**
 * Entry point for the task agent tool.
 *
 * Builds the system and task prompts for the requested agent type, then
 * runs the agent either in the background (returning a handle) or in the
 * foreground (blocking until the result is ready).
 */
export const executeTaskAgent = async (
  params: TaskAgentParams,
  ctx: ToolContext,
): Promise<ToolResult> => {
  const config = AGENT_TYPES[params.agent_type];
  const systemPrompt = buildAgentSystemPrompt(params.agent_type, config);
  const taskPrompt = buildTaskPrompt(params);
  const run = params.run_in_background ? runAgentInBackground : runAgentInForeground;
  return run(params, systemPrompt, taskPrompt, ctx);
};
/**
 * Build the system prompt for a sub-agent.
 *
 * Combines the per-type role prompt with the agent's tool allowlist and
 * tier/turn budget from its AGENT_TYPES entry.
 */
const buildAgentSystemPrompt = (
  agentType: AgentType,
  config: typeof AGENT_TYPES[AgentType],
): string => {
  // Role prompts per agent type. The template bodies are emitted into the
  // system prompt verbatim, so their content is intentionally flush-left.
  const prompts: Record<AgentType, string> = {
    explore: `You are an EXPLORATION agent. Your task is to quickly understand code.
## Rules
- USE ONLY: glob, grep, read
- DO NOT modify any files
- Focus on finding and understanding relevant code
- Report findings concisely
## Output Format
Provide structured findings:
- Key files found
- Important patterns
- Relevant code locations (file:line)`,
    implement: `You are an IMPLEMENTATION agent. Your task is to write code.
## Rules
- Read existing code before modifying
- Follow existing patterns and conventions
- Make minimal, focused changes
- Verify changes with type checks or tests
## Output Format
- Summarize changes made
- List files modified
- Report any issues encountered`,
    test: `You are a TESTING agent. Your task is to write and run tests.
## Rules
- Write deterministic tests
- Mock external dependencies
- Cover edge cases
- Run tests after writing
## Output Format
- Tests created
- Test results
- Coverage notes`,
    review: `You are a REVIEW agent. Your task is to review code quality.
## Rules
- Check for bugs and issues
- Evaluate code quality
- Suggest improvements
- Be constructive
## Output Format
- Issues found (severity, location, description)
- Suggestions for improvement
- Overall assessment`,
    refactor: `You are a REFACTORING agent. Your task is to improve code structure.
## Rules
- Preserve existing behavior
- Improve readability and maintainability
- Follow SOLID principles
- Verify with tests after changes
## Output Format
- Changes made
- Improvements achieved
- Verification results`,
    plan: `You are a PLANNING agent. Your task is to design implementation plans.
## Rules
- Explore thoroughly before planning
- Consider edge cases and risks
- Break down into manageable steps
- Use plan_approval tool to submit plans
## Output Format
- Context analysis
- Implementation steps
- Risks and mitigations
- Testing strategy`,
  };
  // Append the concrete tool list and tier/turn budget shared by all types.
  return `${prompts[agentType]}
## Available Tools
${config.tools.join(", ")}
## Tier
${config.tier} (max ${config.maxTurns} turns)`;
};
/**
 * Compose the task prompt for a sub-agent: the task description,
 * followed by an optional list of context files.
 */
const buildTaskPrompt = (params: TaskAgentParams): string => {
  const sections: string[] = [`## Task\n${params.task}`];
  const files = params.context_files;
  if (files && files.length > 0) {
    const fileList = files.map(f => `- ${f}`).join("\n");
    sections.push(`\n## Context Files\n${fileList}`);
  }
  return sections.join("\n");
};
/**
 * Run a sub-agent synchronously and wait for its result.
 *
 * The agent runner is imported lazily to avoid a circular dependency
 * between this tool and the agent service. Any thrown error is converted
 * into a failure ToolResult rather than propagated.
 */
const runAgentInForeground = async (
  params: TaskAgentParams,
  systemPrompt: string,
  taskPrompt: string,
  _ctx: ToolContext,
): Promise<ToolResult> => {
  const startedAt = Date.now();
  try {
    const { runAgent } = await import("@services/core/agent");
    const { tier, maxTurns } = AGENT_TYPES[params.agent_type];
    // Map the agent's tier to a concrete model name.
    const tierModels: Record<string, string> = {
      fast: "gpt-4o-mini",
      balanced: "gpt-4o",
      thorough: "gpt-4o",
    };
    const result = await runAgent(taskPrompt, systemPrompt, {
      provider: "copilot",
      model: tierModels[tier] ?? "gpt-4o",
      autoApprove: true,
      maxIterations: params.max_turns ?? maxTurns,
    });
    const duration = Date.now() - startedAt;
    return {
      success: result.success,
      title: `${params.agent_type} agent completed`,
      output: `## Agent Result (${params.agent_type})\n\nDuration: ${(duration / 1000).toFixed(1)}s\nIterations: ${result.iterations}\nTool calls: ${result.toolCalls.length}\n\n${result.finalResponse}`,
      metadata: {
        agentType: params.agent_type,
        duration,
        iterations: result.iterations,
        toolCalls: result.toolCalls.length,
      },
    };
  } catch (error) {
    return {
      success: false,
      title: `${params.agent_type} agent failed`,
      output: "",
      error: error instanceof Error ? error.message : String(error),
    };
  }
};
/**
 * Launch a sub-agent without blocking and return a handle immediately.
 *
 * The running promise is stored in backgroundAgents so the caller can
 * later poll it with getBackgroundAgentStatus using the returned ID.
 */
const runAgentInBackground = async (
  params: TaskAgentParams,
  systemPrompt: string,
  taskPrompt: string,
  ctx: ToolContext,
): Promise<ToolResult> => {
  const agentId = uuidv4();
  const execution = runAgentInForeground(params, systemPrompt, taskPrompt, ctx);
  backgroundAgents.set(agentId, {
    type: params.agent_type,
    task: params.task,
    startTime: Date.now(),
    promise: execution,
  });
  // Fire-and-forget cleanup: keep the result around for 5 minutes after
  // completion, then drop the registry entry.
  void execution.then(() => {
    setTimeout(() => backgroundAgents.delete(agentId), 5 * 60 * 1000);
  });
  return {
    success: true,
    title: `${params.agent_type} agent started in background`,
    output: `Agent ID: ${agentId}\n\nThe ${params.agent_type} agent is now running in the background.\nUse the agent ID to check status or retrieve results.`,
    metadata: {
      agentId,
      agentType: params.agent_type,
      runningInBackground: true,
    },
  };
};
/**
 * Poll a background agent by ID.
 *
 * Returns the agent's ToolResult if it has finished, a progress report if
 * it is still running, or a failure result for unknown IDs. The agent
 * promise is given 100ms to settle before "still running" is reported.
 */
export const getBackgroundAgentStatus = async (
  agentId: string,
): Promise<ToolResult> => {
  const entry = backgroundAgents.get(agentId);
  if (!entry) {
    return {
      success: false,
      title: "Agent not found",
      output: "",
      error: `No background agent found with ID: ${agentId}`,
    };
  }
  const settled = entry.promise.then(r => ({ completed: true as const, result: r }));
  const timeout = new Promise<{ completed: false }>(resolve => {
    setTimeout(() => resolve({ completed: false }), 100);
  });
  const outcome = await Promise.race([settled, timeout]);
  if (outcome.completed) {
    return outcome.result;
  }
  const runningTime = Date.now() - entry.startTime;
  return {
    success: true,
    title: "Agent still running",
    output: `Agent ${entry.type} is still running.\nTask: ${entry.task}\nRunning for: ${(runningTime / 1000).toFixed(1)}s`,
    metadata: {
      agentId,
      agentType: entry.type,
      runningTime,
      status: "running",
    },
  };
};
/**
 * Tool definition for task agent.
 *
 * Registered in the global tool list alongside planApprovalTool.
 */
export const taskAgentTool: ToolDefinition<typeof taskAgentSchema> = {
  name: "task_agent",
  // NOTE(review): description text is model-facing guidance — presumably
  // surfaced verbatim in the tool schema; confirm with the tool registry.
  description: `Spawn a specialized sub-agent for complex tasks.
Available agent types:
- explore: Fast codebase exploration (read-only)
- implement: Code writing and modification
- test: Test creation and execution
- review: Code review and suggestions
- refactor: Code refactoring and improvement
- plan: Planning and architecture design
Use agents when:
- Task requires specialized focus
- Multiple parallel investigations needed
- Complex implementation that benefits from isolation
Agents run with their own context and tools, returning results when complete.`,
  parameters: taskAgentSchema,
  execute: executeTaskAgent,
};