feat: implement parallel agent execution and unified agent registry
- Fix streaming JSON parsing issues (buffer processing on stream end) - Increase max_tokens for tool-enabled requests (16384) - Add parallel tool execution for task_agent, read, glob, grep (up to 3 concurrent) - Register task_agent tool with queue system and concurrency control - Add session-based isolation with parentSessionId tracking - Create unified agent registry mapping agents from: - OpenCode (7 agents: build, plan, explore, general, etc.) - Claude Code (12 agents: code-explorer, code-architect, etc.) - Cursor (3 agents: pair-programmer, cli, chat) - CodeTyper native (6 agents) - Add agent/skill creation system with LLM-based generation - Store custom agents in .codetyper/agents/*.md - Store custom skills in .codetyper/skills/*/SKILL.md
This commit is contained in:
255
src/services/agent-creator-service.ts
Normal file
255
src/services/agent-creator-service.ts
Normal file
@@ -0,0 +1,255 @@
|
||||
/**
|
||||
* Agent Creator Service
|
||||
*
|
||||
* Creates new agents and skills from user descriptions using LLM.
|
||||
* Stores them in .codetyper/agents/ and .codetyper/skills/.
|
||||
*/
|
||||
|
||||
import { mkdir, writeFile, readdir } from "fs/promises";
|
||||
import { join } from "path";
|
||||
import { chat } from "@providers/core/chat";
|
||||
import {
|
||||
AGENT_CREATION_PROMPT,
|
||||
SKILL_CREATION_PROMPT,
|
||||
CODETYPER_DIRS,
|
||||
} from "@constants/agent-templates";
|
||||
import type { ProviderName } from "@/types/providers";
|
||||
|
||||
/**
 * Result of agent/skill creation.
 */
export interface CreationResult {
  // True when the artifact was generated and written to disk.
  success: boolean;
  // Path of the written file (present on success).
  filePath?: string;
  // Extracted agent/skill name (present on success).
  name?: string;
  // Human-readable failure reason (present on failure).
  error?: string;
}
|
||||
|
||||
/**
 * Options for creating an agent.
 */
export interface CreateAgentOptions {
  // Natural-language description of the desired agent.
  description: string;
  // Project root; the agent file is written under .codetyper/agents/ here.
  workingDir: string;
  // LLM provider to use (defaults to "copilot").
  provider?: ProviderName;
  // Optional model override for the chosen provider.
  model?: string;
}
|
||||
|
||||
/**
 * Options for creating a skill.
 */
export interface CreateSkillOptions {
  // Natural-language description of the desired skill.
  description: string;
  // Project root; the skill is written under .codetyper/skills/<name>/ here.
  workingDir: string;
  // LLM provider to use (defaults to "copilot").
  provider?: ProviderName;
  // Optional model override for the chosen provider.
  model?: string;
}
|
||||
|
||||
/**
|
||||
* Extract name from generated markdown
|
||||
*/
|
||||
const extractName = (markdown: string): string | null => {
|
||||
const match = markdown.match(/^name:\s*(.+)$/m);
|
||||
return match ? match[1].trim() : null;
|
||||
};
|
||||
|
||||
/**
|
||||
* Clean markdown content (remove code fences if present)
|
||||
*/
|
||||
const cleanMarkdown = (content: string): string => {
|
||||
// Remove ```markdown and ``` wrappers if present
|
||||
let cleaned = content.trim();
|
||||
if (cleaned.startsWith("```markdown")) {
|
||||
cleaned = cleaned.slice("```markdown".length);
|
||||
} else if (cleaned.startsWith("```")) {
|
||||
cleaned = cleaned.slice(3);
|
||||
}
|
||||
if (cleaned.endsWith("```")) {
|
||||
cleaned = cleaned.slice(0, -3);
|
||||
}
|
||||
return cleaned.trim();
|
||||
};
|
||||
|
||||
/**
|
||||
* Ensure directory exists
|
||||
*/
|
||||
const ensureDir = async (dirPath: string): Promise<void> => {
|
||||
await mkdir(dirPath, { recursive: true });
|
||||
};
|
||||
|
||||
/**
|
||||
* Create a new agent from description using LLM
|
||||
*/
|
||||
export const createAgentFromDescription = async (
|
||||
options: CreateAgentOptions,
|
||||
): Promise<CreationResult> => {
|
||||
const { description, workingDir, provider = "copilot", model } = options;
|
||||
|
||||
try {
|
||||
// Generate agent using LLM
|
||||
const prompt = AGENT_CREATION_PROMPT.replace("{{description}}", description);
|
||||
|
||||
const response = await chat(
|
||||
provider,
|
||||
[
|
||||
{ role: "system", content: "You are an expert agent configuration generator." },
|
||||
{ role: "user", content: prompt },
|
||||
],
|
||||
{ model, temperature: 0.7 },
|
||||
);
|
||||
|
||||
if (!response.content) {
|
||||
return { success: false, error: "No response from LLM" };
|
||||
}
|
||||
|
||||
// Clean and parse the generated markdown
|
||||
const markdown = cleanMarkdown(response.content);
|
||||
const name = extractName(markdown);
|
||||
|
||||
if (!name) {
|
||||
return { success: false, error: "Could not extract agent name from generated content" };
|
||||
}
|
||||
|
||||
// Validate name format
|
||||
if (!/^[a-z][a-z0-9-]*[a-z0-9]$/.test(name) || name.length < 3 || name.length > 50) {
|
||||
return {
|
||||
success: false,
|
||||
error: `Invalid agent name: ${name}. Must be 3-50 chars, lowercase, hyphens only.`,
|
||||
};
|
||||
}
|
||||
|
||||
// Create agents directory and write file
|
||||
const agentsDir = join(workingDir, CODETYPER_DIRS.agents);
|
||||
await ensureDir(agentsDir);
|
||||
|
||||
const filePath = join(agentsDir, `${name}.md`);
|
||||
await writeFile(filePath, markdown, "utf-8");
|
||||
|
||||
return {
|
||||
success: true,
|
||||
filePath,
|
||||
name,
|
||||
};
|
||||
} catch (error) {
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Create a new skill from description using LLM
|
||||
*/
|
||||
export const createSkillFromDescription = async (
|
||||
options: CreateSkillOptions,
|
||||
): Promise<CreationResult> => {
|
||||
const { description, workingDir, provider = "copilot", model } = options;
|
||||
|
||||
try {
|
||||
// Generate skill using LLM
|
||||
const prompt = SKILL_CREATION_PROMPT.replace("{{description}}", description);
|
||||
|
||||
const response = await chat(
|
||||
provider,
|
||||
[
|
||||
{ role: "system", content: "You are an expert skill documentation generator." },
|
||||
{ role: "user", content: prompt },
|
||||
],
|
||||
{ model, temperature: 0.7 },
|
||||
);
|
||||
|
||||
if (!response.content) {
|
||||
return { success: false, error: "No response from LLM" };
|
||||
}
|
||||
|
||||
// Clean and parse the generated markdown
|
||||
const markdown = cleanMarkdown(response.content);
|
||||
const name = extractName(markdown);
|
||||
|
||||
if (!name) {
|
||||
return { success: false, error: "Could not extract skill name from generated content" };
|
||||
}
|
||||
|
||||
// Convert name to directory-safe format
|
||||
const dirName = name.toLowerCase().replace(/\s+/g, "-").replace(/[^a-z0-9-]/g, "");
|
||||
|
||||
// Create skills directory structure and write file
|
||||
const skillDir = join(workingDir, CODETYPER_DIRS.skills, dirName);
|
||||
await ensureDir(skillDir);
|
||||
|
||||
const filePath = join(skillDir, "SKILL.md");
|
||||
await writeFile(filePath, markdown, "utf-8");
|
||||
|
||||
return {
|
||||
success: true,
|
||||
filePath,
|
||||
name,
|
||||
};
|
||||
} catch (error) {
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* List existing agents in .codetyper/agents/
|
||||
*/
|
||||
export const listCustomAgents = async (
|
||||
workingDir: string,
|
||||
): Promise<string[]> => {
|
||||
const agentsDir = join(workingDir, CODETYPER_DIRS.agents);
|
||||
|
||||
try {
|
||||
const files = await readdir(agentsDir);
|
||||
return files
|
||||
.filter((f) => f.endsWith(".md"))
|
||||
.map((f) => f.replace(".md", ""));
|
||||
} catch {
|
||||
return [];
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* List existing skills in .codetyper/skills/
|
||||
*/
|
||||
export const listCustomSkills = async (
|
||||
workingDir: string,
|
||||
): Promise<string[]> => {
|
||||
const skillsDir = join(workingDir, CODETYPER_DIRS.skills);
|
||||
|
||||
try {
|
||||
const dirs = await readdir(skillsDir, { withFileTypes: true });
|
||||
return dirs.filter((d) => d.isDirectory()).map((d) => d.name);
|
||||
} catch {
|
||||
return [];
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Get suggested agent types based on description
|
||||
*/
|
||||
export const suggestAgentType = (description: string): string => {
|
||||
const lower = description.toLowerCase();
|
||||
|
||||
const patterns: Record<string, string[]> = {
|
||||
explore: ["search", "find", "explore", "understand", "analyze codebase"],
|
||||
review: ["review", "check", "audit", "inspect", "quality"],
|
||||
implement: ["implement", "create", "build", "write code", "develop"],
|
||||
test: ["test", "testing", "unit test", "coverage"],
|
||||
refactor: ["refactor", "improve", "clean up", "optimize"],
|
||||
security: ["security", "vulnerability", "secure", "audit"],
|
||||
documentation: ["document", "docs", "readme", "api docs"],
|
||||
plan: ["plan", "design", "architect", "strategy"],
|
||||
};
|
||||
|
||||
for (const [type, keywords] of Object.entries(patterns)) {
|
||||
if (keywords.some((kw) => lower.includes(kw))) {
|
||||
return type;
|
||||
}
|
||||
}
|
||||
|
||||
return "general";
|
||||
};
|
||||
@@ -175,6 +175,44 @@ const getToolCallIndex = (
|
||||
return accumulator.toolCalls.size;
|
||||
};
|
||||
|
||||
/**
|
||||
* Check if JSON appears to be truncated (incomplete)
|
||||
*/
|
||||
const isLikelyTruncatedJson = (jsonStr: string): boolean => {
|
||||
const trimmed = jsonStr.trim();
|
||||
if (!trimmed) return false;
|
||||
|
||||
// Count braces and brackets
|
||||
let braceCount = 0;
|
||||
let bracketCount = 0;
|
||||
let inString = false;
|
||||
let escaped = false;
|
||||
|
||||
for (const char of trimmed) {
|
||||
if (escaped) {
|
||||
escaped = false;
|
||||
continue;
|
||||
}
|
||||
if (char === "\\") {
|
||||
escaped = true;
|
||||
continue;
|
||||
}
|
||||
if (char === '"') {
|
||||
inString = !inString;
|
||||
continue;
|
||||
}
|
||||
if (!inString) {
|
||||
if (char === "{") braceCount++;
|
||||
if (char === "}") braceCount--;
|
||||
if (char === "[") bracketCount++;
|
||||
if (char === "]") bracketCount--;
|
||||
}
|
||||
}
|
||||
|
||||
// If counts are unbalanced, JSON is truncated
|
||||
return braceCount !== 0 || bracketCount !== 0 || inString;
|
||||
};
|
||||
|
||||
/**
|
||||
* Convert partial tool call to complete tool call
|
||||
*/
|
||||
@@ -188,10 +226,16 @@ const finalizeToolCall = (partial: PartialToolCall): ToolCall => {
|
||||
try {
|
||||
args = JSON.parse(rawBuffer);
|
||||
} catch (e) {
|
||||
const isTruncated = isLikelyTruncatedJson(rawBuffer);
|
||||
const errorType = isTruncated
|
||||
? "JSON truncated (likely max_tokens exceeded)"
|
||||
: "JSON parse failed";
|
||||
|
||||
args = {
|
||||
__debug_error: "JSON parse failed",
|
||||
__debug_error: errorType,
|
||||
__debug_buffer: rawBuffer.substring(0, 200),
|
||||
__debug_parseError: e instanceof Error ? e.message : String(e),
|
||||
__debug_truncated: isTruncated,
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -211,6 +255,23 @@ const executeTool = async (
|
||||
state: StreamAgentState,
|
||||
toolCall: ToolCall,
|
||||
): Promise<ToolResult> => {
|
||||
// Check for debug error markers from truncated/malformed JSON
|
||||
const debugError = toolCall.arguments.__debug_error as string | undefined;
|
||||
if (debugError) {
|
||||
const isTruncated = toolCall.arguments.__debug_truncated === true;
|
||||
const title = isTruncated ? "Tool call truncated" : "Tool validation error";
|
||||
const hint = isTruncated
|
||||
? "\nHint: The model's response was cut off. Try a simpler request or increase max_tokens."
|
||||
: "";
|
||||
|
||||
return {
|
||||
success: false,
|
||||
title,
|
||||
output: "",
|
||||
error: `Tool validation error: ${toolCall.name}: ${debugError}${hint}\nReceived: ${JSON.stringify(toolCall.arguments)}`,
|
||||
};
|
||||
}
|
||||
|
||||
const tool = getTool(toolCall.name);
|
||||
|
||||
if (!tool) {
|
||||
@@ -246,6 +307,103 @@ const executeTool = async (
|
||||
}
|
||||
};
|
||||
|
||||
// =============================================================================
|
||||
// Parallel Tool Execution
|
||||
// =============================================================================
|
||||
|
||||
/**
 * Tools that are safe to execute in parallel (read-only or isolated).
 * Any tool NOT in this set is assumed to mutate local state (write, edit,
 * bash, ...) and is executed sequentially instead.
 */
const PARALLEL_SAFE_TOOLS = new Set([
  "task_agent", // Subagent spawning - designed for parallel execution
  "read", // Read-only
  "glob", // Read-only
  "grep", // Read-only
  "web_search", // External API, no local state
  "web_fetch", // External API, no local state
  "todo_read", // Read-only
  "lsp", // Read-only queries
]);

/**
 * Maximum number of tool executions run concurrently in one chunk.
 */
const MAX_PARALLEL_TOOLS = 3;
|
||||
|
||||
/**
|
||||
* Execute tool calls with intelligent parallelism
|
||||
* - Parallel-safe tools (task_agent, read, glob, grep) run concurrently
|
||||
* - File-modifying tools (write, edit, bash) run sequentially
|
||||
*/
|
||||
const executeToolCallsWithParallelism = async (
|
||||
state: StreamAgentState,
|
||||
toolCalls: ToolCall[],
|
||||
): Promise<Array<{ toolCall: ToolCall; result: ToolResult }>> => {
|
||||
// Separate into parallel-safe and sequential groups
|
||||
const parallelCalls: ToolCall[] = [];
|
||||
const sequentialCalls: ToolCall[] = [];
|
||||
|
||||
for (const tc of toolCalls) {
|
||||
if (PARALLEL_SAFE_TOOLS.has(tc.name)) {
|
||||
parallelCalls.push(tc);
|
||||
} else {
|
||||
sequentialCalls.push(tc);
|
||||
}
|
||||
}
|
||||
|
||||
const results: Array<{ toolCall: ToolCall; result: ToolResult }> = [];
|
||||
|
||||
// Execute parallel-safe tools in parallel (up to MAX_PARALLEL_TOOLS at a time)
|
||||
if (parallelCalls.length > 0) {
|
||||
const parallelResults = await executeInParallelChunks(
|
||||
state,
|
||||
parallelCalls,
|
||||
MAX_PARALLEL_TOOLS,
|
||||
);
|
||||
results.push(...parallelResults);
|
||||
}
|
||||
|
||||
// Execute sequential tools one at a time
|
||||
for (const toolCall of sequentialCalls) {
|
||||
const result = await executeTool(state, toolCall);
|
||||
results.push({ toolCall, result });
|
||||
}
|
||||
|
||||
// Return results in original order
|
||||
return toolCalls.map((tc) => {
|
||||
const found = results.find((r) => r.toolCall.id === tc.id);
|
||||
return found ?? { toolCall: tc, result: { success: false, title: "Error", output: "", error: "Tool result not found" } };
|
||||
});
|
||||
};
|
||||
|
||||
/**
|
||||
* Execute tools in parallel chunks
|
||||
*/
|
||||
const executeInParallelChunks = async (
|
||||
state: StreamAgentState,
|
||||
toolCalls: ToolCall[],
|
||||
chunkSize: number,
|
||||
): Promise<Array<{ toolCall: ToolCall; result: ToolResult }>> => {
|
||||
const results: Array<{ toolCall: ToolCall; result: ToolResult }> = [];
|
||||
|
||||
// Process in chunks of chunkSize
|
||||
for (let i = 0; i < toolCalls.length; i += chunkSize) {
|
||||
const chunk = toolCalls.slice(i, i + chunkSize);
|
||||
|
||||
// Execute chunk in parallel
|
||||
const chunkResults = await Promise.all(
|
||||
chunk.map(async (toolCall) => {
|
||||
const result = await executeTool(state, toolCall);
|
||||
return { toolCall, result };
|
||||
}),
|
||||
);
|
||||
|
||||
results.push(...chunkResults);
|
||||
}
|
||||
|
||||
return results;
|
||||
};
|
||||
|
||||
// =============================================================================
|
||||
// Streaming LLM Call
|
||||
// =============================================================================
|
||||
@@ -368,13 +526,16 @@ export const runAgentLoopStream = async (
|
||||
// Track if all tool calls in this iteration failed
|
||||
let allFailed = true;
|
||||
|
||||
// Execute each tool call
|
||||
for (const toolCall of response.toolCalls) {
|
||||
// Execute tool calls with parallel execution for safe tools
|
||||
const toolResults = await executeToolCallsWithParallelism(
|
||||
state,
|
||||
response.toolCalls,
|
||||
);
|
||||
|
||||
// Process results in order
|
||||
for (const { toolCall, result } of toolResults) {
|
||||
state.options.onToolCall?.(toolCall);
|
||||
|
||||
const result = await executeTool(state, toolCall);
|
||||
allToolCalls.push({ call: toolCall, result });
|
||||
|
||||
state.options.onToolResult?.(toolCall.id, result);
|
||||
|
||||
// Track success/failure
|
||||
|
||||
@@ -31,6 +31,25 @@ import {
|
||||
import { MAX_ITERATIONS } from "@constants/agent";
|
||||
import { usageStore } from "@stores/core/usage-store";
|
||||
|
||||
/**
 * Tools that are safe to execute in parallel (read-only or isolated).
 * Any tool NOT in this set is assumed to mutate local state (write, edit,
 * bash, ...) and is executed sequentially instead.
 */
const PARALLEL_SAFE_TOOLS = new Set([
  "task_agent", // Subagent spawning - designed for parallel execution
  "read", // Read-only
  "glob", // Read-only
  "grep", // Read-only
  "web_search", // External API, no local state
  "web_fetch", // External API, no local state
  "todo_read", // Read-only
  "lsp", // Read-only queries
]);

/**
 * Maximum number of tool executions run concurrently in one chunk.
 */
const MAX_PARALLEL_TOOLS = 3;
|
||||
|
||||
/**
|
||||
* Agent state interface
|
||||
*/
|
||||
@@ -225,6 +244,80 @@ const executeTool = async (
|
||||
return result;
|
||||
};
|
||||
|
||||
/**
|
||||
* Execute tool calls with intelligent parallelism
|
||||
* - Parallel-safe tools (task_agent, read, glob, grep) run concurrently
|
||||
* - File-modifying tools (write, edit, bash) run sequentially
|
||||
*/
|
||||
const executeToolCallsWithParallelism = async (
|
||||
state: AgentState,
|
||||
toolCalls: ToolCall[],
|
||||
): Promise<Array<{ toolCall: ToolCall; result: ToolResult }>> => {
|
||||
// Separate into parallel-safe and sequential groups
|
||||
const parallelCalls: ToolCall[] = [];
|
||||
const sequentialCalls: ToolCall[] = [];
|
||||
|
||||
for (const tc of toolCalls) {
|
||||
if (PARALLEL_SAFE_TOOLS.has(tc.name)) {
|
||||
parallelCalls.push(tc);
|
||||
} else {
|
||||
sequentialCalls.push(tc);
|
||||
}
|
||||
}
|
||||
|
||||
const results: Array<{ toolCall: ToolCall; result: ToolResult }> = [];
|
||||
|
||||
// Execute parallel-safe tools in parallel (up to MAX_PARALLEL_TOOLS at a time)
|
||||
if (parallelCalls.length > 0) {
|
||||
const parallelResults = await executeInParallelChunks(
|
||||
state,
|
||||
parallelCalls,
|
||||
MAX_PARALLEL_TOOLS,
|
||||
);
|
||||
results.push(...parallelResults);
|
||||
}
|
||||
|
||||
// Execute sequential tools one at a time
|
||||
for (const toolCall of sequentialCalls) {
|
||||
const result = await executeTool(state, toolCall);
|
||||
results.push({ toolCall, result });
|
||||
}
|
||||
|
||||
// Return results in original order
|
||||
return toolCalls.map((tc) => {
|
||||
const found = results.find((r) => r.toolCall.id === tc.id);
|
||||
return found ?? { toolCall: tc, result: { success: false, title: "Error", output: "", error: "Tool result not found" } };
|
||||
});
|
||||
};
|
||||
|
||||
/**
|
||||
* Execute tools in parallel chunks
|
||||
*/
|
||||
const executeInParallelChunks = async (
|
||||
state: AgentState,
|
||||
toolCalls: ToolCall[],
|
||||
chunkSize: number,
|
||||
): Promise<Array<{ toolCall: ToolCall; result: ToolResult }>> => {
|
||||
const results: Array<{ toolCall: ToolCall; result: ToolResult }> = [];
|
||||
|
||||
// Process in chunks of chunkSize
|
||||
for (let i = 0; i < toolCalls.length; i += chunkSize) {
|
||||
const chunk = toolCalls.slice(i, i + chunkSize);
|
||||
|
||||
// Execute chunk in parallel
|
||||
const chunkResults = await Promise.all(
|
||||
chunk.map(async (toolCall) => {
|
||||
const result = await executeTool(state, toolCall);
|
||||
return { toolCall, result };
|
||||
}),
|
||||
);
|
||||
|
||||
results.push(...chunkResults);
|
||||
}
|
||||
|
||||
return results;
|
||||
};
|
||||
|
||||
/**
|
||||
* Run the agent with the given messages
|
||||
*/
|
||||
@@ -282,8 +375,14 @@ export const runAgentLoop = async (
|
||||
state.options.onText?.(response.content);
|
||||
}
|
||||
|
||||
// Execute each tool call
|
||||
for (const toolCall of response.toolCalls) {
|
||||
// Execute tool calls with parallel execution for safe tools
|
||||
const toolResults = await executeToolCallsWithParallelism(
|
||||
state,
|
||||
response.toolCalls,
|
||||
);
|
||||
|
||||
// Process results in order
|
||||
for (const { toolCall, result } of toolResults) {
|
||||
state.options.onToolCall?.(toolCall);
|
||||
|
||||
if (state.options.verbose) {
|
||||
@@ -293,9 +392,7 @@ export const runAgentLoop = async (
|
||||
);
|
||||
}
|
||||
|
||||
const result = await executeTool(state, toolCall);
|
||||
allToolCalls.push({ call: toolCall, result });
|
||||
|
||||
state.options.onToolResult?.(toolCall.id, result);
|
||||
|
||||
// Add tool result message
|
||||
|
||||
@@ -1,9 +1,19 @@
|
||||
import fs from "fs/promises";
|
||||
import path from "path";
|
||||
import type { AgentType, ChatSession, ChatMessage } from "@/types/common";
|
||||
import type { SessionInfo } from "@/types/session";
|
||||
import type { SessionInfo, SubagentSessionConfig } from "@/types/session";
|
||||
import { DIRS } from "@constants/paths";
|
||||
|
||||
/**
 * Extended ChatSession with subagent support.
 */
interface SubagentChatSession extends ChatSession {
  // Id of the session that spawned this subagent (set for subagents only).
  parentSessionId?: string;
  // True when this session was created via createSubagentSession.
  isSubagent?: boolean;
  // Agent type requested by the parent (e.g. "explore", "review").
  subagentType?: string;
  // The task description the subagent was asked to carry out.
  task?: string;
}
|
||||
|
||||
/**
|
||||
* Current session state
|
||||
*/
|
||||
@@ -252,5 +262,87 @@ export const setWorkingDirectory = async (dir: string): Promise<void> => {
|
||||
await saveSession();
|
||||
};
|
||||
|
||||
/**
|
||||
* Create a subagent session (child of a parent session)
|
||||
* Used by task_agent for proper session-based isolation like opencode
|
||||
*/
|
||||
export const createSubagentSession = async (
|
||||
config: SubagentSessionConfig,
|
||||
): Promise<SubagentChatSession> => {
|
||||
const session: SubagentChatSession = {
|
||||
id: generateId(),
|
||||
agent: "subagent" as AgentType,
|
||||
messages: [],
|
||||
contextFiles: config.contextFiles ?? [],
|
||||
createdAt: Date.now(),
|
||||
updatedAt: Date.now(),
|
||||
parentSessionId: config.parentSessionId,
|
||||
isSubagent: true,
|
||||
subagentType: config.subagentType,
|
||||
task: config.task,
|
||||
};
|
||||
|
||||
// Set working directory
|
||||
(session as SubagentChatSession & { workingDirectory?: string }).workingDirectory =
|
||||
config.workingDirectory;
|
||||
|
||||
// Save but don't set as current (subagents run independently)
|
||||
await saveSession(session);
|
||||
return session;
|
||||
};
|
||||
|
||||
/**
|
||||
* Get all subagent sessions for a parent session
|
||||
*/
|
||||
export const getSubagentSessions = async (
|
||||
parentSessionId: string,
|
||||
): Promise<SubagentChatSession[]> => {
|
||||
const sessions = await listSessions();
|
||||
return sessions.filter(
|
||||
(s) => (s as SubagentChatSession).parentSessionId === parentSessionId,
|
||||
) as SubagentChatSession[];
|
||||
};
|
||||
|
||||
/**
|
||||
* Add message to a specific session (for subagents)
|
||||
*/
|
||||
export const addMessageToSession = async (
|
||||
sessionId: string,
|
||||
role: "user" | "assistant" | "system" | "tool",
|
||||
content: string,
|
||||
): Promise<void> => {
|
||||
const session = await loadSession(sessionId);
|
||||
if (!session) {
|
||||
throw new Error(`Session not found: ${sessionId}`);
|
||||
}
|
||||
|
||||
const message: ChatMessage = {
|
||||
role: role as "user" | "assistant" | "system",
|
||||
content,
|
||||
timestamp: Date.now(),
|
||||
};
|
||||
|
||||
session.messages.push(message);
|
||||
await saveSession(session);
|
||||
};
|
||||
|
||||
/**
|
||||
* Update subagent session with result
|
||||
*/
|
||||
export const completeSubagentSession = async (
|
||||
sessionId: string,
|
||||
result: { success: boolean; output: string; error?: string },
|
||||
): Promise<void> => {
|
||||
const session = await loadSession(sessionId);
|
||||
if (!session) return;
|
||||
|
||||
// Add final result as assistant message
|
||||
const resultContent = result.success
|
||||
? `## Subagent Result\n\n${result.output}`
|
||||
: `## Subagent Error\n\n${result.error}\n\n${result.output}`;
|
||||
|
||||
await addMessageToSession(sessionId, "assistant", resultContent);
|
||||
};
|
||||
|
||||
// Re-export types
|
||||
export type { SessionInfo } from "@/types/session";
|
||||
export type { SessionInfo, SubagentSessionConfig } from "@/types/session";
|
||||
|
||||
Reference in New Issue
Block a user